Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-for-linus-2.6

Linus Torvalds
2005-09-07 17:31:27 -07:00
119 changed files with 7209 additions and 9426 deletions

View File

@@ -1,5 +1,5 @@
==================================================================== ====================================================================
= Adaptec Aic7xxx Fast -> Ultra160 Family Manager Set v6.2.28 = = Adaptec Aic7xxx Fast -> Ultra160 Family Manager Set v7.0 =
= README for = = README for =
= The Linux Operating System = = The Linux Operating System =
==================================================================== ====================================================================
@@ -131,6 +131,10 @@ The following information is available in this file:
SCSI "stub" effects. SCSI "stub" effects.
2. Version History 2. Version History
7.0 (4th August, 2005)
- Updated driver to use SCSI transport class infrastructure
- Up-ported sequencer and core fixes from the last Adaptec-released
version of the driver.
6.2.36 (June 3rd, 2003) 6.2.36 (June 3rd, 2003)
- Correct code that disables PCI parity error checking. - Correct code that disables PCI parity error checking.
- Correct and simplify handling of the ignore wide residue - Correct and simplify handling of the ignore wide residue

View File

@@ -373,13 +373,11 @@ Summary:
scsi_activate_tcq - turn on tag command queueing scsi_activate_tcq - turn on tag command queueing
scsi_add_device - creates new scsi device (lu) instance scsi_add_device - creates new scsi device (lu) instance
scsi_add_host - perform sysfs registration and SCSI bus scan. scsi_add_host - perform sysfs registration and SCSI bus scan.
scsi_add_timer - (re-)start timer on a SCSI command.
scsi_adjust_queue_depth - change the queue depth on a SCSI device scsi_adjust_queue_depth - change the queue depth on a SCSI device
scsi_assign_lock - replace default host_lock with given lock scsi_assign_lock - replace default host_lock with given lock
scsi_bios_ptable - return copy of block device's partition table scsi_bios_ptable - return copy of block device's partition table
scsi_block_requests - prevent further commands being queued to given host scsi_block_requests - prevent further commands being queued to given host
scsi_deactivate_tcq - turn off tag command queueing scsi_deactivate_tcq - turn off tag command queueing
scsi_delete_timer - cancel timer on a SCSI command.
scsi_host_alloc - return a new scsi_host instance whose refcount==1 scsi_host_alloc - return a new scsi_host instance whose refcount==1
scsi_host_get - increments Scsi_Host instance's refcount scsi_host_get - increments Scsi_Host instance's refcount
scsi_host_put - decrements Scsi_Host instance's refcount (free if 0) scsi_host_put - decrements Scsi_Host instance's refcount (free if 0)
@@ -457,27 +455,6 @@ struct scsi_device * scsi_add_device(struct Scsi_Host *shost,
int scsi_add_host(struct Scsi_Host *shost, struct device * dev) int scsi_add_host(struct Scsi_Host *shost, struct device * dev)
/**
* scsi_add_timer - (re-)start timer on a SCSI command.
* @scmd: pointer to scsi command instance
* @timeout: duration of timeout in "jiffies"
* @complete: pointer to function to call if timeout expires
*
* Returns nothing
*
* Might block: no
*
* Notes: Each scsi command has its own timer, and as it is added
* to the queue, we set up the timer. When the command completes,
* we cancel the timer. An LLD can use this function to change
* the existing timeout value.
*
* Defined in: drivers/scsi/scsi_error.c
**/
void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
void (*complete)(struct scsi_cmnd *))
/** /**
* scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device * scsi_adjust_queue_depth - allow LLD to change queue depth on a SCSI device
* @sdev: pointer to SCSI device to change queue depth on * @sdev: pointer to SCSI device to change queue depth on
@@ -565,24 +542,6 @@ void scsi_block_requests(struct Scsi_Host * shost)
void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
/**
* scsi_delete_timer - cancel timer on a SCSI command.
* @scmd: pointer to scsi command instance
*
* Returns 1 if able to cancel timer else 0 (i.e. too late or already
* cancelled).
*
* Might block: no [may in the future if it invokes del_timer_sync()]
*
* Notes: All commands issued by upper levels already have a timeout
* associated with them. An LLD can use this function to cancel the
* timer.
*
* Defined in: drivers/scsi/scsi_error.c
**/
int scsi_delete_timer(struct scsi_cmnd *scmd)
/** /**
* scsi_host_alloc - create a scsi host adapter instance and perform basic * scsi_host_alloc - create a scsi host adapter instance and perform basic
* initialization. * initialization.

View File

@@ -822,6 +822,13 @@ L: emu10k1-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/emu10k1/ W: http://sourceforge.net/projects/emu10k1/
S: Maintained S: Maintained
EMULEX LPFC FC SCSI DRIVER
P: James Smart
M: james.smart@emulex.com
L: linux-scsi@vger.kernel.org
W: http://sourceforge.net/projects/lpfcxxxx
S: Supported
EPSON 1355 FRAMEBUFFER DRIVER EPSON 1355 FRAMEBUFFER DRIVER
P: Christopher Hoover P: Christopher Hoover
M: ch@murgatroid.com, ch@hpl.hp.com M: ch@murgatroid.com, ch@hpl.hp.com

View File

@@ -22,11 +22,26 @@
/* This is a private structure used to tie the classdev and the /* This is a private structure used to tie the classdev and the
* container .. it should never be visible outside this file */ * container .. it should never be visible outside this file */
struct internal_container { struct internal_container {
struct list_head node; struct klist_node node;
struct attribute_container *cont; struct attribute_container *cont;
struct class_device classdev; struct class_device classdev;
}; };
static void internal_container_klist_get(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
class_device_get(&ic->classdev);
}
static void internal_container_klist_put(struct klist_node *n)
{
struct internal_container *ic =
container_of(n, struct internal_container, node);
class_device_put(&ic->classdev);
}
/** /**
* attribute_container_classdev_to_container - given a classdev, return the container * attribute_container_classdev_to_container - given a classdev, return the container
* *
@@ -57,7 +72,8 @@ int
attribute_container_register(struct attribute_container *cont) attribute_container_register(struct attribute_container *cont)
{ {
INIT_LIST_HEAD(&cont->node); INIT_LIST_HEAD(&cont->node);
INIT_LIST_HEAD(&cont->containers); klist_init(&cont->containers,internal_container_klist_get,
internal_container_klist_put);
down(&attribute_container_mutex); down(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list); list_add_tail(&cont->node, &attribute_container_list);
@@ -77,11 +93,13 @@ attribute_container_unregister(struct attribute_container *cont)
{ {
int retval = -EBUSY; int retval = -EBUSY;
down(&attribute_container_mutex); down(&attribute_container_mutex);
if (!list_empty(&cont->containers)) spin_lock(&cont->containers.k_lock);
if (!list_empty(&cont->containers.k_list))
goto out; goto out;
retval = 0; retval = 0;
list_del(&cont->node); list_del(&cont->node);
out: out:
spin_unlock(&cont->containers.k_lock);
up(&attribute_container_mutex); up(&attribute_container_mutex);
return retval; return retval;
@@ -140,7 +158,6 @@ attribute_container_add_device(struct device *dev,
continue; continue;
} }
memset(ic, 0, sizeof(struct internal_container)); memset(ic, 0, sizeof(struct internal_container));
INIT_LIST_HEAD(&ic->node);
ic->cont = cont; ic->cont = cont;
class_device_initialize(&ic->classdev); class_device_initialize(&ic->classdev);
ic->classdev.dev = get_device(dev); ic->classdev.dev = get_device(dev);
@@ -151,11 +168,22 @@ attribute_container_add_device(struct device *dev,
fn(cont, dev, &ic->classdev); fn(cont, dev, &ic->classdev);
else else
attribute_container_add_class_device(&ic->classdev); attribute_container_add_class_device(&ic->classdev);
list_add_tail(&ic->node, &cont->containers); klist_add_tail(&ic->node, &cont->containers);
} }
up(&attribute_container_mutex); up(&attribute_container_mutex);
} }
/* FIXME: can't break out of this unless klist_iter_exit is also
* called before doing the break
*/
#define klist_for_each_entry(pos, head, member, iter) \
for (klist_iter_init(head, iter); (pos = ({ \
struct klist_node *n = klist_next(iter); \
n ? container_of(n, typeof(*pos), member) : \
({ klist_iter_exit(iter) ; NULL; }); \
}) ) != NULL; )
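For reference, this macro is just shorthand for the klist iterator primitives; a minimal open-coded sketch of the same loop (reusing the cont and dev variables of the surrounding functions, so illustrative only) would be:

/* open-coded equivalent of klist_for_each_entry() above */
struct internal_container *ic;
struct klist_iter iter;
struct klist_node *n;

klist_iter_init(&cont->containers, &iter);
while ((n = klist_next(&iter)) != NULL) {
	ic = container_of(n, struct internal_container, node);
	if (dev != ic->classdev.dev)
		continue;
	/* ... operate on ic->classdev ... */
}
klist_iter_exit(&iter);

As the FIXME notes, klist_iter_exit() must still be reached if the loop is left early, otherwise the reference taken by the iterator on the current node is never dropped.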
/** /**
* attribute_container_remove_device - make device eligible for removal. * attribute_container_remove_device - make device eligible for removal.
* *
@@ -182,17 +210,19 @@ attribute_container_remove_device(struct device *dev,
down(&attribute_container_mutex); down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) { list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp; struct internal_container *ic;
struct klist_iter iter;
if (attribute_container_no_classdevs(cont)) if (attribute_container_no_classdevs(cont))
continue; continue;
if (!cont->match(cont, dev)) if (!cont->match(cont, dev))
continue; continue;
list_for_each_entry_safe(ic, tmp, &cont->containers, node) {
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev != ic->classdev.dev) if (dev != ic->classdev.dev)
continue; continue;
list_del(&ic->node); klist_del(&ic->node);
if (fn) if (fn)
fn(cont, dev, &ic->classdev); fn(cont, dev, &ic->classdev);
else { else {
@@ -225,12 +255,18 @@ attribute_container_device_trigger(struct device *dev,
down(&attribute_container_mutex); down(&attribute_container_mutex);
list_for_each_entry(cont, &attribute_container_list, node) { list_for_each_entry(cont, &attribute_container_list, node) {
struct internal_container *ic, *tmp; struct internal_container *ic;
struct klist_iter iter;
if (!cont->match(cont, dev)) if (!cont->match(cont, dev))
continue; continue;
list_for_each_entry_safe(ic, tmp, &cont->containers, node) { if (attribute_container_no_classdevs(cont)) {
fn(cont, dev, NULL);
continue;
}
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (dev == ic->classdev.dev) if (dev == ic->classdev.dev)
fn(cont, dev, &ic->classdev); fn(cont, dev, &ic->classdev);
} }
@@ -368,6 +404,36 @@ attribute_container_class_device_del(struct class_device *classdev)
} }
EXPORT_SYMBOL_GPL(attribute_container_class_device_del); EXPORT_SYMBOL_GPL(attribute_container_class_device_del);
/**
* attribute_container_find_class_device - find the corresponding class_device
*
* @cont: the container
* @dev: the generic device
*
* Looks up the device in the container's list of class devices and returns
* the corresponding class_device.
*/
struct class_device *
attribute_container_find_class_device(struct attribute_container *cont,
struct device *dev)
{
struct class_device *cdev = NULL;
struct internal_container *ic;
struct klist_iter iter;
klist_for_each_entry(ic, &cont->containers, node, &iter) {
if (ic->classdev.dev == dev) {
cdev = &ic->classdev;
/* FIXME: must exit iterator then break */
klist_iter_exit(&iter);
break;
}
}
return cdev;
}
EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
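A hedged sketch of a caller (every name below other than attribute_container_find_class_device() is hypothetical):

/* look up the classdev that attribute_container_add_device() created
 * for this device in this container */
static struct class_device *example_lookup(struct attribute_container *cont,
					    struct device *dev)
{
	struct class_device *cdev;

	cdev = attribute_container_find_class_device(cont, dev);
	if (cdev)
		printk(KERN_DEBUG "found classdev %s\n", cdev->class_id);
	return cdev;
}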
int __init int __init
attribute_container_init(void) attribute_container_init(void)
{ {

View File

@@ -7,7 +7,7 @@
* This file is licensed under GPLv2 * This file is licensed under GPLv2
* *
* The basic idea here is to allow any "device controller" (which * The basic idea here is to allow any "device controller" (which
* would most often be a Host Bus Adapter" to use the services of one * would most often be a Host Bus Adapter to use the services of one
* or more tranport classes for performing transport specific * or more tranport classes for performing transport specific
* services. Transport specific services are things that the generic * services. Transport specific services are things that the generic
* command layer doesn't want to know about (speed settings, line * command layer doesn't want to know about (speed settings, line
@@ -64,7 +64,9 @@ void transport_class_unregister(struct transport_class *tclass)
} }
EXPORT_SYMBOL_GPL(transport_class_unregister); EXPORT_SYMBOL_GPL(transport_class_unregister);
static int anon_transport_dummy_function(struct device *dev) static int anon_transport_dummy_function(struct transport_container *tc,
struct device *dev,
struct class_device *cdev)
{ {
/* do nothing */ /* do nothing */
return 0; return 0;
@@ -115,9 +117,10 @@ static int transport_setup_classdev(struct attribute_container *cont,
struct class_device *classdev) struct class_device *classdev)
{ {
struct transport_class *tclass = class_to_transport_class(cont->class); struct transport_class *tclass = class_to_transport_class(cont->class);
struct transport_container *tcont = attribute_container_to_transport_container(cont);
if (tclass->setup) if (tclass->setup)
tclass->setup(dev); tclass->setup(tcont, dev, classdev);
return 0; return 0;
} }
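Because setup (and, below, configure and remove) now receive the transport container and class_device as well as the device, a transport class's callbacks take a three-argument form; a hypothetical example:

/* hypothetical setup callback matching the new signature */
static int example_transport_setup(struct transport_container *tc,
				   struct device *dev,
				   struct class_device *cdev)
{
	/* per-device transport initialisation would go here */
	return 0;
}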
@@ -178,12 +181,14 @@ void transport_add_device(struct device *dev)
EXPORT_SYMBOL_GPL(transport_add_device); EXPORT_SYMBOL_GPL(transport_add_device);
static int transport_configure(struct attribute_container *cont, static int transport_configure(struct attribute_container *cont,
struct device *dev) struct device *dev,
struct class_device *cdev)
{ {
struct transport_class *tclass = class_to_transport_class(cont->class); struct transport_class *tclass = class_to_transport_class(cont->class);
struct transport_container *tcont = attribute_container_to_transport_container(cont);
if (tclass->configure) if (tclass->configure)
tclass->configure(dev); tclass->configure(tcont, dev, cdev);
return 0; return 0;
} }
@@ -202,7 +207,7 @@ static int transport_configure(struct attribute_container *cont,
*/ */
void transport_configure_device(struct device *dev) void transport_configure_device(struct device *dev)
{ {
attribute_container_trigger(dev, transport_configure); attribute_container_device_trigger(dev, transport_configure);
} }
EXPORT_SYMBOL_GPL(transport_configure_device); EXPORT_SYMBOL_GPL(transport_configure_device);
@@ -215,7 +220,7 @@ static int transport_remove_classdev(struct attribute_container *cont,
struct transport_class *tclass = class_to_transport_class(cont->class); struct transport_class *tclass = class_to_transport_class(cont->class);
if (tclass->remove) if (tclass->remove)
tclass->remove(dev); tclass->remove(tcont, dev, classdev);
if (tclass->remove != anon_transport_dummy_function) { if (tclass->remove != anon_transport_dummy_function) {
if (tcont->statistics) if (tcont->statistics)

View File

@@ -284,6 +284,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->special = NULL; rq->special = NULL;
rq->data_len = 0; rq->data_len = 0;
rq->data = NULL; rq->data = NULL;
rq->nr_phys_segments = 0;
rq->sense = NULL; rq->sense = NULL;
rq->end_io = NULL; rq->end_io = NULL;
rq->end_io_data = NULL; rq->end_io_data = NULL;
@@ -2115,7 +2116,7 @@ EXPORT_SYMBOL(blk_insert_request);
/** /**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted * @q: request queue where request should be inserted
* @rw: READ or WRITE data * @rq: request structure to fill
* @ubuf: the user buffer * @ubuf: the user buffer
* @len: length of user data * @len: length of user data
* *
@@ -2132,21 +2133,19 @@ EXPORT_SYMBOL(blk_insert_request);
* original bio must be passed back in to blk_rq_unmap_user() for proper * original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping. * unmapping.
*/ */
struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
unsigned int len) unsigned int len)
{ {
unsigned long uaddr; unsigned long uaddr;
struct request *rq;
struct bio *bio; struct bio *bio;
int reading;
if (len > (q->max_sectors << 9)) if (len > (q->max_sectors << 9))
return ERR_PTR(-EINVAL); return -EINVAL;
if ((!len && ubuf) || (len && !ubuf)) if (!len || !ubuf)
return ERR_PTR(-EINVAL); return -EINVAL;
rq = blk_get_request(q, rw, __GFP_WAIT); reading = rq_data_dir(rq) == READ;
if (!rq)
return ERR_PTR(-ENOMEM);
/* /*
* if alignment requirement is satisfied, map in user pages for * if alignment requirement is satisfied, map in user pages for
@@ -2154,9 +2153,9 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
*/ */
uaddr = (unsigned long) ubuf; uaddr = (unsigned long) ubuf;
if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
bio = bio_map_user(q, NULL, uaddr, len, rw == READ); bio = bio_map_user(q, NULL, uaddr, len, reading);
else else
bio = bio_copy_user(q, uaddr, len, rw == READ); bio = bio_copy_user(q, uaddr, len, reading);
if (!IS_ERR(bio)) { if (!IS_ERR(bio)) {
rq->bio = rq->biotail = bio; rq->bio = rq->biotail = bio;
@@ -2164,28 +2163,70 @@ struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
rq->buffer = rq->data = NULL; rq->buffer = rq->data = NULL;
rq->data_len = len; rq->data_len = len;
return rq; return 0;
} }
/* /*
* bio is the err-ptr * bio is the err-ptr
*/ */
blk_put_request(rq); return PTR_ERR(bio);
return (struct request *) bio;
} }
EXPORT_SYMBOL(blk_rq_map_user); EXPORT_SYMBOL(blk_rq_map_user);
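With the new signature the caller allocates and owns the request for the whole cycle. A minimal sketch of the calling convention, modelled on the sg_io() conversion further below (the function name and parameters here are assumptions):

/* illustrative only: map a user buffer, run the request, unmap */
static int example_sg_read(request_queue_t *q, struct gendisk *bd_disk,
			   void __user *ubuf, unsigned int len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		goto out;

	bio = rq->bio;	/* keep for unmapping after completion */
	/* ... fill in rq->cmd, rq->cmd_len, rq->flags |= REQ_BLOCK_PC ... */
	blk_execute_rq(q, bd_disk, rq, 0);

	if (blk_rq_unmap_user(bio, len))
		ret = -EFAULT;
out:
	blk_put_request(rq);
	return ret;
}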
/**
* blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to map data to
* @iov: pointer to the iovec
* @iov_count: number of elements in the iovec
*
* Description:
* Data will be mapped directly for zero copy io, if possible. Otherwise
* a kernel bounce buffer is used.
*
* A matching blk_rq_unmap_user() must be issued at the end of io, while
* still in process context.
*
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
* before being submitted to the device, as pages mapped may be out of
* reach. It's the caller's responsibility to make sure this happens. The
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
struct sg_iovec *iov, int iov_count)
{
struct bio *bio;
if (!iov || iov_count <= 0)
return -EINVAL;
/* we don't allow misaligned data like bio_map_user() does. If the
* user is using sg, they're expected to know the alignment constraints
* and respect them accordingly */
bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
if (IS_ERR(bio))
return PTR_ERR(bio);
rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
rq->data_len = bio->bi_size;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
/** /**
* blk_rq_unmap_user - unmap a request with user data * blk_rq_unmap_user - unmap a request with user data
* @rq: request to be unmapped * @bio: bio to be unmapped
* @bio: bio for the request
* @ulen: length of user buffer * @ulen: length of user buffer
* *
* Description: * Description:
* Unmap a request previously mapped by blk_rq_map_user(). * Unmap a bio previously mapped by blk_rq_map_user().
*/ */
int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
{ {
int ret = 0; int ret = 0;
@@ -2196,31 +2237,89 @@ int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
ret = bio_uncopy_user(bio); ret = bio_uncopy_user(bio);
} }
blk_put_request(rq); return 0;
return ret;
} }
EXPORT_SYMBOL(blk_rq_unmap_user); EXPORT_SYMBOL(blk_rq_unmap_user);
/**
* blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
* @rq: request to fill
* @kbuf: the kernel buffer
* @len: length of kernel data
* @gfp_mask: memory allocation flags
*/
int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
unsigned int len, unsigned int gfp_mask)
{
struct bio *bio;
if (len > (q->max_sectors << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
bio = bio_map_kern(q, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
rq->data_len = len;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
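A hedged sketch of driving a REQ_BLOCK_PC command through a kernel buffer with this helper (all names apart from the block-layer calls are assumptions):

/* illustrative only: send a pre-built CDB with a kernel data buffer */
static int example_send_cmd(request_queue_t *q, struct gendisk *disk,
			    unsigned char *cdb, unsigned int cdb_len,
			    void *buf, unsigned int buflen)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int err;

	err = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
	if (err)
		goto out;

	rq->flags |= REQ_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);	/* cdb_len must fit in rq->cmd */
	rq->cmd_len = cdb_len;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);
out:
	blk_put_request(rq);
	return err;
}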
/**
* blk_execute_rq_nowait - insert a request into queue for execution
* @q: queue to insert the request in
* @bd_disk: matching gendisk
* @rq: request to insert
* @at_head: insert request at head or tail of queue
* @done: I/O completion handler
*
* Description:
* Insert a fully prepared request at the back of the io scheduler queue
* for execution. Don't wait for completion.
*/
void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
void (*done)(struct request *))
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
rq->rq_disk = bd_disk;
rq->flags |= REQ_NOMERGE;
rq->end_io = done;
elv_add_request(q, rq, where, 1);
generic_unplug_device(q);
}
/** /**
* blk_execute_rq - insert a request into queue for execution * blk_execute_rq - insert a request into queue for execution
* @q: queue to insert the request in * @q: queue to insert the request in
* @bd_disk: matching gendisk * @bd_disk: matching gendisk
* @rq: request to insert * @rq: request to insert
* @at_head: insert request at head or tail of queue
* *
* Description: * Description:
* Insert a fully prepared request at the back of the io scheduler queue * Insert a fully prepared request at the back of the io scheduler queue
* for execution. * for execution and wait for completion.
*/ */
int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
struct request *rq) struct request *rq, int at_head)
{ {
DECLARE_COMPLETION(wait); DECLARE_COMPLETION(wait);
char sense[SCSI_SENSE_BUFFERSIZE]; char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0; int err = 0;
rq->rq_disk = bd_disk;
/* /*
* we need an extra reference to the request, so we can look at * we need an extra reference to the request, so we can look at
* it after io completion * it after io completion
@@ -2233,11 +2332,8 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
rq->sense_len = 0; rq->sense_len = 0;
} }
rq->flags |= REQ_NOMERGE;
rq->waiting = &wait; rq->waiting = &wait;
rq->end_io = blk_end_sync_rq; blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
generic_unplug_device(q);
wait_for_completion(&wait); wait_for_completion(&wait);
rq->waiting = NULL; rq->waiting = NULL;
@@ -2277,6 +2373,44 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
EXPORT_SYMBOL(blkdev_issue_flush); EXPORT_SYMBOL(blkdev_issue_flush);
/**
* blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
* @q: device queue
* @disk: gendisk
* @error_sector: error offset
*
* Description:
* Devices understanding the SCSI command set can use this function as * Devices understanding the SCSI command set can use this function as
* a helper for issuing a cache flush. Note: driver is required to store
* the error offset (in case of error flushing) in ->sector of struct
* request.
*/
int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
sector_t *error_sector)
{
struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
int ret;
rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
rq->sector = 0;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = 0x35;
rq->cmd_len = 12;
rq->data = NULL;
rq->data_len = 0;
rq->timeout = 60 * HZ;
ret = blk_execute_rq(q, disk, rq, 0);
if (ret && error_sector)
*error_sector = rq->sector;
blk_put_request(rq);
return ret;
}
EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
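Drivers normally hook this helper into their queue rather than call it directly; a one-line, hypothetical hookup from a SCSI upper-layer probe path (sdev is an assumed struct scsi_device):

blk_queue_issue_flush_fn(sdev->request_queue, blkdev_scsi_issue_flush_fn);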
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{ {
int rw = rq_data_dir(rq); int rw = rq_data_dir(rq);

View File

@@ -216,7 +216,7 @@ static int sg_io(struct file *file, request_queue_t *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr) struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{ {
unsigned long start_time; unsigned long start_time;
int reading, writing; int writing = 0, ret = 0;
struct request *rq; struct request *rq;
struct bio *bio; struct bio *bio;
char sense[SCSI_SENSE_BUFFERSIZE]; char sense[SCSI_SENSE_BUFFERSIZE];
@@ -231,38 +231,48 @@ static int sg_io(struct file *file, request_queue_t *q,
if (verify_command(file, cmd)) if (verify_command(file, cmd))
return -EPERM; return -EPERM;
/*
* we'll do that later
*/
if (hdr->iovec_count)
return -EOPNOTSUPP;
if (hdr->dxfer_len > (q->max_sectors << 9)) if (hdr->dxfer_len > (q->max_sectors << 9))
return -EIO; return -EIO;
reading = writing = 0; if (hdr->dxfer_len)
if (hdr->dxfer_len) {
switch (hdr->dxfer_direction) { switch (hdr->dxfer_direction) {
default: default:
return -EINVAL; return -EINVAL;
case SG_DXFER_TO_FROM_DEV: case SG_DXFER_TO_FROM_DEV:
reading = 1;
/* fall through */
case SG_DXFER_TO_DEV: case SG_DXFER_TO_DEV:
writing = 1; writing = 1;
break; break;
case SG_DXFER_FROM_DEV: case SG_DXFER_FROM_DEV:
reading = 1;
break; break;
} }
rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp, rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
hdr->dxfer_len); if (!rq)
return -ENOMEM;
if (IS_ERR(rq)) if (hdr->iovec_count) {
return PTR_ERR(rq); const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
} else struct sg_iovec *iov;
rq = blk_get_request(q, READ, __GFP_WAIT);
iov = kmalloc(size, GFP_KERNEL);
if (!iov) {
ret = -ENOMEM;
goto out;
}
if (copy_from_user(iov, hdr->dxferp, size)) {
kfree(iov);
ret = -EFAULT;
goto out;
}
ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
kfree(iov);
} else if (hdr->dxfer_len)
ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
if (ret)
goto out;
/* /*
* fill in request structure * fill in request structure
@@ -298,7 +308,7 @@ static int sg_io(struct file *file, request_queue_t *q,
* (if he doesn't check that is his problem). * (if he doesn't check that is his problem).
* N.B. a non-zero SCSI status is _not_ necessarily an error. * N.B. a non-zero SCSI status is _not_ necessarily an error.
*/ */
blk_execute_rq(q, bd_disk, rq); blk_execute_rq(q, bd_disk, rq, 0);
/* write to all output members */ /* write to all output members */
hdr->status = 0xff & rq->errors; hdr->status = 0xff & rq->errors;
@@ -320,12 +330,14 @@ static int sg_io(struct file *file, request_queue_t *q,
hdr->sb_len_wr = len; hdr->sb_len_wr = len;
} }
if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) if (blk_rq_unmap_user(bio, hdr->dxfer_len))
return -EFAULT; ret = -EFAULT;
/* may not have succeeded, but output values written to control /* may not have succeeded, but output values written to control
* structure (struct sg_io_hdr). */ * structure (struct sg_io_hdr). */
return 0; out:
blk_put_request(rq);
return ret;
} }
#define OMAX_SB_LEN 16 /* For backward compatibility */ #define OMAX_SB_LEN 16 /* For backward compatibility */
@@ -408,7 +420,7 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
rq->data_len = bytes; rq->data_len = bytes;
rq->flags |= REQ_BLOCK_PC; rq->flags |= REQ_BLOCK_PC;
blk_execute_rq(q, bd_disk, rq); blk_execute_rq(q, bd_disk, rq, 0);
err = rq->errors & 0xff; /* only 8 bit SCSI status */ err = rq->errors & 0xff; /* only 8 bit SCSI status */
if (err) { if (err) {
if (rq->sense_len && rq->sense) { if (rq->sense_len && rq->sense) {
@@ -561,7 +573,7 @@ int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd,
rq->cmd[0] = GPCMD_START_STOP_UNIT; rq->cmd[0] = GPCMD_START_STOP_UNIT;
rq->cmd[4] = 0x02 + (close != 0); rq->cmd[4] = 0x02 + (close != 0);
rq->cmd_len = 6; rq->cmd_len = 6;
err = blk_execute_rq(q, bd_disk, rq); err = blk_execute_rq(q, bd_disk, rq, 0);
blk_put_request(rq); blk_put_request(rq);
break; break;
default: default:

View File

@@ -2097,6 +2097,10 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (!q) if (!q)
return -ENXIO; return -ENXIO;
rq = blk_get_request(q, READ, GFP_KERNEL);
if (!rq)
return -ENOMEM;
cdi->last_sense = 0; cdi->last_sense = 0;
while (nframes) { while (nframes) {
@@ -2108,9 +2112,9 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
len = nr * CD_FRAMESIZE_RAW; len = nr * CD_FRAMESIZE_RAW;
rq = blk_rq_map_user(q, READ, ubuf, len); ret = blk_rq_map_user(q, rq, ubuf, len);
if (IS_ERR(rq)) if (ret)
return PTR_ERR(rq); break;
memset(rq->cmd, 0, sizeof(rq->cmd)); memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = GPCMD_READ_CD; rq->cmd[0] = GPCMD_READ_CD;
@@ -2132,13 +2136,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (rq->bio) if (rq->bio)
blk_queue_bounce(q, &rq->bio); blk_queue_bounce(q, &rq->bio);
if (blk_execute_rq(q, cdi->disk, rq)) { if (blk_execute_rq(q, cdi->disk, rq, 0)) {
struct request_sense *s = rq->sense; struct request_sense *s = rq->sense;
ret = -EIO; ret = -EIO;
cdi->last_sense = s->sense_key; cdi->last_sense = s->sense_key;
} }
if (blk_rq_unmap_user(rq, bio, len)) if (blk_rq_unmap_user(bio, len))
ret = -EFAULT; ret = -EFAULT;
if (ret) if (ret)
@@ -2149,6 +2153,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ubuf += len; ubuf += len;
} }
blk_put_request(rq);
return ret; return ret;
} }

View File

@@ -754,7 +754,7 @@ static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
idedisk_prepare_flush(q, rq); idedisk_prepare_flush(q, rq);
ret = blk_execute_rq(q, disk, rq); ret = blk_execute_rq(q, disk, rq, 0);
/* /*
* if we failed and caller wants error offset, get it * if we failed and caller wants error offset, get it

View File

@@ -6,7 +6,7 @@
* Title: MPI Message independent structures and definitions * Title: MPI Message independent structures and definitions
* Creation Date: July 27, 2000 * Creation Date: July 27, 2000
* *
* mpi.h Version: 01.05.07 * mpi.h Version: 01.05.08
* *
* Version History * Version History
* --------------- * ---------------
@@ -71,6 +71,9 @@
* 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
* TargetAssistExtended requests. * TargetAssistExtended requests.
* Removed EEDP IOCStatus codes. * Removed EEDP IOCStatus codes.
* 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Added EEDP IOCStatus codes.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
*/ */
@@ -101,7 +104,7 @@
/* Note: The major versions of 0xe0 through 0xff are reserved */ /* Note: The major versions of 0xe0 through 0xff are reserved */
/* versioning for this MPI header set */ /* versioning for this MPI header set */
#define MPI_HEADER_VERSION_UNIT (0x09) #define MPI_HEADER_VERSION_UNIT (0x0A)
#define MPI_HEADER_VERSION_DEV (0x00) #define MPI_HEADER_VERSION_DEV (0x00)
#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI_HEADER_VERSION_UNIT_SHIFT (8) #define MPI_HEADER_VERSION_UNIT_SHIFT (8)
@@ -292,10 +295,13 @@
#define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D) #define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D)
#define MPI_FUNCTION_DIAG_RELEASE (0x1E) #define MPI_FUNCTION_DIAG_RELEASE (0x1E)
#define MPI_FUNCTION_SCSI_IO_32 (0x1F)
#define MPI_FUNCTION_LAN_SEND (0x20) #define MPI_FUNCTION_LAN_SEND (0x20)
#define MPI_FUNCTION_LAN_RECEIVE (0x21) #define MPI_FUNCTION_LAN_RECEIVE (0x21)
#define MPI_FUNCTION_LAN_RESET (0x22) #define MPI_FUNCTION_LAN_RESET (0x22)
#define MPI_FUNCTION_TARGET_ASSIST_EXTENDED (0x23)
#define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) #define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
#define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) #define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
@@ -680,6 +686,15 @@ typedef struct _MSG_DEFAULT_REPLY
#define MPI_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) #define MPI_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) #define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
/****************************************************************************/
/* For use by SCSI Initiator and SCSI Target end-to-end data protection */
/****************************************************************************/
#define MPI_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
#define MPI_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
/****************************************************************************/ /****************************************************************************/
/* SCSI Target values */ /* SCSI Target values */
/****************************************************************************/ /****************************************************************************/

View File

@@ -6,7 +6,7 @@
* Title: MPI Config message, structures, and Pages * Title: MPI Config message, structures, and Pages
* Creation Date: July 27, 2000 * Creation Date: July 27, 2000
* *
* mpi_cnfg.h Version: 01.05.08 * mpi_cnfg.h Version: 01.05.09
* *
* Version History * Version History
* --------------- * ---------------
@@ -232,6 +232,23 @@
* New physical mapping mode in SAS IO Unit Page 2. * New physical mapping mode in SAS IO Unit Page 2.
* Added CONFIG_PAGE_SAS_ENCLOSURE_0. * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
* Added Slot and Enclosure fields to SAS Device Page 0. * Added Slot and Enclosure fields to SAS Device Page 0.
* 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
* Added more RAID type defines to IOC Page 2.
* Added Port Enable Delay settings to BIOS Page 1.
* Added Bad Block Table Full define to RAID Volume Page 0.
* Added Previous State defines to RAID Physical Disk
* Page 0.
* Added Max Sata Targets define for DiscoveryStatus field
* of SAS IO Unit Page 0.
* Added Device Self Test to Control Flags of SAS IO Unit
* Page 1.
* Added Direct Attach Starting Slot Number define for SAS
* IO Unit Page 2.
* Added new fields in SAS Device Page 2 for enclosure
* mapping.
* Added OwnerDevHandle and Flags field to SAS PHY Page 0.
* Added IOC GPIO Flags define to SAS Enclosure Page 0.
* Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
*/ */
@@ -477,6 +494,7 @@ typedef struct _MSG_CONFIG_REPLY
#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626) #define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626)
#define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642) #define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642)
#define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640) #define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640)
#define MPI_MANUFACTPAGE_DEVICEID_FC949ES (0x0646)
/* SCSI */ /* SCSI */
#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030) #define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030)
#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031) #define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031)
@@ -769,9 +787,13 @@ typedef struct _CONFIG_PAGE_IOC_1
} CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1, } CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
IOCPage1_t, MPI_POINTER pIOCPage1_t; IOCPage1_t, MPI_POINTER pIOCPage1_t;
#define MPI_IOCPAGE1_PAGEVERSION (0x02) #define MPI_IOCPAGE1_PAGEVERSION (0x03)
/* defines for the Flags field */ /* defines for the Flags field */
#define MPI_IOCPAGE1_EEDP_MODE_MASK (0x07000000)
#define MPI_IOCPAGE1_EEDP_MODE_OFF (0x00000000)
#define MPI_IOCPAGE1_EEDP_MODE_T10 (0x01000000)
#define MPI_IOCPAGE1_EEDP_MODE_LSI_1 (0x02000000)
#define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010) #define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010)
#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001) #define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001)
@@ -795,6 +817,11 @@ typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
#define MPI_RAID_VOL_TYPE_IS (0x00) #define MPI_RAID_VOL_TYPE_IS (0x00)
#define MPI_RAID_VOL_TYPE_IME (0x01) #define MPI_RAID_VOL_TYPE_IME (0x01)
#define MPI_RAID_VOL_TYPE_IM (0x02) #define MPI_RAID_VOL_TYPE_IM (0x02)
#define MPI_RAID_VOL_TYPE_RAID_5 (0x03)
#define MPI_RAID_VOL_TYPE_RAID_6 (0x04)
#define MPI_RAID_VOL_TYPE_RAID_10 (0x05)
#define MPI_RAID_VOL_TYPE_RAID_50 (0x06)
#define MPI_RAID_VOL_TYPE_UNKNOWN (0xFF)
/* IOC Page 2 Volume Flags values */ /* IOC Page 2 Volume Flags values */
@@ -820,13 +847,17 @@ typedef struct _CONFIG_PAGE_IOC_2
} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2, } CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
IOCPage2_t, MPI_POINTER pIOCPage2_t; IOCPage2_t, MPI_POINTER pIOCPage2_t;
#define MPI_IOCPAGE2_PAGEVERSION (0x02) #define MPI_IOCPAGE2_PAGEVERSION (0x03)
/* IOC Page 2 Capabilities flags */ /* IOC Page 2 Capabilities flags */
#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001) #define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001)
#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002) #define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002)
#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004) #define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_6_SUPPORT (0x00000010)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000020)
#define MPI_IOCPAGE2_CAP_FLAGS_RAID_50_SUPPORT (0x00000040)
#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000) #define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000)
#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000) #define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000)
#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000) #define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000)
@@ -945,7 +976,7 @@ typedef struct _CONFIG_PAGE_BIOS_1
} CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1, } CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
BIOSPage1_t, MPI_POINTER pBIOSPage1_t; BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
#define MPI_BIOSPAGE1_PAGEVERSION (0x01) #define MPI_BIOSPAGE1_PAGEVERSION (0x02)
/* values for the BiosOptions field */ /* values for the BiosOptions field */
#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400) #define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400)
@@ -954,6 +985,8 @@ typedef struct _CONFIG_PAGE_BIOS_1
#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) #define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
/* values for the IOCSettings field */ /* values for the IOCSettings field */
#define MPI_BIOSPAGE1_IOCSET_MASK_PORT_ENABLE_DELAY (0x00F00000)
#define MPI_BIOSPAGE1_IOCSET_SHIFT_PORT_ENABLE_DELAY (20)
#define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) #define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
#define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) #define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
#define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) #define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
@@ -1167,6 +1200,7 @@ typedef struct _CONFIG_PAGE_BIOS_2
#define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03) #define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03)
#define MPI_BIOSPAGE2_FORM_FC_WWN (0x04) #define MPI_BIOSPAGE2_FORM_FC_WWN (0x04)
#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05) #define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05)
#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
/**************************************************************************** /****************************************************************************
@@ -1957,11 +1991,11 @@ typedef struct _RAID_VOL0_STATUS
RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t; RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
/* RAID Volume Page 0 VolumeStatus defines */ /* RAID Volume Page 0 VolumeStatus defines */
#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01) #define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01)
#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02) #define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02)
#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04) #define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04)
#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08) #define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08)
#define MPI_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x10)
#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00) #define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00)
#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01) #define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01)
@@ -2025,7 +2059,7 @@ typedef struct _CONFIG_PAGE_RAID_VOL_0
} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0, } CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t; RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x04) #define MPI_RAIDVOLPAGE0_PAGEVERSION (0x05)
/* values for RAID Volume Page 0 InactiveStatus field */ /* values for RAID Volume Page 0 InactiveStatus field */
#define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) #define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
@@ -2104,6 +2138,8 @@ typedef struct _RAID_PHYS_DISK0_STATUS
#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01) #define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02) #define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
#define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04) #define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04)
#define MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00)
#define MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x08)
#define MPI_PHYSDISK0_STATUS_ONLINE (0x00) #define MPI_PHYSDISK0_STATUS_ONLINE (0x00)
#define MPI_PHYSDISK0_STATUS_MISSING (0x01) #define MPI_PHYSDISK0_STATUS_MISSING (0x01)
@@ -2132,7 +2168,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
} CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0, } CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t; RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x01) #define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x02)
typedef struct _RAID_PHYS_DISK1_PATH typedef struct _RAID_PHYS_DISK1_PATH
@@ -2263,7 +2299,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0, } CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t; SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x02) #define MPI_SASIOUNITPAGE0_PAGEVERSION (0x03)
/* values for SAS IO Unit Page 0 PortFlags */ /* values for SAS IO Unit Page 0 PortFlags */
#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08) #define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08)
@@ -2299,6 +2335,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) #define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400) #define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) #define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
@@ -2336,6 +2373,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x04) #define MPI_SASIOUNITPAGE1_PAGEVERSION (0x04)
/* values for SAS IO Unit Page 1 ControlFlags */ /* values for SAS IO Unit Page 1 ControlFlags */
#define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) #define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) #define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) #define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
@@ -2345,9 +2383,8 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
#define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) #define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00) #define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01) #define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01)
#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x10) #define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x02)
#define MPI_SAS_IOUNIT1_CONTROL_AUTO_PORT_SAME_SAS_ADDR (0x0100)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) #define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) #define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
#define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) #define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
@@ -2390,7 +2427,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
} CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2, } CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t; SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x03) #define MPI_SASIOUNITPAGE2_PAGEVERSION (0x04)
/* values for SAS IO Unit Page 2 Status field */ /* values for SAS IO Unit Page 2 Status field */
#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02) #define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
@@ -2406,6 +2443,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
#define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02) #define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02)
#define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10) #define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10)
#define MPI_SAS_IOUNIT2_FLAGS_DA_STARTING_SLOT (0x20)
typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3 typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
@@ -2584,11 +2622,19 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
{ {
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
U64 PhysicalIdentifier; /* 08h */ U64 PhysicalIdentifier; /* 08h */
U32 Reserved1; /* 10h */ U32 EnclosureMapping; /* 10h */
} CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2, } CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2,
SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t; SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t;
#define MPI_SASDEVICE2_PAGEVERSION (0x00) #define MPI_SASDEVICE2_PAGEVERSION (0x01)
/* defines for SAS Device Page 2 EnclosureMapping field */
#define MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT (0x0000000F)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT (0)
#define MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS (0x000007F0)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS (4)
#define MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX (0x001FF800)
#define MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX (11)
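A small sketch of decoding the new EnclosureMapping word with these masks (the page pointer name is an assumption; config-page fields are little-endian on the wire, as elsewhere in the fusion driver):

/* illustrative decode of SAS Device Page 2 EnclosureMapping */
u32 map     = le32_to_cpu(sas_dev_pg2->EnclosureMapping);
u8  missing = (map & MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT) >>
	       MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT;
u16 nslots  = (map & MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS) >>
	       MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS;
u16 start   = (map & MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX) >>
	       MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX;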
/**************************************************************************** /****************************************************************************
@@ -2598,7 +2644,8 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_2
typedef struct _CONFIG_PAGE_SAS_PHY_0 typedef struct _CONFIG_PAGE_SAS_PHY_0
{ {
CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
U32 Reserved1; /* 08h */ U16 OwnerDevHandle; /* 08h */
U16 Reserved1; /* 0Ah */
U64 SASAddress; /* 0Ch */ U64 SASAddress; /* 0Ch */
U16 AttachedDevHandle; /* 14h */ U16 AttachedDevHandle; /* 14h */
U8 AttachedPhyIdentifier; /* 16h */ U8 AttachedPhyIdentifier; /* 16h */
@@ -2607,12 +2654,12 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
U8 ProgrammedLinkRate; /* 20h */ U8 ProgrammedLinkRate; /* 20h */
U8 HwLinkRate; /* 21h */ U8 HwLinkRate; /* 21h */
U8 ChangeCount; /* 22h */ U8 ChangeCount; /* 22h */
U8 Reserved3; /* 23h */ U8 Flags; /* 23h */
U32 PhyInfo; /* 24h */ U32 PhyInfo; /* 24h */
} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0, } CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t; SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
#define MPI_SASPHY0_PAGEVERSION (0x00) #define MPI_SASPHY0_PAGEVERSION (0x01)
/* values for SAS PHY Page 0 ProgrammedLinkRate field */ /* values for SAS PHY Page 0 ProgrammedLinkRate field */
#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0) #define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0)
@@ -2632,6 +2679,9 @@ typedef struct _CONFIG_PAGE_SAS_PHY_0
#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08) #define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09) #define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09)
/* values for SAS PHY Page 0 Flags field */
#define MPI_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
/* values for SAS PHY Page 0 PhyInfo field */ /* values for SAS PHY Page 0 PhyInfo field */
#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000) #define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000) #define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000)
@@ -2690,7 +2740,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
} CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0, } CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0,
SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t; SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t;
#define MPI_SASENCLOSURE0_PAGEVERSION (0x00) #define MPI_SASENCLOSURE0_PAGEVERSION (0x01)
/* values for SAS Enclosure Page 0 Flags field */ /* values for SAS Enclosure Page 0 Flags field */
#define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020) #define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020)
@@ -2702,6 +2752,7 @@ typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) #define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
#define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) #define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
#define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) #define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
/**************************************************************************** /****************************************************************************

View File

@@ -6,17 +6,17 @@
Copyright (c) 2000-2005 LSI Logic Corporation. Copyright (c) 2000-2005 LSI Logic Corporation.
--------------------------------------- ---------------------------------------
Header Set Release Version: 01.05.09 Header Set Release Version: 01.05.10
Header Set Release Date: 03-11-05 Header Set Release Date: 03-11-05
--------------------------------------- ---------------------------------------
Filename Current version Prior version Filename Current version Prior version
---------- --------------- ------------- ---------- --------------- -------------
mpi.h 01.05.07 01.05.06 mpi.h 01.05.08 01.05.07
mpi_ioc.h 01.05.08 01.05.07 mpi_ioc.h 01.05.09 01.05.08
mpi_cnfg.h 01.05.08 01.05.07 mpi_cnfg.h 01.05.09 01.05.08
mpi_init.h 01.05.04 01.05.03 mpi_init.h 01.05.05 01.05.04
mpi_targ.h 01.05.04 01.05.03 mpi_targ.h 01.05.05 01.05.04
mpi_fc.h 01.05.01 01.05.01 mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 01.05.01 mpi_lan.h 01.05.01 01.05.01
mpi_raid.h 01.05.02 01.05.02 mpi_raid.h 01.05.02 01.05.02
@@ -24,7 +24,7 @@
mpi_inb.h 01.05.01 01.05.01 mpi_inb.h 01.05.01 01.05.01
mpi_sas.h 01.05.01 01.05.01 mpi_sas.h 01.05.01 01.05.01
mpi_type.h 01.05.01 01.05.01 mpi_type.h 01.05.01 01.05.01
mpi_history.txt 01.05.09 01.05.08 mpi_history.txt 01.05.09 01.05.09
* Date Version Description * Date Version Description
@@ -88,6 +88,9 @@ mpi.h
* 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
* TargetAssistExtended requests. * TargetAssistExtended requests.
* Removed EEDP IOCStatus codes. * Removed EEDP IOCStatus codes.
* 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
* TargetAssistExtended requests.
* Added EEDP IOCStatus codes.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
mpi_ioc.h mpi_ioc.h
@@ -159,6 +162,8 @@ mpi_ioc.h
* Reply and IOC Init Request. * Reply and IOC Init Request.
* 03-11-05 01.05.08 Added family code for 1068E family. * 03-11-05 01.05.08 Added family code for 1068E family.
* Removed IOCFacts Reply EEDP Capability bit. * Removed IOCFacts Reply EEDP Capability bit.
* 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
* Added Max SATA Targets to SAS Discovery Error event.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
mpi_cnfg.h mpi_cnfg.h
@@ -380,6 +385,23 @@ mpi_cnfg.h
* New physical mapping mode in SAS IO Unit Page 2. * New physical mapping mode in SAS IO Unit Page 2.
* Added CONFIG_PAGE_SAS_ENCLOSURE_0. * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
* Added Slot and Enclosure fields to SAS Device Page 0. * Added Slot and Enclosure fields to SAS Device Page 0.
* 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
* Added more RAID type defines to IOC Page 2.
* Added Port Enable Delay settings to BIOS Page 1.
* Added Bad Block Table Full define to RAID Volume Page 0.
* Added Previous State defines to RAID Physical Disk
* Page 0.
* Added Max Sata Targets define for DiscoveryStatus field
* of SAS IO Unit Page 0.
* Added Device Self Test to Control Flags of SAS IO Unit
* Page 1.
* Added Direct Attach Starting Slot Number define for SAS
* IO Unit Page 2.
* Added new fields in SAS Device Page 2 for enclosure
* mapping.
* Added OwnerDevHandle and Flags field to SAS PHY Page 0.
* Added IOC GPIO Flags define to SAS Enclosure Page 0.
* Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
mpi_init.h mpi_init.h
@@ -418,6 +440,8 @@ mpi_init.h
* Modified SCSI Enclosure Processor Request and Reply to * Modified SCSI Enclosure Processor Request and Reply to
* support Enclosure/Slot addressing rather than WWID * support Enclosure/Slot addressing rather than WWID
* addressing. * addressing.
* 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
* Added four new defines for SEP SlotStatus.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
mpi_targ.h mpi_targ.h
@@ -461,6 +485,7 @@ mpi_targ.h
* 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added. * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
* 02-22-05 01.05.03 Changed a comment. * 02-22-05 01.05.03 Changed a comment.
* 03-11-05 01.05.04 Removed TargetAssistExtended Request. * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
* 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
* -------------------------------------------------------------------------- * --------------------------------------------------------------------------
mpi_fc.h mpi_fc.h
@@ -571,20 +596,20 @@ mpi_type.h
mpi_history.txt Parts list history mpi_history.txt Parts list history
Filename 01.05.09 Filename 01.05.10 01.05.09
---------- -------- ---------- -------- --------
mpi.h 01.05.07 mpi.h 01.05.08 01.05.07
mpi_ioc.h 01.05.08 mpi_ioc.h 01.05.09 01.05.08
mpi_cnfg.h 01.05.08 mpi_cnfg.h 01.05.09 01.05.08
mpi_init.h 01.05.04 mpi_init.h 01.05.05 01.05.04
mpi_targ.h 01.05.04 mpi_targ.h 01.05.05 01.05.04
mpi_fc.h 01.05.01 mpi_fc.h 01.05.01 01.05.01
mpi_lan.h 01.05.01 mpi_lan.h 01.05.01 01.05.01
mpi_raid.h 01.05.02 mpi_raid.h 01.05.02 01.05.02
mpi_tool.h 01.05.03 mpi_tool.h 01.05.03 01.05.03
mpi_inb.h 01.05.01 mpi_inb.h 01.05.01 01.05.01
mpi_sas.h 01.05.01 mpi_sas.h 01.05.01 01.05.01
mpi_type.h 01.05.01 mpi_type.h 01.05.01 01.05.01
Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03 Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03
---------- -------- -------- -------- -------- -------- -------- ---------- -------- -------- -------- -------- -------- --------

View File

@@ -6,7 +6,7 @@
* Title: MPI initiator mode messages and structures
* Creation Date: June 8, 2000
*
* mpi_init.h Version: 01.05.04 * mpi_init.h Version: 01.05.05
*
* Version History
* ---------------
@@ -48,6 +48,8 @@
* Modified SCSI Enclosure Processor Request and Reply to
* support Enclosure/Slot addressing rather than WWID
* addressing.
* 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
* Added four new defines for SEP SlotStatus.
* --------------------------------------------------------------------------
*/
@@ -202,6 +204,197 @@ typedef struct _MSG_SCSI_IO_REPLY
#define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF) #define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF)
/****************************************************************************/
/* SCSI IO 32 messages and associated structures */
/****************************************************************************/
typedef struct
{
U8 CDB[20]; /* 00h */
U32 PrimaryReferenceTag; /* 14h */
U16 PrimaryApplicationTag; /* 18h */
U16 PrimaryApplicationTagMask; /* 1Ah */
U32 TransferLength; /* 1Ch */
} MPI_SCSI_IO32_CDB_EEDP32, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP32,
MpiScsiIo32CdbEedp32_t, MPI_POINTER pMpiScsiIo32CdbEedp32_t;
typedef struct
{
U8 CDB[16]; /* 00h */
U32 DataLength; /* 10h */
U32 PrimaryReferenceTag; /* 14h */
U16 PrimaryApplicationTag; /* 18h */
U16 PrimaryApplicationTagMask; /* 1Ah */
U32 TransferLength; /* 1Ch */
} MPI_SCSI_IO32_CDB_EEDP16, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP16,
MpiScsiIo32CdbEedp16_t, MPI_POINTER pMpiScsiIo32CdbEedp16_t;
typedef union
{
U8 CDB32[32];
MPI_SCSI_IO32_CDB_EEDP32 EEDP32;
MPI_SCSI_IO32_CDB_EEDP16 EEDP16;
SGE_SIMPLE_UNION SGE;
} MPI_SCSI_IO32_CDB_UNION, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_UNION,
MpiScsiIo32Cdb_t, MPI_POINTER pMpiScsiIo32Cdb_t;
typedef struct
{
U8 TargetID; /* 00h */
U8 Bus; /* 01h */
U16 Reserved1; /* 02h */
U32 Reserved2; /* 04h */
} MPI_SCSI_IO32_BUS_TARGET_ID_FORM, MPI_POINTER PTR_MPI_SCSI_IO32_BUS_TARGET_ID_FORM,
MpiScsiIo32BusTargetIdForm_t, MPI_POINTER pMpiScsiIo32BusTargetIdForm_t;
typedef union
{
MPI_SCSI_IO32_BUS_TARGET_ID_FORM SCSIID;
U64 WWID;
} MPI_SCSI_IO32_ADDRESS, MPI_POINTER PTR_MPI_SCSI_IO32_ADDRESS,
MpiScsiIo32Address_t, MPI_POINTER pMpiScsiIo32Address_t;
typedef struct _MSG_SCSI_IO32_REQUEST
{
U8 Port; /* 00h */
U8 Reserved1; /* 01h */
U8 ChainOffset; /* 02h */
U8 Function; /* 03h */
U8 CDBLength; /* 04h */
U8 SenseBufferLength; /* 05h */
U8 Flags; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U8 LUN[8]; /* 0Ch */
U32 Control; /* 14h */
MPI_SCSI_IO32_CDB_UNION CDB; /* 18h */
U32 DataLength; /* 38h */
U32 BidirectionalDataLength; /* 3Ch */
U32 SecondaryReferenceTag; /* 40h */
U16 SecondaryApplicationTag; /* 44h */
U16 Reserved2; /* 46h */
U16 EEDPFlags; /* 48h */
U16 ApplicationTagTranslationMask; /* 4Ah */
U32 EEDPBlockSize; /* 4Ch */
MPI_SCSI_IO32_ADDRESS DeviceAddress; /* 50h */
U8 SGLOffset0; /* 58h */
U8 SGLOffset1; /* 59h */
U8 SGLOffset2; /* 5Ah */
U8 SGLOffset3; /* 5Bh */
U32 Reserved3; /* 5Ch */
U32 Reserved4; /* 60h */
U32 SenseBufferLowAddr; /* 64h */
SGE_IO_UNION SGL; /* 68h */
} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
/* SCSI IO 32 MsgFlags bits */
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH (0x01)
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_32 (0x00)
#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH_64 (0x01)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOCATION (0x02)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_HOST (0x00)
#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_IOC (0x02)
#define MPI_SCSIIO32_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
#define MPI_SCSIIO32_MSGFLGS_SGL_OFFSETS_CHAINS (0x08)
#define MPI_SCSIIO32_MSGFLGS_MULTICAST (0x10)
#define MPI_SCSIIO32_MSGFLGS_BIDIRECTIONAL (0x20)
#define MPI_SCSIIO32_MSGFLGS_LARGE_CDB (0x40)
/* SCSI IO 32 Flags bits */
#define MPI_SCSIIO32_FLAGS_FORM_MASK (0x03)
#define MPI_SCSIIO32_FLAGS_FORM_SCSIID (0x00)
#define MPI_SCSIIO32_FLAGS_FORM_WWID (0x01)
/* SCSI IO 32 LUN fields */
#define MPI_SCSIIO32_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
#define MPI_SCSIIO32_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
#define MPI_SCSIIO32_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
#define MPI_SCSIIO32_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
#define MPI_SCSIIO32_LUN_LEVEL_1_WORD (0xFF00)
#define MPI_SCSIIO32_LUN_LEVEL_1_DWORD (0x0000FF00)
/* SCSI IO 32 Control bits */
#define MPI_SCSIIO32_CONTROL_DATADIRECTION_MASK (0x03000000)
#define MPI_SCSIIO32_CONTROL_NODATATRANSFER (0x00000000)
#define MPI_SCSIIO32_CONTROL_WRITE (0x01000000)
#define MPI_SCSIIO32_CONTROL_READ (0x02000000)
#define MPI_SCSIIO32_CONTROL_BIDIRECTIONAL (0x03000000)
#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_MASK (0xFC000000)
#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_SHIFT (26)
#define MPI_SCSIIO32_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
#define MPI_SCSIIO32_CONTROL_SIMPLEQ (0x00000000)
#define MPI_SCSIIO32_CONTROL_HEADOFQ (0x00000100)
#define MPI_SCSIIO32_CONTROL_ORDEREDQ (0x00000200)
#define MPI_SCSIIO32_CONTROL_ACAQ (0x00000400)
#define MPI_SCSIIO32_CONTROL_UNTAGGED (0x00000500)
#define MPI_SCSIIO32_CONTROL_NO_DISCONNECT (0x00000700)
#define MPI_SCSIIO32_CONTROL_TASKMANAGE_MASK (0x00FF0000)
#define MPI_SCSIIO32_CONTROL_OBSOLETE (0x00800000)
#define MPI_SCSIIO32_CONTROL_CLEAR_ACA_RSV (0x00400000)
#define MPI_SCSIIO32_CONTROL_TARGET_RESET (0x00200000)
#define MPI_SCSIIO32_CONTROL_LUN_RESET_RSV (0x00100000)
#define MPI_SCSIIO32_CONTROL_RESERVED (0x00080000)
#define MPI_SCSIIO32_CONTROL_CLR_TASK_SET_RSV (0x00040000)
#define MPI_SCSIIO32_CONTROL_ABORT_TASK_SET (0x00020000)
#define MPI_SCSIIO32_CONTROL_RESERVED2 (0x00010000)
/* SCSI IO 32 EEDPFlags */
#define MPI_SCSIIO32_EEDPFLAGS_MASK_OP (0x0007)
#define MPI_SCSIIO32_EEDPFLAGS_NOOP_OP (0x0000)
#define MPI_SCSIIO32_EEDPFLAGS_CHK_OP (0x0001)
#define MPI_SCSIIO32_EEDPFLAGS_STRIP_OP (0x0002)
#define MPI_SCSIIO32_EEDPFLAGS_CHKRM_OP (0x0003)
#define MPI_SCSIIO32_EEDPFLAGS_INSERT_OP (0x0004)
#define MPI_SCSIIO32_EEDPFLAGS_REPLACE_OP (0x0006)
#define MPI_SCSIIO32_EEDPFLAGS_CHKREGEN_OP (0x0007)
#define MPI_SCSIIO32_EEDPFLAGS_PASS_REF_TAG (0x0008)
#define MPI_SCSIIO32_EEDPFLAGS_8_9THS_MODE (0x0010)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_MASK (0x0700)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD (0x0100)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG (0x0200)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_LBATAG (0x0400)
#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_SHIFT (8)
#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
/* SCSIIO32 IO reply structure */
typedef struct _MSG_SCSIIO32_IO_REPLY
{
U8 Port; /* 00h */
U8 Reserved1; /* 01h */
U8 MsgLength; /* 02h */
U8 Function; /* 03h */
U8 CDBLength; /* 04h */
U8 SenseBufferLength; /* 05h */
U8 Flags; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U8 SCSIStatus; /* 0Ch */
U8 SCSIState; /* 0Dh */
U16 IOCStatus; /* 0Eh */
U32 IOCLogInfo; /* 10h */
U32 TransferCount; /* 14h */
U32 SenseCount; /* 18h */
U32 ResponseInfo; /* 1Ch */
U16 TaskTag; /* 20h */
U16 Reserved2; /* 22h */
U32 BidirectionalTransferCount; /* 24h */
} MSG_SCSIIO32_IO_REPLY, MPI_POINTER PTR_MSG_SCSIIO32_IO_REPLY,
SCSIIO32Reply_t, MPI_POINTER pSCSIIO32Reply_t;
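Editor's note: for orientation only, the fragment below sketches how an initiator might fill in the new 32-byte request using the WWID addressing form and a plain (non-EEDP) CDB. It is not taken from the fusion driver; the function code MPI_FUNCTION_SCSI_IO_32 and the helper name are assumptions, and endian conversion follows the cpu_to_leXX convention used elsewhere in this commit. SGE chaining and frame submission are left to the caller.

/* Illustrative sketch: build a simple-queue READ as a SCSI IO 32 request
 * addressed by world wide ID.  MPI_FUNCTION_SCSI_IO_32 is assumed here;
 * its define is not part of this hunk.
 */
static void
build_scsi_io32_read(SCSIIO32Request_t *req, u64 wwid, u32 msg_context,
                     u8 *cdb10, u32 data_len)
{
	memset(req, 0, sizeof(*req));

	req->Function   = MPI_FUNCTION_SCSI_IO_32;	/* assumed function code */
	req->CDBLength  = 10;
	req->MsgContext = cpu_to_le32(msg_context);

	/* Address the device by WWID rather than bus/target ID. */
	req->Flags = MPI_SCSIIO32_FLAGS_FORM_WWID;
	req->DeviceAddress.WWID = cpu_to_le64(wwid);

	/* Simple-queue read with no end-to-end data protection. */
	req->Control = cpu_to_le32(MPI_SCSIIO32_CONTROL_READ |
				   MPI_SCSIIO32_CONTROL_SIMPLEQ);
	req->EEDPFlags = cpu_to_le16(MPI_SCSIIO32_EEDPFLAGS_NOOP_OP);

	memcpy(req->CDB.CDB32, cdb10, 10);
	req->DataLength = cpu_to_le32(data_len);

	/* Caller attaches an SGE at req->SGL and posts the frame to the IOC. */
}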
/****************************************************************************/
/* SCSI Task Management messages */
/****************************************************************************/
@@ -310,10 +503,14 @@ typedef struct _MSG_SEP_REQUEST
#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
#define MPI_SEP_REQ_SLOTSTATUS_REQ_CONSISTENCY_CHECK (0x00001000)
#define MPI_SEP_REQ_SLOTSTATUS_DISABLE (0x00002000)
#define MPI_SEP_REQ_SLOTSTATUS_REQ_RESERVED_DEVICE (0x00004000)
#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000)
#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000)
#define MPI_SEP_REQ_SLOTSTATUS_ACTIVE (0x00800000)
#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000)
@@ -352,11 +549,15 @@ typedef struct _MSG_SEP_REPLY
#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
#define MPI_SEP_REPLY_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
#define MPI_SEP_REPLY_SLOTSTATUS_DISABLE (0x00002000)
#define MPI_SEP_REPLY_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000)
#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000)
#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
#define MPI_SEP_REPLY_SLOTSTATUS_ACTIVE (0x00800000)
#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000)
#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000)
#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)

View File

@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: August 11, 2000
*
* mpi_ioc.h Version: 01.05.08 * mpi_ioc.h Version: 01.05.09
*
* Version History
* ---------------
@@ -81,6 +81,8 @@
* Reply and IOC Init Request.
* 03-11-05 01.05.08 Added family code for 1068E family.
* Removed IOCFacts Reply EEDP Capability bit.
* 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
* Added Max SATA Targets to SAS Discovery Error event.
* --------------------------------------------------------------------------
*/
@@ -261,7 +263,11 @@ typedef struct _MSG_IOC_FACTS_REPLY
#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
#define MPI_IOCFACTS_CAPABILITY_EEDP (0x00000040)
#define MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL (0x00000080)
#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200)
#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400)
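Editor's note: a minimal sketch of how a driver could consume the new capability bits, assuming the IOCFacts reply is cached in ioc->facts and converted to host byte order by GetIocFacts(), as the driver does for the other facts fields later in this diff. The variable names are illustrative only.

/* Gate use of the 32-byte SCSI IO format on the advertised capabilities. */
int can_use_scsiio32  = (ioc->facts.IOCCapabilities &
			 MPI_IOCFACTS_CAPABILITY_SCSIIO32) != 0;
int must_use_scsiio32 = (ioc->facts.IOCCapabilities &
			 MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16) != 0;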
/*****************************************************************************
@@ -677,6 +683,7 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_PATHS (0x00000800)
#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
/*****************************************************************************

View File

@@ -6,7 +6,7 @@
* Title: MPI Target mode messages and structures
* Creation Date: June 22, 2000
*
* mpi_targ.h Version: 01.05.04 * mpi_targ.h Version: 01.05.05
*
* Version History
* ---------------
@@ -53,6 +53,7 @@
* 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
* 02-22-05 01.05.03 Changed a comment.
* 03-11-05 01.05.04 Removed TargetAssistExtended Request.
* 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
* --------------------------------------------------------------------------
*/
@@ -370,6 +371,77 @@ typedef struct _MSG_TARGET_ERROR_REPLY
TargetErrorReply_t, MPI_POINTER pTargetErrorReply_t;
/****************************************************************************/
/* Target Assist Extended Request */
/****************************************************************************/
typedef struct _MSG_TARGET_ASSIST_EXT_REQUEST
{
U8 StatusCode; /* 00h */
U8 TargetAssistFlags; /* 01h */
U8 ChainOffset; /* 02h */
U8 Function; /* 03h */
U16 QueueTag; /* 04h */
U8 Reserved1; /* 06h */
U8 MsgFlags; /* 07h */
U32 MsgContext; /* 08h */
U32 ReplyWord; /* 0Ch */
U8 LUN[8]; /* 10h */
U32 RelativeOffset; /* 18h */
U32 Reserved2; /* 1Ch */
U32 Reserved3; /* 20h */
U32 PrimaryReferenceTag; /* 24h */
U16 PrimaryApplicationTag; /* 28h */
U16 PrimaryApplicationTagMask; /* 2Ah */
U32 Reserved4; /* 2Ch */
U32 DataLength; /* 30h */
U32 BidirectionalDataLength; /* 34h */
U32 SecondaryReferenceTag; /* 38h */
U16 SecondaryApplicationTag; /* 3Ch */
U16 Reserved5; /* 3Eh */
U16 EEDPFlags; /* 40h */
U16 ApplicationTagTranslationMask; /* 42h */
U32 EEDPBlockSize; /* 44h */
U8 SGLOffset0; /* 48h */
U8 SGLOffset1; /* 49h */
U8 SGLOffset2; /* 4Ah */
U8 SGLOffset3; /* 4Bh */
U32 Reserved6; /* 4Ch */
SGE_IO_UNION SGL[1]; /* 50h */
} MSG_TARGET_ASSIST_EXT_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_EXT_REQUEST,
TargetAssistExtRequest_t, MPI_POINTER pTargetAssistExtRequest_t;
/* see the defines after MSG_TARGET_ASSIST_REQUEST for TargetAssistFlags */
/* defines for the MsgFlags field */
#define TARGET_ASSIST_EXT_MSGFLAGS_BIDIRECTIONAL (0x20)
#define TARGET_ASSIST_EXT_MSGFLAGS_MULTICAST (0x10)
#define TARGET_ASSIST_EXT_MSGFLAGS_SGL_OFFSET_CHAINS (0x08)
/* defines for the EEDPFlags field */
#define TARGET_ASSIST_EXT_EEDP_MASK_OP (0x0007)
#define TARGET_ASSIST_EXT_EEDP_NOOP_OP (0x0000)
#define TARGET_ASSIST_EXT_EEDP_CHK_OP (0x0001)
#define TARGET_ASSIST_EXT_EEDP_STRIP_OP (0x0002)
#define TARGET_ASSIST_EXT_EEDP_CHKRM_OP (0x0003)
#define TARGET_ASSIST_EXT_EEDP_INSERT_OP (0x0004)
#define TARGET_ASSIST_EXT_EEDP_REPLACE_OP (0x0006)
#define TARGET_ASSIST_EXT_EEDP_CHKREGEN_OP (0x0007)
#define TARGET_ASSIST_EXT_EEDP_PASS_REF_TAG (0x0008)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_MASK (0x0700)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD (0x0100)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_APPTAG (0x0200)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG (0x0400)
#define TARGET_ASSIST_EXT_EEDP_T10_CHK_SHIFT (8)
#define TARGET_ASSIST_EXT_EEDP_INC_SEC_APPTAG (0x1000)
#define TARGET_ASSIST_EXT_EEDP_INC_PRI_APPTAG (0x2000)
#define TARGET_ASSIST_EXT_EEDP_INC_SEC_REFTAG (0x4000)
#define TARGET_ASSIST_EXT_EEDP_INC_PRI_REFTAG (0x8000)
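Editor's note: the sketch below shows one plausible way to program the EEDP fields of the new extended target assist, checking the guard and reference tag and stripping the protection data (the CHKRM operation). The 512-byte block size, the application tag mask, and the helper name are assumptions, not values taken from this patch.

/* Illustrative only: enable check-and-remove EEDP on an extended target
 * assist for a 512-byte-block device. */
static void
target_assist_ext_set_eedp(TargetAssistExtRequest_t *ta, u32 start_lba)
{
	ta->EEDPFlags = cpu_to_le16(TARGET_ASSIST_EXT_EEDP_CHKRM_OP |
				    TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD |
				    TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG);
	ta->EEDPBlockSize = cpu_to_le32(512);		/* assumed block size */
	/* Seed the reference tag from the starting LBA of the transfer. */
	ta->PrimaryReferenceTag = cpu_to_le32(start_lba);
	ta->PrimaryApplicationTagMask = cpu_to_le16(0xFFFF);
}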
/****************************************************************************/
/* Target Status Send Request */
/****************************************************************************/

View File

@@ -218,8 +218,7 @@ pci_enable_io_access(struct pci_dev *pdev)
* (also referred to as a IO Controller or IOC).
* This routine must clear the interrupt from the adapter and does
* so by reading the reply FIFO. Multiple replies may be processed
* per single call to this routine; up to MPT_MAX_REPLIES_PER_ISR * per single call to this routine.
* which is currently set to 32 in mptbase.h.
*
* This routine handles register-level access of the adapter but
* dispatches (calls) a protocol-specific callback routine to handle
@@ -279,11 +278,11 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx; cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
mf = MPT_INDEX_2_MFPTR(ioc, req_idx); mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x\n", dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
ioc->name, mr, req_idx)); ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
DBG_DUMP_REPLY_FRAME(mr) DBG_DUMP_REPLY_FRAME(mr)
/* Check/log IOC log info /* Check/log IOC log info
*/ */
ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus); ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
@@ -345,7 +344,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth)) if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
|| (mf < ioc->req_frames)) ) { || (mf < ioc->req_frames)) ) {
printk(MYIOC_s_WARN_FMT printk(MYIOC_s_WARN_FMT
"mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, (void *)mf, req_idx); "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
cb_idx = 0; cb_idx = 0;
pa = 0; pa = 0;
freeme = 0; freeme = 0;
@@ -399,7 +398,7 @@ mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
* @mf: Pointer to original MPT request frame
* @reply: Pointer to MPT reply frame (NULL if TurboReply)
*
* Returns 1 indicating original alloc'd request frame ptr
* should be freed, or 0 if it shouldn't.
*/
static int
@@ -408,28 +407,17 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
int freereq = 1; int freereq = 1;
u8 func; u8 func;
dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name)); dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
if ((mf == NULL) ||
(mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n",
ioc->name, (void *)mf);
return 1;
}
if (reply == NULL) {
dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) reply!\n",
ioc->name));
return 1;
}
#if defined(MPT_DEBUG_MSG_FRAME)
if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf)); dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf));
DBG_DUMP_REQUEST_FRAME_HDR(mf) DBG_DUMP_REQUEST_FRAME_HDR(mf)
} }
#endif
func = reply->u.hdr.Function; func = reply->u.hdr.Function;
dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n", dmfprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
ioc->name, func)); ioc->name, func));
if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
@@ -448,8 +436,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
* Hmmm... It seems that EventNotificationReply is an exception
* to the rule of one reply per request.
*/
if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
freereq = 0; freereq = 0;
devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
ioc->name, pEvReply));
} else {
devtprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
ioc->name, pEvReply));
}
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
// LogEvent(ioc, pEvReply); // LogEvent(ioc, pEvReply);
@@ -491,10 +485,21 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
pCfg->status = status; pCfg->status = status;
if (status == MPI_IOCSTATUS_SUCCESS) { if (status == MPI_IOCSTATUS_SUCCESS) {
pCfg->hdr->PageVersion = pReply->Header.PageVersion; if ((pReply->Header.PageType &
pCfg->hdr->PageLength = pReply->Header.PageLength; MPI_CONFIG_PAGETYPE_MASK) ==
pCfg->hdr->PageNumber = pReply->Header.PageNumber; MPI_CONFIG_PAGETYPE_EXTENDED) {
pCfg->hdr->PageType = pReply->Header.PageType; pCfg->cfghdr.ehdr->ExtPageLength =
le16_to_cpu(pReply->ExtPageLength);
pCfg->cfghdr.ehdr->ExtPageType =
pReply->ExtPageType;
}
pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
/* If this is a regular header, save PageLength. */
/* LMP Do this better so not using a reserved field! */
pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
} }
} }
@@ -705,7 +710,7 @@ mpt_device_driver_deregister(int cb_idx)
if (dd_cbfunc->remove) if (dd_cbfunc->remove)
dd_cbfunc->remove(ioc->pcidev); dd_cbfunc->remove(ioc->pcidev);
} }
MptDeviceDriverHandlers[cb_idx] = NULL; MptDeviceDriverHandlers[cb_idx] = NULL;
} }
@@ -818,7 +823,7 @@ mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
} }
#endif #endif
mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx]; mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx])); dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx]));
CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr); CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
} }
@@ -920,7 +925,7 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
/* Make sure there are no doorbells */ /* Make sure there are no doorbells */
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
CHIPREG_WRITE32(&ioc->chip->Doorbell, CHIPREG_WRITE32(&ioc->chip->Doorbell,
((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) | ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT))); ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
@@ -935,14 +940,14 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
return -5; return -5;
dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n", dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n",
ioc->name, ii)); ioc->name, ii));
CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) { if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
return -2; return -2;
} }
/* Send request via doorbell handshake */ /* Send request via doorbell handshake */
req_as_bytes = (u8 *) req; req_as_bytes = (u8 *) req;
for (ii = 0; ii < reqBytes/4; ii++) { for (ii = 0; ii < reqBytes/4; ii++) {
@@ -988,9 +993,9 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
if (ioc->id == iocid) { if (ioc->id == iocid) {
*iocpp =ioc; *iocpp =ioc;
return iocid; return iocid;
} }
} }
*iocpp = NULL; *iocpp = NULL;
return -1; return -1;
} }
@@ -1032,9 +1037,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev)) if (pci_enable_device(pdev))
return r; return r;
dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n")); dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n"));
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
dprintk((KERN_INFO MYNAM dprintk((KERN_INFO MYNAM
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n")); ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n"));
@@ -1059,7 +1064,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->alloc_total = sizeof(MPT_ADAPTER); ioc->alloc_total = sizeof(MPT_ADAPTER);
ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
ioc->reply_sz = MPT_REPLY_FRAME_SIZE; ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
ioc->pcidev = pdev; ioc->pcidev = pdev;
ioc->diagPending = 0; ioc->diagPending = 0;
spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->diagLock);
@@ -1088,7 +1093,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* Find lookup slot. */ /* Find lookup slot. */
INIT_LIST_HEAD(&ioc->list); INIT_LIST_HEAD(&ioc->list);
ioc->id = mpt_ids++; ioc->id = mpt_ids++;
mem_phys = msize = 0; mem_phys = msize = 0;
port = psize = 0; port = psize = 0;
for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) { for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
@@ -1143,7 +1148,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->prod_name = "LSIFC909"; ioc->prod_name = "LSIFC909";
ioc->bus_type = FC; ioc->bus_type = FC;
} }
if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) { else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
ioc->prod_name = "LSIFC929"; ioc->prod_name = "LSIFC929";
ioc->bus_type = FC; ioc->bus_type = FC;
} }
@@ -1322,7 +1327,7 @@ mpt_detach(struct pci_dev *pdev)
remove_proc_entry(pname, NULL); remove_proc_entry(pname, NULL);
sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name); sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
remove_proc_entry(pname, NULL); remove_proc_entry(pname, NULL);
/* call per device driver remove entry point */ /* call per device driver remove entry point */
for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) { for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
if(MptDeviceDriverHandlers[ii] && if(MptDeviceDriverHandlers[ii] &&
@@ -1330,7 +1335,7 @@ mpt_detach(struct pci_dev *pdev)
MptDeviceDriverHandlers[ii]->remove(pdev); MptDeviceDriverHandlers[ii]->remove(pdev);
} }
} }
/* Disable interrupts! */ /* Disable interrupts! */
CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
@@ -1403,7 +1408,7 @@ mpt_resume(struct pci_dev *pdev)
u32 device_state = pdev->current_state; u32 device_state = pdev->current_state;
int recovery_state; int recovery_state;
int ii; int ii;
printk(MYIOC_s_INFO_FMT printk(MYIOC_s_INFO_FMT
"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
ioc->name, pdev, pci_name(pdev), device_state); ioc->name, pdev, pci_name(pdev), device_state);
@@ -1534,7 +1539,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0) if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
break; break;
} }
if (ii == 5) { if (ii == 5) {
dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc)); dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc));
@@ -1542,7 +1547,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
MptDisplayIocCapabilities(ioc); MptDisplayIocCapabilities(ioc);
} }
if (alt_ioc_ready) { if (alt_ioc_ready) {
if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
@@ -1613,7 +1618,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
if (reset_alt_ioc_active && ioc->alt_ioc) { if (reset_alt_ioc_active && ioc->alt_ioc) {
/* (re)Enable alt-IOC! (reply interrupt) */ /* (re)Enable alt-IOC! (reply interrupt) */
dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n", dinitprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
ioc->alt_ioc->name)); ioc->alt_ioc->name));
CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM)); CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
ioc->alt_ioc->active = 1; ioc->alt_ioc->active = 1;
@@ -1670,7 +1675,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
/* Find IM volumes /* Find IM volumes
*/ */
if (ioc->facts.MsgVersion >= 0x0102) if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
mpt_findImVolumes(ioc); mpt_findImVolumes(ioc);
/* Check, and possibly reset, the coalescing value /* Check, and possibly reset, the coalescing value
@@ -1700,7 +1705,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
} }
if (alt_ioc_ready && MptResetHandlers[ii]) { if (alt_ioc_ready && MptResetHandlers[ii]) {
dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n", drsprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
ioc->name, ioc->alt_ioc->name, ii)); ioc->name, ioc->alt_ioc->name, ii));
rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET); rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
handlers++; handlers++;
@@ -1733,8 +1738,8 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
dprintk((MYIOC_s_INFO_FMT "PCI device %s devfn=%x/%x," dprintk((MYIOC_s_INFO_FMT "PCI device %s devfn=%x/%x,"
" searching for devfn match on %x or %x\n", " searching for devfn match on %x or %x\n",
ioc->name, pci_name(pdev), pdev->devfn, ioc->name, pci_name(pdev), pdev->bus->number,
func-1, func+1)); pdev->devfn, func-1, func+1));
peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1)); peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
if (!peer) { if (!peer) {
@@ -1861,36 +1866,39 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
static void static void
mpt_adapter_dispose(MPT_ADAPTER *ioc) mpt_adapter_dispose(MPT_ADAPTER *ioc)
{ {
if (ioc != NULL) { int sz_first, sz_last;
int sz_first, sz_last;
sz_first = ioc->alloc_total; if (ioc == NULL)
return;
mpt_adapter_disable(ioc); sz_first = ioc->alloc_total;
if (ioc->pci_irq != -1) { mpt_adapter_disable(ioc);
free_irq(ioc->pci_irq, ioc);
ioc->pci_irq = -1;
}
if (ioc->memmap != NULL) if (ioc->pci_irq != -1) {
iounmap(ioc->memmap); free_irq(ioc->pci_irq, ioc);
ioc->pci_irq = -1;
}
if (ioc->memmap != NULL) {
iounmap(ioc->memmap);
ioc->memmap = NULL;
}
#if defined(CONFIG_MTRR) && 0
if (ioc->mtrr_reg > 0) {
mtrr_del(ioc->mtrr_reg, 0, 0);
dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
}
#endif
/* Zap the adapter lookup ptr! */
list_del(&ioc->list);
sz_last = ioc->alloc_total;
dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
kfree(ioc);
}
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1977,7 +1985,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
} }
/* Is it already READY? */ /* Is it already READY? */
if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
return 0; return 0;
/* /*
@@ -1995,7 +2003,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Hmmm... Did it get left operational? * Hmmm... Did it get left operational?
*/ */
if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) { if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
dinitprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n", dinitprintk((MYIOC_s_INFO_FMT "IOC operational unexpected\n",
ioc->name)); ioc->name));
/* Check WhoInit. /* Check WhoInit.
@@ -2004,8 +2012,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
* Else, fall through to KickStart case * Else, fall through to KickStart case
*/ */
whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT; whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
dprintk((KERN_WARNING MYNAM dinitprintk((KERN_INFO MYNAM
": whoinit 0x%x\n statefault %d force %d\n", ": whoinit 0x%x statefault %d force %d\n",
whoinit, statefault, force)); whoinit, statefault, force));
if (whoinit == MPI_WHOINIT_PCI_PEER) if (whoinit == MPI_WHOINIT_PCI_PEER)
return -4; return -4;
@@ -2140,8 +2148,8 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
get_facts.Function = MPI_FUNCTION_IOC_FACTS; get_facts.Function = MPI_FUNCTION_IOC_FACTS;
/* Assert: All other get_facts fields are zero! */ /* Assert: All other get_facts fields are zero! */
dinitprintk((MYIOC_s_INFO_FMT dinitprintk((MYIOC_s_INFO_FMT
"Sending get IocFacts request req_sz=%d reply_sz=%d\n", "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
ioc->name, req_sz, reply_sz)); ioc->name, req_sz, reply_sz));
/* No non-zero fields in the get_facts request are greater than /* No non-zero fields in the get_facts request are greater than
@@ -2174,7 +2182,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions); facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
facts->IOCStatus = le16_to_cpu(facts->IOCStatus); facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo); facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
status = facts->IOCStatus & MPI_IOCSTATUS_MASK; status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
/* CHECKME! IOCStatus, IOCLogInfo */ /* CHECKME! IOCStatus, IOCLogInfo */
facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth); facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
@@ -2221,7 +2229,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
if ( sz & 0x02 ) if ( sz & 0x02 )
sz += 2; sz += 2;
facts->FWImageSize = sz; facts->FWImageSize = sz;
if (!facts->RequestFrameSize) { if (!facts->RequestFrameSize) {
/* Something is wrong! */ /* Something is wrong! */
printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n", printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
@@ -2240,7 +2248,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
ioc->NBShiftFactor = shiftFactor; ioc->NBShiftFactor = shiftFactor;
dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n", dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
ioc->name, vv, shiftFactor, r)); ioc->name, vv, shiftFactor, r));
if (reason == MPT_HOSTEVENT_IOC_BRINGUP) { if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
/* /*
* Set values for this IOC's request & reply frame sizes, * Set values for this IOC's request & reply frame sizes,
@@ -2261,7 +2269,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
return r; return r;
} }
} else { } else {
printk(MYIOC_s_ERR_FMT printk(MYIOC_s_ERR_FMT
"Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n", "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t, ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
RequestFrameSize)/sizeof(u32))); RequestFrameSize)/sizeof(u32)));
@@ -2413,9 +2421,11 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n", dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
ioc->name, &ioc_init)); ioc->name, &ioc_init));
if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
return r; return r;
}
/* YIKES! SUPER IMPORTANT!!! /* YIKES! SUPER IMPORTANT!!!
* Poll IocState until _OPERATIONAL while IOC is doing * Poll IocState until _OPERATIONAL while IOC is doing
@@ -2440,7 +2450,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
state = mpt_GetIocState(ioc, 1); state = mpt_GetIocState(ioc, 1);
count++; count++;
} }
dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n", dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
ioc->name, count)); ioc->name, count));
return r; return r;
@@ -2529,7 +2539,7 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
int sz; int sz;
sz = ioc->facts.FWImageSize; sz = ioc->facts.FWImageSize;
dinitprintk((KERN_WARNING MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n", dinitprintk((KERN_INFO MYNAM "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
pci_free_consistent(ioc->pcidev, sz, pci_free_consistent(ioc->pcidev, sz,
ioc->cached_fw, ioc->cached_fw_dma); ioc->cached_fw, ioc->cached_fw_dma);
@@ -2573,9 +2583,9 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
mpt_alloc_fw_memory(ioc, sz); mpt_alloc_fw_memory(ioc, sz);
dinitprintk((KERN_WARNING MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n", dinitprintk((KERN_INFO MYNAM ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz)); ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
if (ioc->cached_fw == NULL) { if (ioc->cached_fw == NULL) {
/* Major Failure. /* Major Failure.
*/ */
@@ -2605,14 +2615,14 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma); mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma);
sgeoffset += sizeof(u32) + sizeof(dma_addr_t); sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
dinitprintk((KERN_WARNING MYNAM "Sending FW Upload (req @ %p) sgeoffset=%d \n", dinitprintk((KERN_INFO MYNAM ": Sending FW Upload (req @ %p) sgeoffset=%d \n",
prequest, sgeoffset)); prequest, sgeoffset));
DBG_DUMP_FW_REQUEST_FRAME(prequest) DBG_DUMP_FW_REQUEST_FRAME(prequest)
ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
dinitprintk((KERN_WARNING MYNAM "FW Upload completed rc=%x \n", ii)); dinitprintk((KERN_INFO MYNAM ": FW Upload completed rc=%x \n", ii));
cmdStatus = -EFAULT; cmdStatus = -EFAULT;
if (ii == 0) { if (ii == 0) {
@@ -2627,10 +2637,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
cmdStatus = 0; cmdStatus = 0;
} }
} }
dinitprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n", dinitprintk((MYIOC_s_INFO_FMT ": do_upload cmdStatus=%d \n",
ioc->name, cmdStatus)); ioc->name, cmdStatus));
if (cmdStatus) { if (cmdStatus) {
ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n", ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n",
@@ -2761,8 +2771,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
fwSize = (pExtImage->ImageSize + 3) >> 2; fwSize = (pExtImage->ImageSize + 3) >> 2;
ptrFw = (u32 *)pExtImage; ptrFw = (u32 *)pExtImage;
ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x bytes @ %p load_addr=%x\n", ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
ioc->name, fwSize*4, ptrFw, load_addr)); ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr); CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
while (fwSize--) { while (fwSize--) {
@@ -2845,9 +2855,9 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
* 0 else
*
* Returns:
* 1 - hard reset, READY
* 0 - no reset due to History bit, READY
* -1 - no reset due to History bit but not READY
* OR reset but failed to come READY
* -2 - no reset, could not enter DIAG mode
* -3 - reset but bad FW bit
@@ -2990,7 +3000,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
* *
*/ */
CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM); CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
mdelay (1); mdelay(1);
/* /*
* Now hit the reset bit in the Diagnostic register * Now hit the reset bit in the Diagnostic register
@@ -3170,7 +3180,7 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
u32 state; u32 state;
int cntdn, count; int cntdn, count;
drsprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n", drsprintk((KERN_INFO MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
ioc->name, reset_type)); ioc->name, reset_type));
CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT); CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
@@ -3374,6 +3384,9 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->reply_frames = (MPT_FRAME_HDR *) mem; ioc->reply_frames = (MPT_FRAME_HDR *) mem;
ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
dinitprintk((KERN_INFO MYNAM ": %s ReplyBuffers @ %p[%p]\n",
ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
alloc_dma += reply_sz; alloc_dma += reply_sz;
mem += reply_sz; mem += reply_sz;
@@ -3382,7 +3395,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->req_frames = (MPT_FRAME_HDR *) mem; ioc->req_frames = (MPT_FRAME_HDR *) mem;
ioc->req_frames_dma = alloc_dma; ioc->req_frames_dma = alloc_dma;
dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffers @ %p[%p]\n", dinitprintk((KERN_INFO MYNAM ": %s RequestBuffers @ %p[%p]\n",
ioc->name, mem, (void *)(ulong)alloc_dma)); ioc->name, mem, (void *)(ulong)alloc_dma));
ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF); ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
@@ -3408,7 +3421,7 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->ChainBuffer = mem; ioc->ChainBuffer = mem;
ioc->ChainBufferDMA = alloc_dma; ioc->ChainBufferDMA = alloc_dma;
dinitprintk((KERN_INFO MYNAM " :%s.ChainBuffers @ %p(%p)\n", dinitprintk((KERN_INFO MYNAM " :%s ChainBuffers @ %p(%p)\n",
ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA)); ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
/* Initialize the free chain Q. /* Initialize the free chain Q.
@@ -3513,7 +3526,7 @@ out_fail:
*/ */
static int static int
mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
int replyBytes, u16 *u16reply, int maxwait, int sleepFlag) int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
{ {
MPIDefaultReply_t *mptReply; MPIDefaultReply_t *mptReply;
int failcnt = 0; int failcnt = 0;
@@ -3588,7 +3601,7 @@ mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
*/ */
if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0) if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
failcnt++; failcnt++;
dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n", dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n",
ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : "")); ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
@@ -3747,7 +3760,7 @@ WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
} }
dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n", dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
ioc->name, t, le32_to_cpu(*(u32 *)hs_reply), ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
failcnt ? " - MISSING DOORBELL HANDSHAKE!" : "")); failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
/* /*
@@ -3819,7 +3832,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
hdr.PageLength = 0; hdr.PageLength = 0;
hdr.PageNumber = 0; hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; cfg.dir = 0;
@@ -3863,7 +3876,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
hdr.PageLength = 0; hdr.PageLength = 0;
hdr.PageNumber = 1; hdr.PageNumber = 1;
hdr.PageType = MPI_CONFIG_PAGETYPE_LAN; hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; cfg.dir = 0;
@@ -3930,7 +3943,7 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
hdr.PageLength = 0; hdr.PageLength = 0;
hdr.PageNumber = 0; hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT; hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; cfg.dir = 0;
@@ -4012,7 +4025,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
hdr.PageLength = 0; hdr.PageLength = 0;
hdr.PageNumber = 2; hdr.PageNumber = 2;
hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT; hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; cfg.dir = 0;
@@ -4102,7 +4115,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 0; header.PageNumber = 0;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = portnum; cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4122,6 +4135,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
ioc->spi_data.minSyncFactor = MPT_ASYNC; ioc->spi_data.minSyncFactor = MPT_ASYNC;
ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN; ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
rc = 1; rc = 1;
ddvprintk((MYIOC_s_INFO_FMT "Unable to read PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else { } else {
/* Save the Port Page 0 data /* Save the Port Page 0 data
*/ */
@@ -4131,7 +4146,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) { if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS; ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
dinitprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n", ddvprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
ioc->name, pPP0->Capabilities)); ioc->name, pPP0->Capabilities));
} }
ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0; ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
@@ -4140,6 +4155,8 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
ioc->spi_data.maxSyncOffset = (u8) (data >> 16); ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK; data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
ioc->spi_data.minSyncFactor = (u8) (data >> 8); ioc->spi_data.minSyncFactor = (u8) (data >> 8);
ddvprintk((MYIOC_s_INFO_FMT "PortPage0 minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
} else { } else {
ioc->spi_data.maxSyncOffset = 0; ioc->spi_data.maxSyncOffset = 0;
ioc->spi_data.minSyncFactor = MPT_ASYNC; ioc->spi_data.minSyncFactor = MPT_ASYNC;
@@ -4152,8 +4169,11 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) || if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
(ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) { (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
if (ioc->spi_data.minSyncFactor < MPT_ULTRA) if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
ioc->spi_data.minSyncFactor = MPT_ULTRA; ioc->spi_data.minSyncFactor = MPT_ULTRA;
ddvprintk((MYIOC_s_INFO_FMT "HVD or SE detected, minSyncFactor=%x\n",
ioc->name, ioc->spi_data.minSyncFactor));
}
} }
} }
if (pbuf) { if (pbuf) {
@@ -4168,7 +4188,7 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 2; header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT; header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = portnum; cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4236,7 +4256,7 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 1; header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = portnum; cfg.pageAddr = portnum;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4245,8 +4265,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
if (mpt_config(ioc, &cfg) != 0) if (mpt_config(ioc, &cfg) != 0)
return -EFAULT; return -EFAULT;
ioc->spi_data.sdp1version = cfg.hdr->PageVersion; ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp1length = cfg.hdr->PageLength; ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
header.PageVersion = 0; header.PageVersion = 0;
header.PageLength = 0; header.PageLength = 0;
@@ -4255,8 +4275,8 @@ mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
if (mpt_config(ioc, &cfg) != 0) if (mpt_config(ioc, &cfg) != 0)
return -EFAULT; return -EFAULT;
ioc->spi_data.sdp0version = cfg.hdr->PageVersion; ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
ioc->spi_data.sdp0length = cfg.hdr->PageLength; ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n", dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n",
ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length)); ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
@@ -4298,7 +4318,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 2; header.PageNumber = 2;
header.PageType = MPI_CONFIG_PAGETYPE_IOC; header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = 0; cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4394,7 +4414,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 3; header.PageNumber = 3;
header.PageType = MPI_CONFIG_PAGETYPE_IOC; header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = 0; cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4446,7 +4466,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 4; header.PageNumber = 4;
header.PageType = MPI_CONFIG_PAGETYPE_IOC; header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = 0; cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4498,7 +4518,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
header.PageLength = 0; header.PageLength = 0;
header.PageNumber = 1; header.PageNumber = 1;
header.PageType = MPI_CONFIG_PAGETYPE_IOC; header.PageType = MPI_CONFIG_PAGETYPE_IOC;
cfg.hdr = &header; cfg.cfghdr.hdr = &header;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = 0; cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -4580,13 +4600,13 @@ SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
if (evnp == NULL) { if (evnp == NULL) {
dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", devtprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
ioc->name)); ioc->name));
return 0; return 0;
} }
memset(evnp, 0, sizeof(*evnp)); memset(evnp, 0, sizeof(*evnp));
dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch)); devtprintk((MYIOC_s_INFO_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
evnp->ChainOffset = 0; evnp->ChainOffset = 0;
@@ -4610,8 +4630,10 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
EventAck_t *pAck; EventAck_t *pAck;
if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n", printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK "
ioc->name); "request frame for Event=%x EventContext=%x EventData=%x!\n",
ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
le32_to_cpu(evnp->Data[0]));
return -1; return -1;
} }
memset(pAck, 0, sizeof(*pAck)); memset(pAck, 0, sizeof(*pAck));
@@ -4647,10 +4669,11 @@ int
mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
{ {
Config_t *pReq; Config_t *pReq;
ConfigExtendedPageHeader_t *pExtHdr = NULL;
MPT_FRAME_HDR *mf; MPT_FRAME_HDR *mf;
unsigned long flags; unsigned long flags;
int ii, rc; int ii, rc;
u32 flagsLength; int flagsLength;
int in_isr; int in_isr;
/* Prevent calling wait_event() (below), if caller happens /* Prevent calling wait_event() (below), if caller happens
@@ -4675,16 +4698,30 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->Reserved = 0; pReq->Reserved = 0;
pReq->ChainOffset = 0; pReq->ChainOffset = 0;
pReq->Function = MPI_FUNCTION_CONFIG; pReq->Function = MPI_FUNCTION_CONFIG;
/* Assume page type is not extended and clear "reserved" fields. */
pReq->ExtPageLength = 0; pReq->ExtPageLength = 0;
pReq->ExtPageType = 0; pReq->ExtPageType = 0;
pReq->MsgFlags = 0; pReq->MsgFlags = 0;
for (ii=0; ii < 8; ii++) for (ii=0; ii < 8; ii++)
pReq->Reserved2[ii] = 0; pReq->Reserved2[ii] = 0;
pReq->Header.PageVersion = pCfg->hdr->PageVersion; pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
pReq->Header.PageLength = pCfg->hdr->PageLength; pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
pReq->Header.PageNumber = pCfg->hdr->PageNumber; pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
pReq->ExtPageType = pExtHdr->ExtPageType;
pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
/* Page Length must be treated as a reserved field for the extended header. */
pReq->Header.PageLength = 0;
}
pReq->PageAddress = cpu_to_le32(pCfg->pageAddr); pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
/* Add a SGE to the config request. /* Add a SGE to the config request.
@@ -4694,13 +4731,21 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
else else
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
-	flagsLength |= pCfg->hdr->PageLength * 4;
+	if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
+		flagsLength |= pExtHdr->ExtPageLength * 4;
+
+		dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
+			ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action));
+	}
+	else {
+		flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
+
+		dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
+			ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
+	}

	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);

-	dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
-		ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
/* Append pCfg pointer to end of mf /* Append pCfg pointer to end of mf
*/ */
*((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg;
@@ -4789,8 +4834,8 @@ mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
pReq->Reserved3 = 0; pReq->Reserved3 = 0;
pReq->NumAddressBytes = 0x01; pReq->NumAddressBytes = 0x01;
pReq->Reserved4 = 0; pReq->Reserved4 = 0;
pReq->DataLength = 0x04; pReq->DataLength = cpu_to_le16(0x04);
pdev = (struct pci_dev *) ioc->pcidev; pdev = ioc->pcidev;
if (pdev->devfn & 1) if (pdev->devfn & 1)
pReq->DeviceAddr = 0xB2; pReq->DeviceAddr = 0xB2;
else else
@@ -5504,6 +5549,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
* If needed, send (a single) EventAck. * If needed, send (a single) EventAck.
*/ */
if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) { if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
devtprintk((MYIOC_s_WARN_FMT
"EventAck required\n",ioc->name));
if ((ii = SendEventAck(ioc, pEventReply)) != 0) { if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n", devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n",
ioc->name, ii)); ioc->name, ii));
@@ -5584,7 +5631,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
case 0x00080000: case 0x00080000:
desc = "Outbound DMA Overrun"; desc = "Outbound DMA Overrun";
break; break;
case 0x00090000: case 0x00090000:
desc = "Task Management"; desc = "Task Management";
break; break;
@@ -5600,7 +5647,7 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
case 0x000C0000: case 0x000C0000:
desc = "Untagged Table Size"; desc = "Untagged Table Size";
break; break;
} }
printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc); printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
@@ -5692,7 +5739,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
break; break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
/* This error is checked in scsi_io_done(). Skip. /* This error is checked in scsi_io_done(). Skip.
desc = "SCSI Data Underrun"; desc = "SCSI Data Underrun";
*/ */
break; break;

View File

@@ -915,7 +915,10 @@ struct scsi_cmnd;
typedef struct _x_config_parms { typedef struct _x_config_parms {
struct list_head linkage; /* linked list */ struct list_head linkage; /* linked list */
struct timer_list timer; /* timer function for this request */ struct timer_list timer; /* timer function for this request */
ConfigPageHeader_t *hdr; union {
ConfigExtendedPageHeader_t *ehdr;
ConfigPageHeader_t *hdr;
} cfghdr;
dma_addr_t physAddr; dma_addr_t physAddr;
int wait_done; /* wait for this request */ int wait_done; /* wait for this request */
u32 pageAddr; /* properly formatted */ u32 pageAddr; /* properly formatted */
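
The cfghdr union above replaces the old bare ConfigPageHeader_t pointer, so one CONFIGPARMS can describe either a standard or an extended config page. A minimal sketch of how a caller might fill it (illustrative fragment, not part of this commit; the standard-page form mirrors the mpt_read_ioc_pg_* hunks above):

	ConfigPageHeader_t header;		/* standard config page header */
	ConfigExtendedPageHeader_t eheader;	/* extended config page header */
	CONFIGPARMS cfg;

	/* Standard page: mpt_config() copies PageVersion/Length/Number/Type. */
	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	cfg.cfghdr.hdr = &header;

	/* Extended page: PageType must be MPI_CONFIG_PAGETYPE_EXTENDED so that
	 * mpt_config() picks up ExtPageType/ExtPageLength from the header;
	 * only one union member is valid for a given request. */
	eheader.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	cfg.cfghdr.ehdr = &eheader;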

View File

@@ -242,7 +242,7 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
/* Set the command status to GOOD if IOC Status is GOOD /* Set the command status to GOOD if IOC Status is GOOD
* OR if SCSI I/O cmd and data underrun or recovered error. * OR if SCSI I/O cmd and data underrun or recovered error.
*/ */
iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK; iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
if (iocStatus == MPI_IOCSTATUS_SUCCESS) if (iocStatus == MPI_IOCSTATUS_SUCCESS)
ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
@@ -2324,7 +2324,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
hdr.PageLength = 0; hdr.PageLength = 0;
hdr.PageNumber = 0; hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.physAddr = -1; cfg.physAddr = -1;
cfg.pageAddr = 0; cfg.pageAddr = 0;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
@@ -2333,7 +2333,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
strncpy(karg.serial_number, " ", 24); strncpy(karg.serial_number, " ", 24);
if (mpt_config(ioc, &cfg) == 0) { if (mpt_config(ioc, &cfg) == 0) {
if (cfg.hdr->PageLength > 0) { if (cfg.cfghdr.hdr->PageLength > 0) {
/* Issue the second config page request */ /* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
@@ -2479,7 +2479,7 @@ mptctl_hp_targetinfo(unsigned long arg)
hdr.PageNumber = 0; hdr.PageNumber = 0;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0; cfg.dir = 0;
cfg.timeout = 0; cfg.timeout = 0;
@@ -2527,15 +2527,15 @@ mptctl_hp_targetinfo(unsigned long arg)
hdr.PageNumber = 3; hdr.PageNumber = 3;
hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &hdr; cfg.cfghdr.hdr = &hdr;
cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
cfg.dir = 0; cfg.dir = 0;
cfg.timeout = 0; cfg.timeout = 0;
cfg.physAddr = -1; cfg.physAddr = -1;
if ((mpt_config(ioc, &cfg) == 0) && (cfg.hdr->PageLength > 0)) { if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
/* Issue the second config page request */ /* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.hdr->PageLength * 4; data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent( pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
ioc->pcidev, data_sz, &page_dma); ioc->pcidev, data_sz, &page_dma);
if (pg3_alloc) { if (pg3_alloc) {

View File

@@ -281,12 +281,12 @@ mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer; offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
chain_idx = offset / ioc->req_sz; chain_idx = offset / ioc->req_sz;
rc = SUCCESS; rc = SUCCESS;
dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n", dsgprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
ioc->name, *retIndex, chainBuf)); ioc->name, chainBuf, ioc->ChainBuffer, offset, chain_idx));
} else { } else {
rc = FAILED; rc = FAILED;
chain_idx = MPT_HOST_NO_CHAIN; chain_idx = MPT_HOST_NO_CHAIN;
dfailprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n", dfailprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer failed\n",
ioc->name)); ioc->name));
} }
spin_unlock_irqrestore(&ioc->FreeQlock, flags); spin_unlock_irqrestore(&ioc->FreeQlock, flags);
@@ -432,7 +432,7 @@ nextSGEset:
*/ */
pReq->ChainOffset = 0; pReq->ChainOffset = 0;
RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03; RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
dsgprintk((MYIOC_s_ERR_FMT dsgprintk((MYIOC_s_INFO_FMT
"Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset)); "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
ioc->RequestNB[req_idx] = RequestNB; ioc->RequestNB[req_idx] = RequestNB;
} }
@@ -491,11 +491,12 @@ nextSGEset:
/* NOTE: psge points to the beginning of the chain element /* NOTE: psge points to the beginning of the chain element
* in current buffer. Get a chain buffer. * in current buffer. Get a chain buffer.
*/ */
-		dsgprintk((MYIOC_s_INFO_FMT
-		    "calling getFreeChainBuffer SCSI cmd=%02x (%p)\n",
-		    ioc->name, pReq->CDB[0], SCpnt));
-		if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED)
+		if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
+			dfailprintk((MYIOC_s_INFO_FMT
+			    "getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
+			    ioc->name, pReq->CDB[0], SCpnt));
			return FAILED;
+		}
/* Update the tracking arrays. /* Update the tracking arrays.
* If chainSge == NULL, update ReqToChain, else ChainToChain * If chainSge == NULL, update ReqToChain, else ChainToChain
@@ -577,14 +578,20 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 1; return 1;
} }
-	dmfprintk((MYIOC_s_INFO_FMT
-		"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
-			ioc->name, mf, mr, sc, req_idx));

	sc->result = DID_OK << 16;		/* Set default reply as OK */
	pScsiReq = (SCSIIORequest_t *) mf;
	pScsiReply = (SCSIIOReply_t *) mr;

+	if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
+		dmfprintk((MYIOC_s_INFO_FMT
+			"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
+			ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
+	}else{
+		dmfprintk((MYIOC_s_INFO_FMT
+			"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
+			ioc->name, mf, mr, sc, req_idx));
+	}
if (pScsiReply == NULL) { if (pScsiReply == NULL) {
/* special context reply handling */ /* special context reply handling */
; ;
@@ -658,8 +665,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
/* Sufficient data transfer occurred */ /* Sufficient data transfer occurred */
sc->result = (DID_OK << 16) | scsi_status; sc->result = (DID_OK << 16) | scsi_status;
} else if ( xfer_cnt == 0 ) { } else if ( xfer_cnt == 0 ) {
/* A CRC Error causes this condition; retry */ /* A CRC Error causes this condition; retry */
sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) | sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
(CHECK_CONDITION << 1); (CHECK_CONDITION << 1);
sc->sense_buffer[0] = 0x70; sc->sense_buffer[0] = 0x70;
sc->sense_buffer[2] = NO_SENSE; sc->sense_buffer[2] = NO_SENSE;
@@ -668,7 +675,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
} else { } else {
sc->result = DID_SOFT_ERROR << 16; sc->result = DID_SOFT_ERROR << 16;
} }
-			dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->target));
+			dreplyprintk((KERN_NOTICE
+			    "RESIDUAL_MISMATCH: result=%x on id=%d\n",
+			    sc->result, sc->device->id));
break; break;
case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
@@ -796,7 +805,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
return 1; return 1;
} }
/* /*
* mptscsih_flush_running_cmds - For each command found, search * mptscsih_flush_running_cmds - For each command found, search
* Scsi_Host instance taskQ and reply to OS. * Scsi_Host instance taskQ and reply to OS.
@@ -1017,7 +1025,7 @@ mptscsih_remove(struct pci_dev *pdev)
scsi_host_put(host); scsi_host_put(host);
mpt_detach(pdev); mpt_detach(pdev);
} }
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1072,7 +1080,7 @@ mptscsih_resume(struct pci_dev *pdev)
MPT_SCSI_HOST *hd; MPT_SCSI_HOST *hd;
mpt_resume(pdev); mpt_resume(pdev);
if(!host) if(!host)
return 0; return 0;
@@ -1214,8 +1222,8 @@ mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t off
int size = 0; int size = 0;
if (func) { if (func) {
/* /*
* write is not supported * write is not supported
*/ */
} else { } else {
if (start) if (start)
@@ -1535,17 +1543,17 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
*/ */
if (mptscsih_tm_pending_wait(hd) == FAILED) { if (mptscsih_tm_pending_wait(hd) == FAILED) {
if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler abort: " dtmprintk((KERN_INFO MYNAM ": %s: TMHandler abort: "
"Timed out waiting for last TM (%d) to complete! \n", "Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending)); hd->ioc->name, hd->tmPending));
return FAILED; return FAILED;
} else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler target reset: " dtmprintk((KERN_INFO MYNAM ": %s: TMHandler target reset: "
"Timed out waiting for last TM (%d) to complete! \n", "Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending)); hd->ioc->name, hd->tmPending));
return FAILED; return FAILED;
} else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler bus reset: " dtmprintk((KERN_INFO MYNAM ": %s: TMHandler bus reset: "
"Timed out waiting for last TM (%d) to complete! \n", "Timed out waiting for last TM (%d) to complete! \n",
hd->ioc->name, hd->tmPending)); hd->ioc->name, hd->tmPending));
if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)) if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS))
@@ -1631,8 +1639,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) { if ((mf = mpt_get_msg_frame(hd->ioc->TaskCtx, hd->ioc)) == NULL) {
dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
hd->ioc->name)); hd->ioc->name));
-		//return FAILED;
-		return -999;
+		return FAILED;
} }
dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n", dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
hd->ioc->name, mf)); hd->ioc->name, mf));
@@ -1661,9 +1668,8 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun
pScsiTm->TaskMsgContext = ctx2abort; pScsiTm->TaskMsgContext = ctx2abort;
-	dtmprintk((MYIOC_s_INFO_FMT
-		"IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
-		hd->ioc->name, ctx2abort, type));
+	dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
+		hd->ioc->name, ctx2abort, type));
DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm); DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm);
@@ -1902,13 +1908,13 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
/* If we can't locate the host to reset, then we failed. */ /* If we can't locate the host to reset, then we failed. */
if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: " dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
"Can't locate host! (sc=%p)\n", "Can't locate host! (sc=%p)\n",
SCpnt ) ); SCpnt ) );
return FAILED; return FAILED;
} }
printk(KERN_WARNING MYNAM ": %s: >> Attempting host reset! (sc=%p)\n", printk(KERN_WARNING MYNAM ": %s: Attempting host reset! (sc=%p)\n",
hd->ioc->name, SCpnt); hd->ioc->name, SCpnt);
/* If our attempts to reset the host failed, then return a failed /* If our attempts to reset the host failed, then return a failed
@@ -1924,7 +1930,7 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
hd->tmState = TM_STATE_NONE; hd->tmState = TM_STATE_NONE;
} }
dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: " dtmprintk( ( KERN_INFO MYNAM ": mptscsih_host_reset: "
"Status = %s\n", "Status = %s\n",
(status == SUCCESS) ? "SUCCESS" : "FAILED" ) ); (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
@@ -1951,8 +1957,8 @@ mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
if (hd->tmState == TM_STATE_NONE) { if (hd->tmState == TM_STATE_NONE) {
hd->tmState = TM_STATE_IN_PROGRESS; hd->tmState = TM_STATE_IN_PROGRESS;
hd->tmPending = 1; hd->tmPending = 1;
status = SUCCESS;
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
status = SUCCESS;
break; break;
} }
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@@ -1980,7 +1986,7 @@ mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
spin_lock_irqsave(&hd->ioc->FreeQlock, flags); spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
if(hd->tmPending == 0) { if(hd->tmPending == 0) {
status = SUCCESS; status = SUCCESS;
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
break; break;
} }
spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
@@ -2318,10 +2324,10 @@ mptscsih_slave_configure(struct scsi_device *device)
if (pTarget == NULL) { if (pTarget == NULL) {
/* Driver doesn't know about this device. /* Driver doesn't know about this device.
* Kernel may generate a "Dummy Lun 0" which * Kernel may generate a "Dummy Lun 0" which
* may become a real Lun if a * may become a real Lun if a
* "scsi add-single-device" command is executed * "scsi add-single-device" command is executed
* while the driver is active (hot-plug a * while the driver is active (hot-plug a
* device). LSI Raid controllers need * device). LSI Raid controllers need
* queue_depth set to DEV_HIGH for this reason. * queue_depth set to DEV_HIGH for this reason.
*/ */
scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
@@ -2691,7 +2697,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
* If the peripheral qualifier filter is enabled then if the target reports a 0x1 * If the peripheral qualifier filter is enabled then if the target reports a 0x1
* (i.e. The targer is capable of supporting the specified peripheral device type * (i.e. The targer is capable of supporting the specified peripheral device type
* on this logical unit; however, the physical device is not currently connected * on this logical unit; however, the physical device is not currently connected
* to this logical unit) it will be converted to a 0x3 (i.e. The target is not * to this logical unit) it will be converted to a 0x3 (i.e. The target is not
* capable of supporting a physical device on this logical unit). This is to work * capable of supporting a physical device on this logical unit). This is to work
* around a bug in th emid-layer in some distributions in which the mid-layer will * around a bug in th emid-layer in some distributions in which the mid-layer will
* continue to try to communicate to the LUN and evntually create a dummy LUN. * continue to try to communicate to the LUN and evntually create a dummy LUN.
@@ -3194,8 +3200,8 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
/* Get a MF for this command. /* Get a MF for this command.
*/ */
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n", dfailprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
ioc->name)); ioc->name));
return -EAGAIN; return -EAGAIN;
} }
@@ -3289,7 +3295,7 @@ mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus)
/* Get a MF for this command. /* Get a MF for this command.
*/ */
if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) { if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
dprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n", dfailprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
ioc->name)); ioc->name));
return -EAGAIN; return -EAGAIN;
} }
@@ -3447,7 +3453,7 @@ mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
* some type of error occurred. * some type of error occurred.
*/ */
MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr;
if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS) if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
completionCode = MPT_SCANDV_GOOD; completionCode = MPT_SCANDV_GOOD;
else else
completionCode = MPT_SCANDV_SOME_ERROR; completionCode = MPT_SCANDV_SOME_ERROR;
@@ -3955,7 +3961,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
header1.PageLength = ioc->spi_data.sdp1length; header1.PageLength = ioc->spi_data.sdp1length;
header1.PageNumber = 1; header1.PageNumber = 1;
header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE; header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
cfg.hdr = &header1; cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr; cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1; cfg.dir = 1;
@@ -3996,9 +4002,9 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC " dnegoprintk(("syncronize cache: id=%d width=0 factor=MPT_ASYNC "
"offset=0 negoFlags=%x request=%x config=%x\n", "offset=0 negoFlags=%x request=%x config=%x\n",
id, flags, requested, configuration)); id, flags, requested, configuration));
pcfg1Data->RequestedParameters = le32_to_cpu(requested); pcfg1Data->RequestedParameters = cpu_to_le32(requested);
pcfg1Data->Reserved = 0; pcfg1Data->Reserved = 0;
pcfg1Data->Configuration = le32_to_cpu(configuration); pcfg1Data->Configuration = cpu_to_le32(configuration);
cfg.pageAddr = (bus<<8) | id; cfg.pageAddr = (bus<<8) | id;
mpt_config(hd->ioc, &cfg); mpt_config(hd->ioc, &cfg);
} }
@@ -4353,7 +4359,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
/* Prep cfg structure /* Prep cfg structure
*/ */
cfg.pageAddr = (bus<<8) | id; cfg.pageAddr = (bus<<8) | id;
cfg.hdr = NULL; cfg.cfghdr.hdr = NULL;
/* Prep SDP0 header /* Prep SDP0 header
*/ */
@@ -4399,7 +4405,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz); pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
cfg1_dma_addr = dvbuf_dma + sz; cfg1_dma_addr = dvbuf_dma + sz;
/* Skip this ID? Set cfg.hdr to force config page write /* Skip this ID? Set cfg.cfghdr.hdr to force config page write
*/ */
{ {
ScsiCfgData *pspi_data = &hd->ioc->spi_data; ScsiCfgData *pspi_data = &hd->ioc->spi_data;
@@ -4417,7 +4423,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
dv.cmd = MPT_SET_MAX; dv.cmd = MPT_SET_MAX;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.hdr = &header1; cfg.cfghdr.hdr = &header1;
/* Save the final negotiated settings to /* Save the final negotiated settings to
* SCSI device page 1. * SCSI device page 1.
@@ -4483,7 +4489,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
dv.cmd = MPT_SET_MIN; dv.cmd = MPT_SET_MIN;
mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data); mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
cfg.hdr = &header1; cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr; cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1; cfg.dir = 1;
@@ -4596,8 +4602,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
if ((pbuf1[56] & 0x02) == 0) { if ((pbuf1[56] & 0x02) == 0) {
pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS; pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS; hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT ddvprintk((MYIOC_s_NOTE_FMT
"DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n", "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n",
ioc->name, id, pbuf1[56])); ioc->name, id, pbuf1[56]));
} }
} }
@@ -4637,7 +4643,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
u32 sdp0_info; u32 sdp0_info;
u32 sdp0_nego; u32 sdp0_nego;
cfg.hdr = &header0; cfg.cfghdr.hdr = &header0;
cfg.physAddr = cfg0_dma_addr; cfg.physAddr = cfg0_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.dir = 0; cfg.dir = 0;
@@ -4673,7 +4679,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
if (!firstPass) if (!firstPass)
doFallback = 1; doFallback = 1;
} else { } else {
ddvprintk((MYIOC_s_NOTE_FMT ddvprintk((MYIOC_s_NOTE_FMT
"DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id)); "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE; hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
mptscsih_initTarget(hd, mptscsih_initTarget(hd,
@@ -4689,8 +4695,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
} else if (rc == MPT_SCANDV_ISSUE_SENSE) } else if (rc == MPT_SCANDV_ISSUE_SENSE)
doFallback = 1; /* set fallback flag */ doFallback = 1; /* set fallback flag */
else if ((rc == MPT_SCANDV_DID_RESET) || else if ((rc == MPT_SCANDV_DID_RESET) ||
(rc == MPT_SCANDV_SENSE) || (rc == MPT_SCANDV_SENSE) ||
(rc == MPT_SCANDV_FALLBACK)) (rc == MPT_SCANDV_FALLBACK))
doFallback = 1; /* set fallback flag */ doFallback = 1; /* set fallback flag */
else else
@@ -4722,7 +4728,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
* 4) release * 4) release
* 5) update nego parms to target struct * 5) update nego parms to target struct
*/ */
cfg.hdr = &header1; cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr; cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1; cfg.dir = 1;
@@ -5121,12 +5127,12 @@ target_done:
/* Set if cfg1_dma_addr contents is valid /* Set if cfg1_dma_addr contents is valid
*/ */
if ((cfg.hdr != NULL) && (retcode == 0)){ if ((cfg.cfghdr.hdr != NULL) && (retcode == 0)){
/* If disk, not U320, disable QAS /* If disk, not U320, disable QAS
*/ */
if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) { if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS; hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
ddvprintk((MYIOC_s_NOTE_FMT ddvprintk((MYIOC_s_NOTE_FMT
"noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor)); "noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
} }
@@ -5137,7 +5143,7 @@ target_done:
* skip save of the final negotiated settings to * skip save of the final negotiated settings to
* SCSI device page 1. * SCSI device page 1.
* *
cfg.hdr = &header1; cfg.cfghdr.hdr = &header1;
cfg.physAddr = cfg1_dma_addr; cfg.physAddr = cfg1_dma_addr;
cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
cfg.dir = 1; cfg.dir = 1;
@@ -5248,7 +5254,7 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
/* Update tmax values with those from Device Page 0.*/ /* Update tmax values with those from Device Page 0.*/
pPage0 = (SCSIDevicePage0_t *) pPage; pPage0 = (SCSIDevicePage0_t *) pPage;
if (pPage0) { if (pPage0) {
val = cpu_to_le32(pPage0->NegotiatedParameters); val = le32_to_cpu(pPage0->NegotiatedParameters);
dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0; dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16; dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8; dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
@@ -5276,12 +5282,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
dv->now.offset, &val, &configuration, dv->now.flags); dv->now.offset, &val, &configuration, dv->now.flags);
dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n", dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration)); id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val); pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0; pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration); pPage1->Configuration = cpu_to_le32(configuration);
} }
ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x request=%x configuration=%x\n", ddvprintk(("id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x configuration=%x\n",
id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration)); id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
break; break;
@@ -5301,9 +5307,9 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
offset, &val, &configuration, negoFlags); offset, &val, &configuration, negoFlags);
dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n", dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
id, width, factor, offset, negoFlags, val, configuration)); id, width, factor, offset, negoFlags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val); pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0; pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration); pPage1->Configuration = cpu_to_le32(configuration);
} }
ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n", ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
id, width, factor, offset, val, configuration, negoFlags)); id, width, factor, offset, val, configuration, negoFlags));
@@ -5377,12 +5383,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
if (pPage1) { if (pPage1) {
mptscsih_setDevicePage1Flags (width, factor, offset, &val, mptscsih_setDevicePage1Flags (width, factor, offset, &val,
&configuration, dv->now.flags); &configuration, dv->now.flags);
dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x flags=%x request=%x config=%x\n", dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x negoFlags=%x request=%x config=%x\n",
id, width, offset, factor, dv->now.flags, val, configuration)); id, width, offset, factor, dv->now.flags, val, configuration));
pPage1->RequestedParameters = le32_to_cpu(val); pPage1->RequestedParameters = cpu_to_le32(val);
pPage1->Reserved = 0; pPage1->Reserved = 0;
pPage1->Configuration = le32_to_cpu(configuration); pPage1->Configuration = cpu_to_le32(configuration);
} }
ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n", ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",

View File

@@ -162,15 +162,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u8 *mem; u8 *mem;
int error=0; int error=0;
int r; int r;
if ((r = mpt_attach(pdev,id)) != 0) if ((r = mpt_attach(pdev,id)) != 0)
return r; return r;
ioc = pci_get_drvdata(pdev); ioc = pci_get_drvdata(pdev);
ioc->DoneCtx = mptspiDoneCtx; ioc->DoneCtx = mptspiDoneCtx;
ioc->TaskCtx = mptspiTaskCtx; ioc->TaskCtx = mptspiTaskCtx;
ioc->InternalCtx = mptspiInternalCtx; ioc->InternalCtx = mptspiInternalCtx;
/* Added sanity check on readiness of the MPT adapter. /* Added sanity check on readiness of the MPT adapter.
*/ */
if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {

View File

@@ -1499,22 +1499,43 @@ static int tw_scsiop_inquiry(TW_Device_Extension *tw_dev, int request_id)
return 0; return 0;
} /* End tw_scsiop_inquiry() */ } /* End tw_scsiop_inquiry() */
static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
void *data, unsigned int len)
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
void *buf;
unsigned int transfer_len;
if (cmd->use_sg) {
struct scatterlist *sg =
(struct scatterlist *)cmd->request_buffer;
buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len);
} else {
buf = cmd->request_buffer;
transfer_len = min(cmd->request_bufflen, len);
}
memcpy(buf, data, transfer_len);
if (cmd->use_sg) {
struct scatterlist *sg;
sg = (struct scatterlist *)cmd->request_buffer;
kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
}
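
tw_transfer_internal() above bounce-copies driver-built data into the command's data buffer, mapping the first scatter-gather page with kmap_atomic() when use_sg is set. A minimal hypothetical caller (illustrative only; the real callers appear in the inquiry, mode-sense and read-capacity completion hunks below) would look like:

	unsigned char resp[36];			/* driver-built response */

	memset(resp, 0, sizeof(resp));
	resp[0] = TYPE_DISK;			/* peripheral device type */
	tw_transfer_internal(tw_dev, request_id, resp, sizeof(resp));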
/* This function is called by the isr to complete an inquiry command */ /* This function is called by the isr to complete an inquiry command */
static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id) static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_id)
{ {
unsigned char *is_unit_present; unsigned char *is_unit_present;
-	unsigned char *request_buffer;
+	unsigned char request_buffer[36];
	TW_Param *param;

	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_inquiry_complete()\n");

-	/* Fill request buffer */
-	if (tw_dev->srb[request_id]->request_buffer == NULL) {
-		printk(KERN_WARNING "3w-xxxx: tw_scsiop_inquiry_complete(): Request buffer NULL.\n");
-		return 1;
-	}
-	request_buffer = tw_dev->srb[request_id]->request_buffer;
-	memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(request_buffer, 0, sizeof(request_buffer));
request_buffer[0] = TYPE_DISK; /* Peripheral device type */ request_buffer[0] = TYPE_DISK; /* Peripheral device type */
request_buffer[1] = 0; /* Device type modifier */ request_buffer[1] = 0; /* Device type modifier */
request_buffer[2] = 0; /* No ansi/iso compliance */ request_buffer[2] = 0; /* No ansi/iso compliance */
@@ -1522,6 +1543,8 @@ static int tw_scsiop_inquiry_complete(TW_Device_Extension *tw_dev, int request_i
memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */ memcpy(&request_buffer[8], "3ware ", 8); /* Vendor ID */
sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id); sprintf(&request_buffer[16], "Logical Disk %-2d ", tw_dev->srb[request_id]->device->id);
memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3); memcpy(&request_buffer[32], TW_DRIVER_VERSION, 3);
tw_transfer_internal(tw_dev, request_id, request_buffer,
sizeof(request_buffer));
param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
if (param == NULL) { if (param == NULL) {
@@ -1612,7 +1635,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
{ {
TW_Param *param; TW_Param *param;
unsigned char *flags; unsigned char *flags;
unsigned char *request_buffer; unsigned char request_buffer[8];
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n"); dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_mode_sense_complete()\n");
@@ -1622,8 +1645,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
return 1; return 1;
} }
flags = (char *)&(param->data[0]); flags = (char *)&(param->data[0]);
-	request_buffer = tw_dev->srb[request_id]->buffer;
-	memset(request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(request_buffer, 0, sizeof(request_buffer));
request_buffer[0] = 0xf; /* mode data length */ request_buffer[0] = 0xf; /* mode data length */
request_buffer[1] = 0; /* default medium type */ request_buffer[1] = 0; /* default medium type */
@@ -1635,6 +1657,8 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
request_buffer[6] = 0x4; /* WCE on */ request_buffer[6] = 0x4; /* WCE on */
else else
request_buffer[6] = 0x0; /* WCE off */ request_buffer[6] = 0x0; /* WCE off */
tw_transfer_internal(tw_dev, request_id, request_buffer,
sizeof(request_buffer));
return 0; return 0;
} /* End tw_scsiop_mode_sense_complete() */ } /* End tw_scsiop_mode_sense_complete() */
@@ -1701,17 +1725,12 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
{ {
unsigned char *param_data; unsigned char *param_data;
u32 capacity; u32 capacity;
char *buff; char buff[8];
TW_Param *param; TW_Param *param;
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n"); dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity_complete()\n");
-	buff = tw_dev->srb[request_id]->request_buffer;
-	if (buff == NULL) {
-		printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Request buffer NULL.\n");
-		return 1;
-	}
-	memset(buff, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(buff, 0, sizeof(buff));
param = (TW_Param *)tw_dev->alignment_virtual_address[request_id]; param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
if (param == NULL) { if (param == NULL) {
printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n"); printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_capacity_complete(): Bad alignment virtual address.\n");
@@ -1739,6 +1758,8 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff; buff[6] = (TW_BLOCK_SIZE >> 8) & 0xff;
buff[7] = TW_BLOCK_SIZE & 0xff; buff[7] = TW_BLOCK_SIZE & 0xff;
tw_transfer_internal(tw_dev, request_id, buff, sizeof(buff));
return 0; return 0;
} /* End tw_scsiop_read_capacity_complete() */ } /* End tw_scsiop_read_capacity_complete() */

View File

@@ -1,5 +1,11 @@
menu "SCSI device support" menu "SCSI device support"
config RAID_ATTRS
tristate "RAID Transport Class"
default n
---help---
Provides RAID
config SCSI config SCSI
tristate "SCSI device support" tristate "SCSI device support"
---help--- ---help---

View File

@@ -22,6 +22,8 @@ subdir-$(CONFIG_PCMCIA) += pcmcia
obj-$(CONFIG_SCSI) += scsi_mod.o obj-$(CONFIG_SCSI) += scsi_mod.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
# --- NOTE ORDERING HERE --- # --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to # For kernel non-modular link, transport attributes need to
# be initialised before drivers # be initialised before drivers

View File

@@ -133,6 +133,7 @@ struct inquiry_data {
static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap); static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg); static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd); static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO #ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status); static char *aac_get_status_string(u32 status);
@@ -348,6 +349,27 @@ static void aac_io_done(struct scsi_cmnd * scsicmd)
spin_unlock_irqrestore(host->host_lock, cpu_flags); spin_unlock_irqrestore(host->host_lock, cpu_flags);
} }
static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
{
void *buf;
unsigned int transfer_len;
struct scatterlist *sg = scsicmd->request_buffer;
if (scsicmd->use_sg) {
buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
transfer_len = min(sg->length, len + offset);
} else {
buf = scsicmd->request_buffer;
transfer_len = min(scsicmd->request_bufflen, len + offset);
}
memcpy(buf + offset, data, transfer_len - offset);
if (scsicmd->use_sg)
kunmap_atomic(buf - sg->offset, KM_IRQ0);
}
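
aac_internal_transfer() above plays the same role for aacraid, with an extra offset argument so a caller can patch just part of an earlier response, as get_container_name_callback() below does for the INQUIRY product-id field. An illustrative call (values hypothetical, not from the commit) overwriting 8 bytes at offset 16 of the response:

	char pid[8];

	memcpy(pid, "Volume01", sizeof(pid));	/* hypothetical product id */
	aac_internal_transfer(scsicmd, pid, 16, sizeof(pid));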
static void get_container_name_callback(void *context, struct fib * fibptr) static void get_container_name_callback(void *context, struct fib * fibptr)
{ {
struct aac_get_name_resp * get_name_reply; struct aac_get_name_resp * get_name_reply;
@@ -363,18 +385,22 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
/* Failure is irrelevant, using default value instead */ /* Failure is irrelevant, using default value instead */
if ((le32_to_cpu(get_name_reply->status) == CT_OK) if ((le32_to_cpu(get_name_reply->status) == CT_OK)
&& (get_name_reply->data[0] != '\0')) { && (get_name_reply->data[0] != '\0')) {
-		int count;
-		char * dp;
-		char * sp = get_name_reply->data;
+		char *sp = get_name_reply->data;
		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
		while (*sp == ' ')
			++sp;
-		count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
-		dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
-		if (*sp) do {
-			*dp++ = (*sp) ? *sp++ : ' ';
-		} while (--count > 0);
+		if (*sp) {
+			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
+			int count = sizeof(d);
+			char *dp = d;
+			do {
+				*dp++ = (*sp) ? *sp++ : ' ';
+			} while (--count > 0);
+			aac_internal_transfer(scsicmd, d,
+			  offsetof(struct inquiry_data, inqd_pid), sizeof(d));
+		}
} }
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
fib_complete(fibptr); fib_complete(fibptr);
@@ -777,34 +803,36 @@ int aac_get_adapter_info(struct aac_dev* dev)
/* /*
* 57 scatter gather elements * 57 scatter gather elements
*/ */
-	dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
-		sizeof(struct aac_fibhdr) -
-		sizeof(struct aac_write) + sizeof(struct sgmap)) /
-			sizeof(struct sgmap);
-	if (dev->dac_support) {
-		/*
-		 * 38 scatter gather elements
-		 */
-		dev->scsi_host_ptr->sg_tablesize =
-			(dev->max_fib_size -
-			sizeof(struct aac_fibhdr) -
-			sizeof(struct aac_write64) +
-			sizeof(struct sgmap64)) /
-			sizeof(struct sgmap64);
-	}
-	dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-	if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
-		/*
-		 * Worst case size that could cause sg overflow when
-		 * we break up SG elements that are larger than 64KB.
-		 * Would be nice if we could tell the SCSI layer what
-		 * the maximum SG element size can be. Worst case is
-		 * (sg_tablesize-1) 4KB elements with one 64KB
-		 * element.
-		 *	32bit -> 468 or 238KB	64bit -> 424 or 212KB
-		 */
-		dev->scsi_host_ptr->max_sectors =
-		  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
-	}
+	if (!(dev->raw_io_interface)) {
+		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
+			sizeof(struct aac_fibhdr) -
+			sizeof(struct aac_write) + sizeof(struct sgmap)) /
+				sizeof(struct sgmap);
+		if (dev->dac_support) {
+			/*
+			 * 38 scatter gather elements
+			 */
+			dev->scsi_host_ptr->sg_tablesize =
+				(dev->max_fib_size -
+				sizeof(struct aac_fibhdr) -
+				sizeof(struct aac_write64) +
+				sizeof(struct sgmap64)) /
+				sizeof(struct sgmap64);
+		}
+		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
+		if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+			/*
+			 * Worst case size that could cause sg overflow when
+			 * we break up SG elements that are larger than 64KB.
+			 * Would be nice if we could tell the SCSI layer what
+			 * the maximum SG element size can be. Worst case is
+			 * (sg_tablesize-1) 4KB elements with one 64KB
+			 * element.
+			 *	32bit -> 468 or 238KB	64bit -> 424 or 212KB
+			 */
+			dev->scsi_host_ptr->max_sectors =
+			  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
+		}
+	}
fib_complete(fibptr); fib_complete(fibptr);
@@ -814,12 +842,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
} }
static void read_callback(void *context, struct fib * fibptr) static void io_callback(void *context, struct fib * fibptr)
{ {
struct aac_dev *dev; struct aac_dev *dev;
struct aac_read_reply *readreply; struct aac_read_reply *readreply;
struct scsi_cmnd *scsicmd; struct scsi_cmnd *scsicmd;
u32 lba;
u32 cid; u32 cid;
scsicmd = (struct scsi_cmnd *) context; scsicmd = (struct scsi_cmnd *) context;
@@ -827,8 +854,7 @@ static void read_callback(void *context, struct fib * fibptr)
dev = (struct aac_dev *)scsicmd->device->host->hostdata; dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
-	lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
-	dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
+	dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies));
if (fibptr == NULL) if (fibptr == NULL)
BUG(); BUG();
@@ -847,7 +873,7 @@ static void read_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else { else {
#ifdef AAC_DETAILED_STATUS_INFO #ifdef AAC_DETAILED_STATUS_INFO
printk(KERN_WARNING "read_callback: io failed, status = %d\n", printk(KERN_WARNING "io_callback: io failed, status = %d\n",
le32_to_cpu(readreply->status)); le32_to_cpu(readreply->status));
#endif #endif
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
@@ -867,53 +893,6 @@ static void read_callback(void *context, struct fib * fibptr)
aac_io_done(scsicmd); aac_io_done(scsicmd);
} }
static void write_callback(void *context, struct fib * fibptr)
{
struct aac_dev *dev;
struct aac_write_reply *writereply;
struct scsi_cmnd *scsicmd;
u32 lba;
u32 cid;
scsicmd = (struct scsi_cmnd *) context;
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
if (fibptr == NULL)
BUG();
if(scsicmd->use_sg)
pci_unmap_sg(dev->pdev,
(struct scatterlist *)scsicmd->buffer,
scsicmd->use_sg,
scsicmd->sc_data_direction);
else if(scsicmd->request_bufflen)
pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
scsicmd->request_bufflen,
scsicmd->sc_data_direction);
writereply = (struct aac_write_reply *) fib_data(fibptr);
if (le32_to_cpu(writereply->status) == ST_OK)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
else {
printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR,
SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
sizeof(struct sense_data));
}
fib_complete(fibptr);
fib_free(fibptr);
aac_io_done(scsicmd);
}
static int aac_read(struct scsi_cmnd * scsicmd, int cid) static int aac_read(struct scsi_cmnd * scsicmd, int cid)
{ {
u32 lba; u32 lba;
@@ -954,7 +933,32 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
fib_init(cmd_fibcontext); fib_init(cmd_fibcontext);
if (dev->dac_support == 1) { if (dev->raw_io_interface) {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
readcmd->block[0] = cpu_to_le32(lba);
readcmd->block[1] = 0;
readcmd->count = cpu_to_le32(count<<9);
readcmd->cid = cpu_to_le16(cid);
readcmd->flags = cpu_to_le16(1);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
aac_build_sgraw(scsicmd, &readcmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
BUG();
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerRawIo,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else if (dev->dac_support == 1) {
struct aac_read64 *readcmd; struct aac_read64 *readcmd;
readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext); readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
readcmd->command = cpu_to_le32(VM_CtHostRead64); readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -968,7 +972,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
fibsize = sizeof(struct aac_read64) + fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) * ((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64)); sizeof (struct sgentry64));
BUG_ON (fibsize > (sizeof(struct hw_fib) - BUG_ON (fibsize > (dev->max_fib_size -
sizeof(struct aac_fibhdr))); sizeof(struct aac_fibhdr)));
/* /*
* Now send the Fib to the adapter * Now send the Fib to the adapter
@@ -978,7 +982,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
fibsize, fibsize,
FsaNormal, FsaNormal,
0, 1, 0, 1,
(fib_callback) read_callback, (fib_callback) io_callback,
(void *) scsicmd); (void *) scsicmd);
} else { } else {
struct aac_read *readcmd; struct aac_read *readcmd;
@@ -1002,7 +1006,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
fibsize, fibsize,
FsaNormal, FsaNormal,
0, 1, 0, 1,
(fib_callback) read_callback, (fib_callback) io_callback,
(void *) scsicmd); (void *) scsicmd);
} }
@@ -1061,7 +1065,32 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
} }
fib_init(cmd_fibcontext); fib_init(cmd_fibcontext);
if(dev->dac_support == 1) { if (dev->raw_io_interface) {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
writecmd->block[0] = cpu_to_le32(lba);
writecmd->block[1] = 0;
writecmd->count = cpu_to_le32(count<<9);
writecmd->cid = cpu_to_le16(cid);
writecmd->flags = 0;
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
aac_build_sgraw(scsicmd, &writecmd->sg);
fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))
BUG();
/*
* Now send the Fib to the adapter
*/
status = fib_send(ContainerRawIo,
cmd_fibcontext,
fibsize,
FsaNormal,
0, 1,
(fib_callback) io_callback,
(void *) scsicmd);
} else if (dev->dac_support == 1) {
struct aac_write64 *writecmd; struct aac_write64 *writecmd;
writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext); writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
writecmd->command = cpu_to_le32(VM_CtHostWrite64); writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1085,7 +1114,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
fibsize, fibsize,
FsaNormal, FsaNormal,
0, 1, 0, 1,
(fib_callback) write_callback, (fib_callback) io_callback,
(void *) scsicmd); (void *) scsicmd);
} else { } else {
struct aac_write *writecmd; struct aac_write *writecmd;
@@ -1111,7 +1140,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
fibsize, fibsize,
FsaNormal, FsaNormal,
0, 1, 0, 1,
(fib_callback) write_callback, (fib_callback) io_callback,
(void *) scsicmd); (void *) scsicmd);
} }
@@ -1340,44 +1369,45 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
switch (scsicmd->cmnd[0]) { switch (scsicmd->cmnd[0]) {
case INQUIRY: case INQUIRY:
{ {
struct inquiry_data *inq_data_ptr; struct inquiry_data inq_data;
dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id)); dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
-			inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
-			memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
+			memset(&inq_data, 0, sizeof (struct inquiry_data));
inq_data_ptr->inqd_ver = 2; /* claim compliance to SCSI-2 */ inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
inq_data_ptr->inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */ inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
inq_data_ptr->inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
inq_data_ptr->inqd_len = 31; inq_data.inqd_len = 31;
/*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
inq_data_ptr->inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
/* /*
* Set the Vendor, Product, and Revision Level * Set the Vendor, Product, and Revision Level
* see: <vendor>.c i.e. aac.c * see: <vendor>.c i.e. aac.c
*/ */
if (scsicmd->device->id == host->this_id) { if (scsicmd->device->id == host->this_id) {
setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *))); setinqstr(cardtype, (void *) (inq_data.inqd_vid), (sizeof(container_types)/sizeof(char *)));
inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */ inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd); scsicmd->scsi_done(scsicmd);
return 0; return 0;
} }
setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type); setinqstr(cardtype, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
inq_data_ptr->inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
return aac_get_container_name(scsicmd, cid); return aac_get_container_name(scsicmd, cid);
} }
case READ_CAPACITY: case READ_CAPACITY:
{ {
u32 capacity; u32 capacity;
char *cp; char cp[8];
dprintk((KERN_DEBUG "READ CAPACITY command.\n")); dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
if (fsa_dev_ptr[cid].size <= 0x100000000LL) if (fsa_dev_ptr[cid].size <= 0x100000000LL)
capacity = fsa_dev_ptr[cid].size - 1; capacity = fsa_dev_ptr[cid].size - 1;
else else
capacity = (u32)-1; capacity = (u32)-1;
cp = scsicmd->request_buffer;
cp[0] = (capacity >> 24) & 0xff; cp[0] = (capacity >> 24) & 0xff;
cp[1] = (capacity >> 16) & 0xff; cp[1] = (capacity >> 16) & 0xff;
cp[2] = (capacity >> 8) & 0xff; cp[2] = (capacity >> 8) & 0xff;
@@ -1386,6 +1416,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
cp[5] = 0; cp[5] = 0;
cp[6] = 2; cp[6] = 2;
cp[7] = 0; cp[7] = 0;
aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd); scsicmd->scsi_done(scsicmd);
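
As a worked example of the encoding above (not from the commit): a container of 2,097,152 512-byte blocks (1 GiB) has fsa_dev_ptr[cid].size = 0x00200000, so capacity, the last LBA, is 0x001FFFFF, and aac_internal_transfer() copies the eight bytes 00 1F FF FF 00 00 02 00: the big-endian last LBA followed by the big-endian block length of 512.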
@@ -1395,15 +1426,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case MODE_SENSE: case MODE_SENSE:
{ {
char *mode_buf; char mode_buf[4];
dprintk((KERN_DEBUG "MODE SENSE command.\n")); dprintk((KERN_DEBUG "MODE SENSE command.\n"));
mode_buf = scsicmd->request_buffer;
mode_buf[0] = 3; /* Mode data length */ mode_buf[0] = 3; /* Mode data length */
mode_buf[1] = 0; /* Medium type - default */ mode_buf[1] = 0; /* Medium type - default */
mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */
mode_buf[3] = 0; /* Block descriptor length */ mode_buf[3] = 0; /* Block descriptor length */
aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd); scsicmd->scsi_done(scsicmd);
@@ -1411,10 +1442,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
} }
case MODE_SENSE_10: case MODE_SENSE_10:
{ {
char *mode_buf; char mode_buf[8];
dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
mode_buf = scsicmd->request_buffer;
mode_buf[0] = 0; /* Mode data length (MSB) */ mode_buf[0] = 0; /* Mode data length (MSB) */
mode_buf[1] = 6; /* Mode data length (LSB) */ mode_buf[1] = 6; /* Mode data length (LSB) */
mode_buf[2] = 0; /* Medium type - default */ mode_buf[2] = 0; /* Medium type - default */
@@ -1423,6 +1453,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
mode_buf[5] = 0; /* reserved */ mode_buf[5] = 0; /* reserved */
mode_buf[6] = 0; /* Block descriptor length (MSB) */ mode_buf[6] = 0; /* Block descriptor length (MSB) */
mode_buf[7] = 0; /* Block descriptor length (LSB) */ mode_buf[7] = 0; /* Block descriptor length (LSB) */
aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd); scsicmd->scsi_done(scsicmd);
@@ -1894,7 +1925,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
srbcmd->id = cpu_to_le32(scsicmd->device->id); srbcmd->id = cpu_to_le32(scsicmd->device->id);
srbcmd->lun = cpu_to_le32(scsicmd->device->lun); srbcmd->lun = cpu_to_le32(scsicmd->device->lun);
srbcmd->flags = cpu_to_le32(flag); srbcmd->flags = cpu_to_le32(flag);
timeout = (scsicmd->timeout-jiffies)/HZ; timeout = scsicmd->timeout_per_command/HZ;
if(timeout == 0){ if(timeout == 0){
timeout = 1; timeout = 1;
} }
@@ -2077,6 +2108,76 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
return byte_count; return byte_count;
} }
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
{
struct Scsi_Host *host = scsicmd->device->host;
struct aac_dev *dev = (struct aac_dev *)host->hostdata;
unsigned long byte_count = 0;
// Get rid of old data
psg->count = 0;
psg->sg[0].next = 0;
psg->sg[0].prev = 0;
psg->sg[0].addr[0] = 0;
psg->sg[0].addr[1] = 0;
psg->sg[0].count = 0;
psg->sg[0].flags = 0;
if (scsicmd->use_sg) {
struct scatterlist *sg;
int i;
int sg_count;
sg = (struct scatterlist *) scsicmd->request_buffer;
sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
scsicmd->sc_data_direction);
for (i = 0; i < sg_count; i++) {
int count = sg_dma_len(sg);
u64 addr = sg_dma_address(sg);
psg->sg[i].next = 0;
psg->sg[i].prev = 0;
psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
psg->sg[i].count = cpu_to_le32(count);
psg->sg[i].flags = 0;
byte_count += count;
sg++;
}
psg->count = cpu_to_le32(sg_count);
/* hba wants the size to be exact */
if(byte_count > scsicmd->request_bufflen){
u32 temp = le32_to_cpu(psg->sg[i-1].count) -
(byte_count - scsicmd->request_bufflen);
psg->sg[i-1].count = cpu_to_le32(temp);
byte_count = scsicmd->request_bufflen;
}
/* Check for command underflow */
if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
byte_count, scsicmd->underflow);
}
}
else if(scsicmd->request_bufflen) {
int count;
u64 addr;
scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
scsicmd->request_buffer,
scsicmd->request_bufflen,
scsicmd->sc_data_direction);
addr = scsicmd->SCp.dma_handle;
count = scsicmd->request_bufflen;
psg->count = cpu_to_le32(1);
psg->sg[0].next = 0;
psg->sg[0].prev = 0;
psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
psg->sg[0].count = cpu_to_le32(count);
psg->sg[0].flags = 0;
byte_count = scsicmd->request_bufflen;
}
return byte_count;
}
#ifdef AAC_DETAILED_STATUS_INFO #ifdef AAC_DETAILED_STATUS_INFO
struct aac_srb_status_info { struct aac_srb_status_info {


@@ -110,6 +110,22 @@ struct user_sgentry64 {
u32 count; /* Length. */ u32 count; /* Length. */
}; };
struct sgentryraw {
__le32 next; /* reserved for F/W use */
__le32 prev; /* reserved for F/W use */
__le32 addr[2];
__le32 count;
__le32 flags; /* reserved for F/W use */
};
struct user_sgentryraw {
u32 next; /* reserved for F/W use */
u32 prev; /* reserved for F/W use */
u32 addr[2];
u32 count;
u32 flags; /* reserved for F/W use */
};
/* /*
* SGMAP * SGMAP
* *
@@ -137,6 +153,16 @@ struct user_sgmap64 {
struct user_sgentry64 sg[1]; struct user_sgentry64 sg[1];
}; };
struct sgmapraw {
__le32 count;
struct sgentryraw sg[1];
};
struct user_sgmapraw {
u32 count;
struct user_sgentryraw sg[1];
};
struct creation_info struct creation_info
{ {
u8 buildnum; /* e.g., 588 */ u8 buildnum; /* e.g., 588 */
@@ -351,6 +377,7 @@ struct hw_fib {
*/ */
#define ContainerCommand 500 #define ContainerCommand 500
#define ContainerCommand64 501 #define ContainerCommand64 501
#define ContainerRawIo 502
/* /*
* Cluster Commands * Cluster Commands
*/ */
@@ -456,6 +483,7 @@ struct adapter_ops
{ {
void (*adapter_interrupt)(struct aac_dev *dev); void (*adapter_interrupt)(struct aac_dev *dev);
void (*adapter_notify)(struct aac_dev *dev, u32 event); void (*adapter_notify)(struct aac_dev *dev, u32 event);
void (*adapter_disable_int)(struct aac_dev *dev);
int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4); int (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
int (*adapter_check_health)(struct aac_dev *dev); int (*adapter_check_health)(struct aac_dev *dev);
}; };
@@ -981,6 +1009,9 @@ struct aac_dev
u8 nondasd_support; u8 nondasd_support;
u8 dac_support; u8 dac_support;
u8 raid_scsi_mode; u8 raid_scsi_mode;
/* macro side-effects BEWARE */
# define raw_io_interface \
init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
u8 printf_enabled; u8 printf_enabled;
}; };
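A note on the raw_io_interface macro added above: it is an object-like macro, so a reference such as dev->raw_io_interface is expanded textually by the preprocessor; it piggybacks on the adapter's init pointer instead of adding a real flag field, which is what the "macro side-effects BEWARE" comment warns about. A minimal illustration, not part of the commit (the init member of struct aac_dev is assumed from context):

	/*
	 * Illustrative only.  After preprocessing,
	 *
	 *	if (dev->raw_io_interface)
	 *
	 * reads as
	 *
	 *	if (dev->init->InitStructRevision ==
	 *			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4))
	 *
	 * so the "flag" is really a test against the negotiated init
	 * structure revision; no storage is added to struct aac_dev.
	 */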
@@ -990,6 +1021,9 @@ struct aac_dev
#define aac_adapter_notify(dev, event) \ #define aac_adapter_notify(dev, event) \
(dev)->a_ops.adapter_notify(dev, event) (dev)->a_ops.adapter_notify(dev, event)
#define aac_adapter_disable_int(dev) \
(dev)->a_ops.adapter_disable_int(dev)
#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
@@ -1156,6 +1190,17 @@ struct aac_write_reply
__le32 committed; __le32 committed;
}; };
struct aac_raw_io
{
__le32 block[2];
__le32 count;
__le16 cid;
__le16 flags; /* 00 W, 01 R */
__le16 bpTotal; /* reserved for F/W use */
__le16 bpComplete; /* reserved for F/W use */
struct sgmapraw sg;
};
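For orientation, a minimal sketch of how the raw I/O request above could be populated on a read, pairing it with the aac_build_sgraw() routine added earlier in this commit. The function name and its caller are illustrative assumptions; only the structure layouts and aac_build_sgraw() itself appear in the diff:

	/* Sketch only, not from this commit. */
	static void example_fill_raw_read(struct aac_raw_io *io,
					  struct scsi_cmnd *cmd,
					  u16 cid, u64 lba)
	{
		/* 64-bit LBA split into two little-endian 32-bit words */
		io->block[0] = cpu_to_le32((u32)(lba & 0xffffffff));
		io->block[1] = cpu_to_le32((u32)(lba >> 32));
		io->cid = cpu_to_le16(cid);
		io->flags = cpu_to_le16(1);	/* 01 = read, 00 = write */
		io->bpTotal = 0;		/* reserved for F/W use */
		io->bpComplete = 0;
		/* byte count comes back from the raw scatter/gather builder */
		io->count = cpu_to_le32((u32)aac_build_sgraw(cmd, &io->sg));
	}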
#define CT_FLUSH_CACHE 129 #define CT_FLUSH_CACHE 129
struct aac_synchronize { struct aac_synchronize {
__le32 command; /* VM_ContainerConfig */ __le32 command; /* VM_ContainerConfig */
@@ -1196,7 +1241,7 @@ struct aac_srb
}; };
/* /*
* This and assocated data structs are used by the * This and associated data structs are used by the
* ioctl caller and are in cpu order. * ioctl caller and are in cpu order.
*/ */
struct user_aac_srb struct user_aac_srb
@@ -1508,11 +1553,12 @@ struct fib_ioctl
struct revision struct revision
{ {
u32 compat; __le32 compat;
u32 version; __le32 version;
u32 build; __le32 build;
}; };
/* /*
* Ugly - non Linux like ioctl coding for back compat. * Ugly - non Linux like ioctl coding for back compat.
*/ */
@@ -1733,3 +1779,4 @@ int aac_get_adapter_info(struct aac_dev* dev);
int aac_send_shutdown(struct aac_dev *dev); int aac_send_shutdown(struct aac_dev *dev);
extern int numacb; extern int numacb;
extern int acbsize; extern int acbsize;
extern char aac_driver_version[];


@@ -287,7 +287,6 @@ return_fib:
kfree(fib->hw_fib); kfree(fib->hw_fib);
kfree(fib); kfree(fib);
status = 0; status = 0;
fibctx->jiffies = jiffies/HZ;
} else { } else {
spin_unlock_irqrestore(&dev->fib_lock, flags); spin_unlock_irqrestore(&dev->fib_lock, flags);
if (f.wait) { if (f.wait) {
@@ -302,6 +301,7 @@ return_fib:
status = -EAGAIN; status = -EAGAIN;
} }
} }
fibctx->jiffies = jiffies/HZ;
return status; return status;
} }
@@ -405,10 +405,20 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
static int check_revision(struct aac_dev *dev, void __user *arg) static int check_revision(struct aac_dev *dev, void __user *arg)
{ {
struct revision response; struct revision response;
char *driver_version = aac_driver_version;
u32 version;
response.compat = 1; response.compat = cpu_to_le32(1);
response.version = le32_to_cpu(dev->adapter_info.kernelrev); version = (simple_strtol(driver_version,
response.build = le32_to_cpu(dev->adapter_info.kernelbuild); &driver_version, 10) << 24) | 0x00000400;
version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
version += simple_strtol(driver_version + 1, NULL, 10);
response.version = cpu_to_le32(version);
# if (defined(AAC_DRIVER_BUILD))
response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
response.build = cpu_to_le32(9999);
# endif
if (copy_to_user(arg, &response, sizeof(response))) if (copy_to_user(arg, &response, sizeof(response)))
return -EFAULT; return -EFAULT;


@@ -44,7 +44,9 @@
#include "aacraid.h" #include "aacraid.h"
struct aac_common aac_config; struct aac_common aac_config = {
.irq_mod = 1
};
static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign) static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
{ {


@@ -254,6 +254,7 @@ static void fib_dealloc(struct fib * fibptr)
static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify) static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{ {
struct aac_queue * q; struct aac_queue * q;
unsigned long idx;
/* /*
* All of the queues wrap when they reach the end, so we check * All of the queues wrap when they reach the end, so we check
@@ -263,10 +264,23 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
*/ */
q = &dev->queues->queue[qid]; q = &dev->queues->queue[qid];
*index = le32_to_cpu(*(q->headers.producer)); idx = *index = le32_to_cpu(*(q->headers.producer));
if ((*index - 2) == le32_to_cpu(*(q->headers.consumer))) /* Interrupt Moderation, only interrupt for first two entries */
if (idx != le32_to_cpu(*(q->headers.consumer))) {
if (--idx == 0) {
if (qid == AdapHighCmdQueue)
idx = ADAP_HIGH_CMD_ENTRIES;
else if (qid == AdapNormCmdQueue)
idx = ADAP_NORM_CMD_ENTRIES;
else if (qid == AdapHighRespQueue)
idx = ADAP_HIGH_RESP_ENTRIES;
else if (qid == AdapNormRespQueue)
idx = ADAP_NORM_RESP_ENTRIES;
}
if (idx != le32_to_cpu(*(q->headers.consumer)))
*nonotify = 1; *nonotify = 1;
}
if (qid == AdapHighCmdQueue) { if (qid == AdapHighCmdQueue) {
if (*index >= ADAP_HIGH_CMD_ENTRIES) if (*index >= ADAP_HIGH_CMD_ENTRIES)


@@ -27,8 +27,11 @@
* Abstract: Linux Driver entry module for Adaptec RAID Array Controller * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
*/ */
#define AAC_DRIVER_VERSION "1.1.2-lk2" #define AAC_DRIVER_VERSION "1.1-4"
#define AAC_DRIVER_BUILD_DATE __DATE__ #ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__
#define AAC_DRIVERNAME "aacraid" #define AAC_DRIVERNAME "aacraid"
#include <linux/compat.h> #include <linux/compat.h>
@@ -58,16 +61,24 @@
#include "aacraid.h" #include "aacraid.h"
#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
#else
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE
#endif
MODULE_AUTHOR("Red Hat Inc and Adaptec"); MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
"Adaptec Advanced Raid Products, " "Adaptec Advanced Raid Products, "
"and HP NetRAID-4M SCSI driver"); "and HP NetRAID-4M SCSI driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_VERSION); MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
static LIST_HEAD(aac_devices); static LIST_HEAD(aac_devices);
static int aac_cfg_major = -1; static int aac_cfg_major = -1;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
/* /*
* Because of the way Linux names scsi devices, the order in this table has * Because of the way Linux names scsi devices, the order in this table has
@@ -109,36 +120,39 @@ static struct pci_device_id aac_pci_tbl[] = {
{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */ { 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5085AU (Hurricane) */
{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */ { 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */ { 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 32 }, /* Themisto Jupiter Platform */ { 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 32 }, /* Themisto Jupiter Platform */ { 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 33 }, /* Callisto Jupiter Platform */ { 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 34 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ { 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 35 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */ { 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 36 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */ { 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 37 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */ { 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 38 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */ { 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 39 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */ { 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 40 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */ { 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 41 }, /* AAR-2610SA PCI SATA 6ch */ { 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 42 }, /* ASR-2240S (SabreExpress) */ { 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 43 }, /* ASR-4005SAS */ { 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 44 }, /* IBM 8i (AvonPark) */ { 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005SAS */
{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 44 }, /* IBM 8i (AvonPark Lite) */ { 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 45 }, /* ASR-4000SAS (BlackBird) */ { 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 46 }, /* ASR-4800SAS (Marauder-X) */ { 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 47 }, /* ASR-4805SAS (Marauder-E) */ { 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 48 }, /* ASR-4810SAS (Hurricane */ { 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000SAS (BlackBird) */
{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-4810SAS (Hurricane */
{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 49 }, /* Perc 320/DC*/ { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 50 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 51 }, /* Adaptec 5400S (Mustang)*/ { 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 52 }, /* Dell PERC2/QC */ { 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 53 }, /* HP NetRAID-4M */ { 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 54 }, /* Dell Catchall */ { 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 55 }, /* Legend Catchall */ { 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 56 }, /* Adaptec Catch All */ { 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 57 }, /* Adaptec Rocket Catch All */ { 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
{ 0,} { 0,}
}; };
MODULE_DEVICE_TABLE(pci, aac_pci_tbl); MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -180,8 +194,9 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */ { aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */ { aac_rkt_init, "aacraid", "ICP ", "ICP5085AU ", 1 }, /* ICP5085AU (Hurricane) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */ { aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */ { aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
{ aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
{ NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */ { NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */ { aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
@@ -195,10 +210,12 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005SAS ", 1 }, /* ASR-4005SAS */
{ aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */ { aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000SAS ", 1 }, /* ASR-4000SAS (BlackBird & AvonPark) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */ { aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */ { aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-4810SAS ", 1 }, /* ASR-4810SAS (Hurricane) */
{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
@@ -839,11 +856,12 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
return 0; return 0;
out_deinit: out_deinit:
kill_proc(aac->thread_pid, SIGKILL, 0); kill_proc(aac->thread_pid, SIGKILL, 0);
wait_for_completion(&aac->aif_completion); wait_for_completion(&aac->aif_completion);
aac_send_shutdown(aac); aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
fib_map_free(aac); fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
kfree(aac->queues); kfree(aac->queues);
@@ -860,6 +878,13 @@ out_deinit:
return error; return error;
} }
static void aac_shutdown(struct pci_dev *dev)
{
struct Scsi_Host *shost = pci_get_drvdata(dev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
aac_send_shutdown(aac);
}
static void __devexit aac_remove_one(struct pci_dev *pdev) static void __devexit aac_remove_one(struct pci_dev *pdev)
{ {
struct Scsi_Host *shost = pci_get_drvdata(pdev); struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -871,6 +896,7 @@ static void __devexit aac_remove_one(struct pci_dev *pdev)
wait_for_completion(&aac->aif_completion); wait_for_completion(&aac->aif_completion);
aac_send_shutdown(aac); aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
fib_map_free(aac); fib_map_free(aac);
pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
aac->comm_phys); aac->comm_phys);
@@ -891,14 +917,15 @@ static struct pci_driver aac_pci_driver = {
.id_table = aac_pci_tbl, .id_table = aac_pci_tbl,
.probe = aac_probe_one, .probe = aac_probe_one,
.remove = __devexit_p(aac_remove_one), .remove = __devexit_p(aac_remove_one),
.shutdown = aac_shutdown,
}; };
static int __init aac_init(void) static int __init aac_init(void)
{ {
int error; int error;
printk(KERN_INFO "Red Hat/Adaptec aacraid driver (%s %s)\n", printk(KERN_INFO "Adaptec %s driver (%s)\n",
AAC_DRIVER_VERSION, AAC_DRIVER_BUILD_DATE); AAC_DRIVERNAME, aac_driver_version);
error = pci_module_init(&aac_pci_driver); error = pci_module_init(&aac_pci_driver);
if (error) if (error)
@@ -909,6 +936,7 @@ static int __init aac_init(void)
printk(KERN_WARNING printk(KERN_WARNING
"aacraid: unable to register \"aac\" device.\n"); "aacraid: unable to register \"aac\" device.\n");
} }
return 0; return 0;
} }


@@ -87,6 +87,16 @@ static irqreturn_t aac_rkt_intr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE; return IRQ_NONE;
} }
/**
* aac_rkt_disable_interrupt - Disable interrupts
* @dev: Adapter
*/
static void aac_rkt_disable_interrupt(struct aac_dev *dev)
{
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}
/** /**
* rkt_sync_cmd - send a command and wait * rkt_sync_cmd - send a command and wait
* @dev: Adapter * @dev: Adapter
@@ -412,10 +422,19 @@ int aac_rkt_init(struct aac_dev *dev)
* Fill in the function dispatch table. * Fill in the function dispatch table.
*/ */
dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter; dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
dev->a_ops.adapter_notify = aac_rkt_notify_adapter; dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
dev->a_ops.adapter_sync_cmd = rkt_sync_cmd; dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
dev->a_ops.adapter_check_health = aac_rkt_check_health; dev->a_ops.adapter_check_health = aac_rkt_check_health;
/*
* First clear out all interrupts. Then enable the ones that we
* can handle.
*/
rkt_writeb(dev, MUnit.OIMR, 0xff);
rkt_writel(dev, MUnit.ODR, 0xffffffff);
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
if (aac_init_adapter(dev) == NULL) if (aac_init_adapter(dev) == NULL)
goto error_irq; goto error_irq;
/* /*
@@ -438,6 +457,7 @@ error_kfree:
kfree(dev->queues); kfree(dev->queues);
error_irq: error_irq:
rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev); free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap: error_iounmap:


@@ -87,6 +87,16 @@ static irqreturn_t aac_rx_intr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE; return IRQ_NONE;
} }
/**
* aac_rx_disable_interrupt - Disable interrupts
* @dev: Adapter
*/
static void aac_rx_disable_interrupt(struct aac_dev *dev)
{
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
}
/** /**
* rx_sync_cmd - send a command and wait * rx_sync_cmd - send a command and wait
* @dev: Adapter * @dev: Adapter
@@ -412,10 +422,19 @@ int aac_rx_init(struct aac_dev *dev)
* Fill in the function dispatch table. * Fill in the function dispatch table.
*/ */
dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter; dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
dev->a_ops.adapter_notify = aac_rx_notify_adapter; dev->a_ops.adapter_notify = aac_rx_notify_adapter;
dev->a_ops.adapter_sync_cmd = rx_sync_cmd; dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
dev->a_ops.adapter_check_health = aac_rx_check_health; dev->a_ops.adapter_check_health = aac_rx_check_health;
/*
* First clear out all interrupts. Then enable the ones that we
* can handle.
*/
rx_writeb(dev, MUnit.OIMR, 0xff);
rx_writel(dev, MUnit.ODR, 0xffffffff);
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
if (aac_init_adapter(dev) == NULL) if (aac_init_adapter(dev) == NULL)
goto error_irq; goto error_irq;
/* /*
@@ -438,6 +457,7 @@ error_kfree:
kfree(dev->queues); kfree(dev->queues);
error_irq: error_irq:
rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev); free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap: error_iounmap:


@@ -81,6 +81,16 @@ static irqreturn_t aac_sa_intr(int irq, void *dev_id, struct pt_regs *regs)
return IRQ_NONE; return IRQ_NONE;
} }
/**
* aac_sa_disable_interrupt - disable interrupt
* @dev: Which adapter to disable.
*/
static void aac_sa_disable_interrupt (struct aac_dev *dev)
{
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
}
/** /**
* aac_sa_notify_adapter - handle adapter notification * aac_sa_notify_adapter - handle adapter notification
* @dev: Adapter that notification is for * @dev: Adapter that notification is for
@@ -214,9 +224,8 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
static void aac_sa_interrupt_adapter (struct aac_dev *dev) static void aac_sa_interrupt_adapter (struct aac_dev *dev)
{ {
u32 ret;
sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
&ret, NULL, NULL, NULL, NULL); NULL, NULL, NULL, NULL, NULL);
} }
/** /**
@@ -352,10 +361,18 @@ int aac_sa_init(struct aac_dev *dev)
*/ */
dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter; dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
dev->a_ops.adapter_notify = aac_sa_notify_adapter; dev->a_ops.adapter_notify = aac_sa_notify_adapter;
dev->a_ops.adapter_sync_cmd = sa_sync_cmd; dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
dev->a_ops.adapter_check_health = aac_sa_check_health; dev->a_ops.adapter_check_health = aac_sa_check_health;
/*
* First clear out all interrupts. Then enable the ones that
* we can handle.
*/
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
if(aac_init_adapter(dev) == NULL) if(aac_init_adapter(dev) == NULL)
goto error_irq; goto error_irq;
@@ -381,6 +398,7 @@ error_kfree:
kfree(dev->queues); kfree(dev->queues);
error_irq: error_irq:
sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
free_irq(dev->scsi_host_ptr->irq, (void *)dev); free_irq(dev->scsi_host_ptr->irq, (void *)dev);
error_iounmap: error_iounmap:


@@ -9200,8 +9200,8 @@ asc_prt_scsi_cmnd(struct scsi_cmnd *s)
(unsigned) s->serial_number, s->retries, s->allowed); (unsigned) s->serial_number, s->retries, s->allowed);
printk( printk(
" timeout_per_command %d, timeout_total %d, timeout %d\n", " timeout_per_command %d\n",
s->timeout_per_command, s->timeout_total, s->timeout); s->timeout_per_command);
printk( printk(
" scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n", " scsi_done 0x%lx, done 0x%lx, host_scribble 0x%lx, result 0x%x\n",


@@ -5,6 +5,7 @@
config SCSI_AIC79XX config SCSI_AIC79XX
tristate "Adaptec AIC79xx U320 support" tristate "Adaptec AIC79xx U320 support"
depends on PCI && SCSI depends on PCI && SCSI
select SCSI_SPI_ATTRS
help help
This driver supports all of Adaptec's Ultra 320 PCI-X This driver supports all of Adaptec's Ultra 320 PCI-X
based SCSI controllers. based SCSI controllers.


@@ -126,7 +126,6 @@ aic7770_find_device(uint32_t id)
int int
aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io) aic7770_config(struct ahc_softc *ahc, struct aic7770_identity *entry, u_int io)
{ {
u_long l;
int error; int error;
int have_seeprom; int have_seeprom;
u_int hostconf; u_int hostconf;


@@ -1247,9 +1247,6 @@ struct ahd_softc {
uint16_t user_tagenable;/* Tagged Queuing allowed */ uint16_t user_tagenable;/* Tagged Queuing allowed */
}; };
TAILQ_HEAD(ahd_softc_tailq, ahd_softc);
extern struct ahd_softc_tailq ahd_tailq;
/*************************** IO Cell Configuration ****************************/ /*************************** IO Cell Configuration ****************************/
#define AHD_PRECOMP_SLEW_INDEX \ #define AHD_PRECOMP_SLEW_INDEX \
(AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0) (AHD_ANNEXCOL_PRECOMP_SLEW - AHD_ANNEXCOL_PER_DEV0)
@@ -1374,8 +1371,6 @@ void ahd_enable_coalescing(struct ahd_softc *ahd,
void ahd_pause_and_flushwork(struct ahd_softc *ahd); void ahd_pause_and_flushwork(struct ahd_softc *ahd);
int ahd_suspend(struct ahd_softc *ahd); int ahd_suspend(struct ahd_softc *ahd);
int ahd_resume(struct ahd_softc *ahd); int ahd_resume(struct ahd_softc *ahd);
void ahd_softc_insert(struct ahd_softc *);
struct ahd_softc *ahd_find_softc(struct ahd_softc *ahd);
void ahd_set_unit(struct ahd_softc *, int); void ahd_set_unit(struct ahd_softc *, int);
void ahd_set_name(struct ahd_softc *, char *); void ahd_set_name(struct ahd_softc *, char *);
struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx); struct scb *ahd_get_scb(struct ahd_softc *ahd, u_int col_idx);
@@ -1524,7 +1519,6 @@ void ahd_print_scb(struct scb *scb);
void ahd_print_devinfo(struct ahd_softc *ahd, void ahd_print_devinfo(struct ahd_softc *ahd,
struct ahd_devinfo *devinfo); struct ahd_devinfo *devinfo);
void ahd_dump_sglist(struct scb *scb); void ahd_dump_sglist(struct scb *scb);
void ahd_dump_all_cards_state(void);
void ahd_dump_card_state(struct ahd_softc *ahd); void ahd_dump_card_state(struct ahd_softc *ahd);
int ahd_print_register(ahd_reg_parse_entry_t *table, int ahd_print_register(ahd_reg_parse_entry_t *table,
u_int num_entries, u_int num_entries,


@@ -52,8 +52,6 @@
#include <dev/aic7xxx/aicasm/aicasm_insformat.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif #endif
/******************************** Globals *************************************/
struct ahd_softc_tailq ahd_tailq = TAILQ_HEAD_INITIALIZER(ahd_tailq);
/***************************** Lookup Tables **********************************/ /***************************** Lookup Tables **********************************/
char *ahd_chip_names[] = char *ahd_chip_names[] =
@@ -5179,74 +5177,6 @@ ahd_softc_init(struct ahd_softc *ahd)
return (0); return (0);
} }
void
ahd_softc_insert(struct ahd_softc *ahd)
{
struct ahd_softc *list_ahd;
#if AHD_PCI_CONFIG > 0
/*
* Second Function PCI devices need to inherit some
* settings from function 0.
*/
if ((ahd->features & AHD_MULTI_FUNC) != 0) {
TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
ahd_dev_softc_t list_pci;
ahd_dev_softc_t pci;
list_pci = list_ahd->dev_softc;
pci = ahd->dev_softc;
if (ahd_get_pci_slot(list_pci) == ahd_get_pci_slot(pci)
&& ahd_get_pci_bus(list_pci) == ahd_get_pci_bus(pci)) {
struct ahd_softc *master;
struct ahd_softc *slave;
if (ahd_get_pci_function(list_pci) == 0) {
master = list_ahd;
slave = ahd;
} else {
master = ahd;
slave = list_ahd;
}
slave->flags &= ~AHD_BIOS_ENABLED;
slave->flags |=
master->flags & AHD_BIOS_ENABLED;
break;
}
}
}
#endif
/*
* Insertion sort into our list of softcs.
*/
list_ahd = TAILQ_FIRST(&ahd_tailq);
while (list_ahd != NULL
&& ahd_softc_comp(ahd, list_ahd) <= 0)
list_ahd = TAILQ_NEXT(list_ahd, links);
if (list_ahd != NULL)
TAILQ_INSERT_BEFORE(list_ahd, ahd, links);
else
TAILQ_INSERT_TAIL(&ahd_tailq, ahd, links);
ahd->init_level++;
}
/*
* Verify that the passed in softc pointer is for a
* controller that is still configured.
*/
struct ahd_softc *
ahd_find_softc(struct ahd_softc *ahd)
{
struct ahd_softc *list_ahd;
TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
if (list_ahd == ahd)
return (ahd);
}
return (NULL);
}
void void
ahd_set_unit(struct ahd_softc *ahd, int unit) ahd_set_unit(struct ahd_softc *ahd, int unit)
{ {
@@ -7902,18 +7832,10 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
static void static void
ahd_reset_poll(void *arg) ahd_reset_poll(void *arg)
{ {
struct ahd_softc *ahd; struct ahd_softc *ahd = arg;
u_int scsiseq1; u_int scsiseq1;
u_long l;
u_long s; u_long s;
ahd_list_lock(&l);
ahd = ahd_find_softc((struct ahd_softc *)arg);
if (ahd == NULL) {
printf("ahd_reset_poll: Instance %p no longer exists\n", arg);
ahd_list_unlock(&l);
return;
}
ahd_lock(ahd, &s); ahd_lock(ahd, &s);
ahd_pause(ahd); ahd_pause(ahd);
ahd_update_modes(ahd); ahd_update_modes(ahd);
@@ -7924,7 +7846,6 @@ ahd_reset_poll(void *arg)
ahd_reset_poll, ahd); ahd_reset_poll, ahd);
ahd_unpause(ahd); ahd_unpause(ahd);
ahd_unlock(ahd, &s); ahd_unlock(ahd, &s);
ahd_list_unlock(&l);
return; return;
} }
@@ -7936,25 +7857,16 @@ ahd_reset_poll(void *arg)
ahd->flags &= ~AHD_RESET_POLL_ACTIVE; ahd->flags &= ~AHD_RESET_POLL_ACTIVE;
ahd_unlock(ahd, &s); ahd_unlock(ahd, &s);
ahd_release_simq(ahd); ahd_release_simq(ahd);
ahd_list_unlock(&l);
} }
/**************************** Statistics Processing ***************************/ /**************************** Statistics Processing ***************************/
static void static void
ahd_stat_timer(void *arg) ahd_stat_timer(void *arg)
{ {
struct ahd_softc *ahd; struct ahd_softc *ahd = arg;
u_long l;
u_long s; u_long s;
int enint_coal; int enint_coal;
ahd_list_lock(&l);
ahd = ahd_find_softc((struct ahd_softc *)arg);
if (ahd == NULL) {
printf("ahd_stat_timer: Instance %p no longer exists\n", arg);
ahd_list_unlock(&l);
return;
}
ahd_lock(ahd, &s); ahd_lock(ahd, &s);
enint_coal = ahd->hs_mailbox & ENINT_COALESCE; enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
@@ -7981,7 +7893,6 @@ ahd_stat_timer(void *arg)
ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
ahd_stat_timer, ahd); ahd_stat_timer, ahd);
ahd_unlock(ahd, &s); ahd_unlock(ahd, &s);
ahd_list_unlock(&l);
} }
/****************************** Status Processing *****************************/ /****************************** Status Processing *****************************/
@@ -8745,16 +8656,6 @@ sized:
return (last_probe); return (last_probe);
} }
void
ahd_dump_all_cards_state(void)
{
struct ahd_softc *list_ahd;
TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
ahd_dump_card_state(list_ahd);
}
}
int int
ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries, ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
const char *name, u_int address, u_int value, const char *name, u_int address, u_int value,
@@ -9039,7 +8940,6 @@ ahd_dump_card_state(struct ahd_softc *ahd)
ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF); ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
} }
printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n"); printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
ahd_platform_dump_card_state(ahd);
ahd_restore_modes(ahd, saved_modes); ahd_restore_modes(ahd, saved_modes);
if (paused == 0) if (paused == 0)
ahd_unpause(ahd); ahd_unpause(ahd);

File diff suppressed because it is too large


@@ -42,6 +42,7 @@
#ifndef _AIC79XX_LINUX_H_ #ifndef _AIC79XX_LINUX_H_
#define _AIC79XX_LINUX_H_ #define _AIC79XX_LINUX_H_
#include <linux/config.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/delay.h> #include <linux/delay.h>
@@ -49,18 +50,23 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/version.h> #include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/io.h> #include <asm/io.h>
#include <linux/interrupt.h> /* For tasklet support. */ #include <scsi/scsi.h>
#include <linux/config.h> #include <scsi/scsi_cmnd.h>
#include <linux/slab.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h>
/* Core SCSI definitions */ /* Core SCSI definitions */
#define AIC_LIB_PREFIX ahd #define AIC_LIB_PREFIX ahd
#include "scsi.h"
#include <scsi/scsi_host.h>
/* Name space conflict with BSD queue macros */ /* Name space conflict with BSD queue macros */
#ifdef LIST_HEAD #ifdef LIST_HEAD
@@ -95,7 +101,7 @@
/************************* Forward Declarations *******************************/ /************************* Forward Declarations *******************************/
struct ahd_softc; struct ahd_softc;
typedef struct pci_dev *ahd_dev_softc_t; typedef struct pci_dev *ahd_dev_softc_t;
typedef Scsi_Cmnd *ahd_io_ctx_t; typedef struct scsi_cmnd *ahd_io_ctx_t;
/******************************* Byte Order ***********************************/ /******************************* Byte Order ***********************************/
#define ahd_htobe16(x) cpu_to_be16(x) #define ahd_htobe16(x) cpu_to_be16(x)
@@ -114,8 +120,7 @@ typedef Scsi_Cmnd *ahd_io_ctx_t;
/************************* Configuration Data *********************************/ /************************* Configuration Data *********************************/
extern uint32_t aic79xx_allow_memio; extern uint32_t aic79xx_allow_memio;
extern int aic79xx_detect_complete; extern struct scsi_host_template aic79xx_driver_template;
extern Scsi_Host_Template aic79xx_driver_template;
/***************************** Bus Space/DMA **********************************/ /***************************** Bus Space/DMA **********************************/
@@ -145,11 +150,7 @@ struct ahd_linux_dma_tag
}; };
typedef struct ahd_linux_dma_tag* bus_dma_tag_t; typedef struct ahd_linux_dma_tag* bus_dma_tag_t;
struct ahd_linux_dmamap typedef dma_addr_t bus_dmamap_t;
{
dma_addr_t bus_addr;
};
typedef struct ahd_linux_dmamap* bus_dmamap_t;
typedef int bus_dma_filter_t(void*, dma_addr_t); typedef int bus_dma_filter_t(void*, dma_addr_t);
typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
@@ -226,12 +227,12 @@ typedef struct timer_list ahd_timer_t;
#define ahd_timer_init init_timer #define ahd_timer_init init_timer
#define ahd_timer_stop del_timer_sync #define ahd_timer_stop del_timer_sync
typedef void ahd_linux_callback_t (u_long); typedef void ahd_linux_callback_t (u_long);
static __inline void ahd_timer_reset(ahd_timer_t *timer, u_int usec, static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
ahd_callback_t *func, void *arg); ahd_callback_t *func, void *arg);
static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec); static __inline void ahd_scb_timer_reset(struct scb *scb, u_int usec);
static __inline void static __inline void
ahd_timer_reset(ahd_timer_t *timer, u_int usec, ahd_callback_t *func, void *arg) ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
{ {
struct ahd_softc *ahd; struct ahd_softc *ahd;
@@ -252,43 +253,8 @@ ahd_scb_timer_reset(struct scb *scb, u_int usec)
/***************************** SMP support ************************************/ /***************************** SMP support ************************************/
#include <linux/spinlock.h> #include <linux/spinlock.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
#define AHD_SCSI_HAS_HOST_LOCK 1
#else
#define AHD_SCSI_HAS_HOST_LOCK 0
#endif
#define AIC79XX_DRIVER_VERSION "1.3.11" #define AIC79XX_DRIVER_VERSION "1.3.11"
/**************************** Front End Queues ********************************/
/*
* Data structure used to cast the Linux struct scsi_cmnd to something
* that allows us to use the queue macros. The linux structure has
* plenty of space to hold the links fields as required by the queue
* macros, but the queue macors require them to have the correct type.
*/
struct ahd_cmd_internal {
/* Area owned by the Linux scsi layer. */
uint8_t private[offsetof(struct scsi_cmnd, SCp.Status)];
union {
STAILQ_ENTRY(ahd_cmd) ste;
LIST_ENTRY(ahd_cmd) le;
TAILQ_ENTRY(ahd_cmd) tqe;
} links;
uint32_t end;
};
struct ahd_cmd {
union {
struct ahd_cmd_internal icmd;
struct scsi_cmnd scsi_cmd;
} un;
};
#define acmd_icmd(cmd) ((cmd)->un.icmd)
#define acmd_scsi_cmd(cmd) ((cmd)->un.scsi_cmd)
#define acmd_links un.icmd.links
/*************************** Device Data Structures ***************************/ /*************************** Device Data Structures ***************************/
/* /*
* A per probed device structure used to deal with some error recovery * A per probed device structure used to deal with some error recovery
@@ -297,22 +263,17 @@ struct ahd_cmd {
* after a successfully completed inquiry command to the target when * after a successfully completed inquiry command to the target when
* that inquiry data indicates a lun is present. * that inquiry data indicates a lun is present.
*/ */
TAILQ_HEAD(ahd_busyq, ahd_cmd);
typedef enum { typedef enum {
AHD_DEV_UNCONFIGURED = 0x01,
AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ AHD_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
AHD_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
AHD_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ AHD_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ AHD_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ AHD_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
AHD_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
} ahd_linux_dev_flags; } ahd_linux_dev_flags;
struct ahd_linux_target; struct ahd_linux_target;
struct ahd_linux_device { struct ahd_linux_device {
TAILQ_ENTRY(ahd_linux_device) links; TAILQ_ENTRY(ahd_linux_device) links;
struct ahd_busyq busyq;
/* /*
* The number of transactions currently * The number of transactions currently
@@ -388,62 +349,12 @@ struct ahd_linux_device {
*/ */
u_int commands_since_idle_or_otag; u_int commands_since_idle_or_otag;
#define AHD_OTAG_THRESH 500 #define AHD_OTAG_THRESH 500
int lun;
Scsi_Device *scsi_device;
struct ahd_linux_target *target;
}; };
typedef enum {
AHD_DV_REQUIRED = 0x01,
AHD_INQ_VALID = 0x02,
AHD_BASIC_DV = 0x04,
AHD_ENHANCED_DV = 0x08
} ahd_linux_targ_flags;
/* DV States */
typedef enum {
AHD_DV_STATE_EXIT = 0,
AHD_DV_STATE_INQ_SHORT_ASYNC,
AHD_DV_STATE_INQ_ASYNC,
AHD_DV_STATE_INQ_ASYNC_VERIFY,
AHD_DV_STATE_TUR,
AHD_DV_STATE_REBD,
AHD_DV_STATE_INQ_VERIFY,
AHD_DV_STATE_WEB,
AHD_DV_STATE_REB,
AHD_DV_STATE_SU,
AHD_DV_STATE_BUSY
} ahd_dv_state;
struct ahd_linux_target { struct ahd_linux_target {
struct ahd_linux_device *devices[AHD_NUM_LUNS]; struct scsi_device *sdev[AHD_NUM_LUNS];
int channel;
int target;
int refcount;
struct ahd_transinfo last_tinfo; struct ahd_transinfo last_tinfo;
struct ahd_softc *ahd; struct ahd_softc *ahd;
ahd_linux_targ_flags flags;
struct scsi_inquiry_data *inq_data;
/*
* The next "fallback" period to use for narrow/wide transfers.
*/
uint8_t dv_next_narrow_period;
uint8_t dv_next_wide_period;
uint8_t dv_max_width;
uint8_t dv_max_ppr_options;
uint8_t dv_last_ppr_options;
u_int dv_echo_size;
ahd_dv_state dv_state;
u_int dv_state_retry;
uint8_t *dv_buffer;
uint8_t *dv_buffer1;
/*
* Cumulative counter of errors.
*/
u_long errors_detected;
u_long cmds_since_error;
}; };
/********************* Definitions Required by the Core ***********************/ /********************* Definitions Required by the Core ***********************/
@@ -453,32 +364,16 @@ struct ahd_linux_target {
* manner and are allocated below 4GB, the number of S/G segments is * manner and are allocated below 4GB, the number of S/G segments is
* unrestricted. * unrestricted.
*/ */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
/*
* We dynamically adjust the number of segments in pre-2.5 kernels to
* avoid fragmentation issues in the SCSI mid-layer's private memory
* allocator. See aic79xx_osm.c ahd_linux_size_nseg() for details.
*/
extern u_int ahd_linux_nseg;
#define AHD_NSEG ahd_linux_nseg
#define AHD_LINUX_MIN_NSEG 64
#else
#define AHD_NSEG 128 #define AHD_NSEG 128
#endif
/* /*
* Per-SCB OSM storage. * Per-SCB OSM storage.
*/ */
typedef enum {
AHD_SCB_UP_EH_SEM = 0x1
} ahd_linux_scb_flags;
struct scb_platform_data { struct scb_platform_data {
struct ahd_linux_device *dev; struct ahd_linux_device *dev;
dma_addr_t buf_busaddr; dma_addr_t buf_busaddr;
uint32_t xfer_len; uint32_t xfer_len;
uint32_t sense_resid; /* Auto-Sense residual */ uint32_t sense_resid; /* Auto-Sense residual */
ahd_linux_scb_flags flags;
}; };
/* /*
@@ -487,44 +382,23 @@ struct scb_platform_data {
* alignment restrictions of the various platforms supported by * alignment restrictions of the various platforms supported by
* this driver. * this driver.
*/ */
typedef enum {
AHD_DV_WAIT_SIMQ_EMPTY = 0x01,
AHD_DV_WAIT_SIMQ_RELEASE = 0x02,
AHD_DV_ACTIVE = 0x04,
AHD_DV_SHUTDOWN = 0x08,
AHD_RUN_CMPLT_Q_TIMER = 0x10
} ahd_linux_softc_flags;
TAILQ_HEAD(ahd_completeq, ahd_cmd);
struct ahd_platform_data { struct ahd_platform_data {
/* /*
* Fields accessed from interrupt context. * Fields accessed from interrupt context.
*/ */
struct ahd_linux_target *targets[AHD_NUM_TARGETS]; struct scsi_target *starget[AHD_NUM_TARGETS];
TAILQ_HEAD(, ahd_linux_device) device_runq;
struct ahd_completeq completeq;
spinlock_t spin_lock; spinlock_t spin_lock;
struct tasklet_struct runq_tasklet;
u_int qfrozen; u_int qfrozen;
pid_t dv_pid;
struct timer_list completeq_timer;
struct timer_list reset_timer; struct timer_list reset_timer;
struct timer_list stats_timer;
struct semaphore eh_sem; struct semaphore eh_sem;
struct semaphore dv_sem;
struct semaphore dv_cmd_sem; /* XXX This needs to be in
* the target struct
*/
struct scsi_device *dv_scsi_dev;
struct Scsi_Host *host; /* pointer to scsi host */ struct Scsi_Host *host; /* pointer to scsi host */
#define AHD_LINUX_NOIRQ ((uint32_t)~0) #define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */ uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address; uint32_t bios_address;
uint32_t mem_busaddr; /* Mem Base Addr */ uint32_t mem_busaddr; /* Mem Base Addr */
uint64_t hw_dma_mask; #define AHD_SCB_UP_EH_SEM 0x1
ahd_linux_softc_flags flags; uint32_t flags;
}; };
/************************** OS Utility Wrappers *******************************/ /************************** OS Utility Wrappers *******************************/
@@ -641,7 +515,7 @@ ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
/**************************** Initialization **********************************/ /**************************** Initialization **********************************/
int ahd_linux_register_host(struct ahd_softc *, int ahd_linux_register_host(struct ahd_softc *,
Scsi_Host_Template *); struct scsi_host_template *);
uint64_t ahd_linux_get_memsize(void); uint64_t ahd_linux_get_memsize(void);
@@ -657,28 +531,6 @@ void ahd_format_transinfo(struct info_str *info,
struct ahd_transinfo *tinfo); struct ahd_transinfo *tinfo);
/******************************** Locking *************************************/ /******************************** Locking *************************************/
/* Lock protecting internal data structures */
static __inline void ahd_lockinit(struct ahd_softc *);
static __inline void ahd_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_unlock(struct ahd_softc *, unsigned long *flags);
/* Lock acquisition and release of the above lock in midlayer entry points. */
static __inline void ahd_midlayer_entrypoint_lock(struct ahd_softc *,
unsigned long *flags);
static __inline void ahd_midlayer_entrypoint_unlock(struct ahd_softc *,
unsigned long *flags);
/* Lock held during command compeletion to the upper layer */
static __inline void ahd_done_lockinit(struct ahd_softc *);
static __inline void ahd_done_lock(struct ahd_softc *, unsigned long *flags);
static __inline void ahd_done_unlock(struct ahd_softc *, unsigned long *flags);
/* Lock held during ahd_list manipulation and ahd softc frees */
extern spinlock_t ahd_list_spinlock;
static __inline void ahd_list_lockinit(void);
static __inline void ahd_list_lock(unsigned long *flags);
static __inline void ahd_list_unlock(unsigned long *flags);
static __inline void static __inline void
ahd_lockinit(struct ahd_softc *ahd) ahd_lockinit(struct ahd_softc *ahd)
{ {
@@ -697,75 +549,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags); spin_unlock_irqrestore(&ahd->platform_data->spin_lock, *flags);
} }
static __inline void
ahd_midlayer_entrypoint_lock(struct ahd_softc *ahd, unsigned long *flags)
{
/*
* In 2.5.X and some 2.4.X versions, the midlayer takes our
* lock just before calling us, so we avoid locking again.
* For other kernel versions, the io_request_lock is taken
* just before our entry point is called. In this case, we
* trade the io_request_lock for our per-softc lock.
*/
#if AHD_SCSI_HAS_HOST_LOCK == 0
spin_unlock(&io_request_lock);
spin_lock(&ahd->platform_data->spin_lock);
#endif
}
static __inline void
ahd_midlayer_entrypoint_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
spin_unlock(&ahd->platform_data->spin_lock);
spin_lock(&io_request_lock);
#endif
}
static __inline void
ahd_done_lockinit(struct ahd_softc *ahd)
{
/*
* In 2.5.X, our own lock is held during completions.
* In previous versions, the io_request_lock is used.
* In either case, we can't initialize this lock again.
*/
}
static __inline void
ahd_done_lock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
spin_lock(&io_request_lock);
#endif
}
static __inline void
ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
{
#if AHD_SCSI_HAS_HOST_LOCK == 0
spin_unlock(&io_request_lock);
#endif
}
static __inline void
ahd_list_lockinit(void)
{
spin_lock_init(&ahd_list_spinlock);
}
static __inline void
ahd_list_lock(unsigned long *flags)
{
spin_lock_irqsave(&ahd_list_spinlock, *flags);
}
static __inline void
ahd_list_unlock(unsigned long *flags)
{
spin_unlock_irqrestore(&ahd_list_spinlock, *flags);
}
/******************************* PCI Definitions ******************************/ /******************************* PCI Definitions ******************************/
/* /*
* PCIM_xxx: mask to locate subfield in register * PCIM_xxx: mask to locate subfield in register
@@ -925,27 +708,17 @@ ahd_flush_device_writes(struct ahd_softc *ahd)
} }
/**************************** Proc FS Support *********************************/ /**************************** Proc FS Support *********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
int ahd_linux_proc_info(char *, char **, off_t, int, int, int);
#else
int ahd_linux_proc_info(struct Scsi_Host *, char *, char **, int ahd_linux_proc_info(struct Scsi_Host *, char *, char **,
off_t, int, int); off_t, int, int);
#endif
/*************************** Domain Validation ********************************/
#define AHD_DV_CMD(cmd) ((cmd)->scsi_done == ahd_linux_dv_complete)
#define AHD_DV_SIMQ_FROZEN(ahd) \
((((ahd)->platform_data->flags & AHD_DV_ACTIVE) != 0) \
&& (ahd)->platform_data->qfrozen == 1)
/*********************** Transaction Access Wrappers **************************/ /*********************** Transaction Access Wrappers **************************/
static __inline void ahd_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t); static __inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
static __inline void ahd_set_transaction_status(struct scb *, uint32_t); static __inline void ahd_set_transaction_status(struct scb *, uint32_t);
static __inline void ahd_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t); static __inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
static __inline void ahd_set_scsi_status(struct scb *, uint32_t); static __inline void ahd_set_scsi_status(struct scb *, uint32_t);
static __inline uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd); static __inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
static __inline uint32_t ahd_get_transaction_status(struct scb *); static __inline uint32_t ahd_get_transaction_status(struct scb *);
static __inline uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd); static __inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
static __inline uint32_t ahd_get_scsi_status(struct scb *); static __inline uint32_t ahd_get_scsi_status(struct scb *);
static __inline void ahd_set_transaction_tag(struct scb *, int, u_int); static __inline void ahd_set_transaction_tag(struct scb *, int, u_int);
static __inline u_long ahd_get_transfer_length(struct scb *); static __inline u_long ahd_get_transfer_length(struct scb *);
@@ -964,7 +737,7 @@ static __inline void ahd_platform_scb_free(struct ahd_softc *ahd,
static __inline void ahd_freeze_scb(struct scb *scb); static __inline void ahd_freeze_scb(struct scb *scb);
static __inline static __inline
void ahd_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status) void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
{ {
cmd->result &= ~(CAM_STATUS_MASK << 16); cmd->result &= ~(CAM_STATUS_MASK << 16);
cmd->result |= status << 16; cmd->result |= status << 16;
@@ -977,7 +750,7 @@ void ahd_set_transaction_status(struct scb *scb, uint32_t status)
} }
static __inline static __inline
void ahd_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status) void ahd_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
{ {
cmd->result &= ~0xFFFF; cmd->result &= ~0xFFFF;
cmd->result |= status; cmd->result |= status;
@@ -990,7 +763,7 @@ void ahd_set_scsi_status(struct scb *scb, uint32_t status)
} }
static __inline static __inline
uint32_t ahd_cmd_get_transaction_status(Scsi_Cmnd *cmd) uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd)
{ {
return ((cmd->result >> 16) & CAM_STATUS_MASK); return ((cmd->result >> 16) & CAM_STATUS_MASK);
} }
@@ -1002,7 +775,7 @@ uint32_t ahd_get_transaction_status(struct scb *scb)
} }
static __inline static __inline
uint32_t ahd_cmd_get_scsi_status(Scsi_Cmnd *cmd) uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd)
{ {
return (cmd->result & 0xFFFF); return (cmd->result & 0xFFFF);
} }
@@ -1117,7 +890,6 @@ void ahd_done(struct ahd_softc*, struct scb*);
void ahd_send_async(struct ahd_softc *, char channel, void ahd_send_async(struct ahd_softc *, char channel,
u_int target, u_int lun, ac_code, void *); u_int target, u_int lun, ac_code, void *);
void ahd_print_path(struct ahd_softc *, struct scb *); void ahd_print_path(struct ahd_softc *, struct scb *);
void ahd_platform_dump_card_state(struct ahd_softc *ahd);
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
#define AHD_PCI_CONFIG 1 #define AHD_PCI_CONFIG 1

View File

@@ -92,27 +92,31 @@ struct pci_driver aic79xx_pci_driver = {
static void static void
ahd_linux_pci_dev_remove(struct pci_dev *pdev) ahd_linux_pci_dev_remove(struct pci_dev *pdev)
{ {
struct ahd_softc *ahd; struct ahd_softc *ahd = pci_get_drvdata(pdev);
u_long l; u_long s;
/* ahd_lock(ahd, &s);
* We should be able to just perform ahd_intr_enable(ahd, FALSE);
* the free directly, but check our ahd_unlock(ahd, &s);
* list for extra sanity. ahd_free(ahd);
*/ }
ahd_list_lock(&l);
ahd = ahd_find_softc((struct ahd_softc *)pci_get_drvdata(pdev));
if (ahd != NULL) {
u_long s;
TAILQ_REMOVE(&ahd_tailq, ahd, links); static void
ahd_list_unlock(&l); ahd_linux_pci_inherit_flags(struct ahd_softc *ahd)
ahd_lock(ahd, &s); {
ahd_intr_enable(ahd, FALSE); struct pci_dev *pdev = ahd->dev_softc, *master_pdev;
ahd_unlock(ahd, &s); unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
ahd_free(ahd);
} else master_pdev = pci_get_slot(pdev->bus, master_devfn);
ahd_list_unlock(&l); if (master_pdev) {
struct ahd_softc *master = pci_get_drvdata(master_pdev);
if (master) {
ahd->flags &= ~AHD_BIOS_ENABLED;
ahd->flags |= master->flags & AHD_BIOS_ENABLED;
} else
printk(KERN_ERR "aic79xx: no multichannel peer found!\n");
pci_dev_put(master_pdev);
}
} }
static int static int
@@ -125,22 +129,6 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char *name; char *name;
int error; int error;
/*
* Some BIOSen report the same device multiple times.
*/
TAILQ_FOREACH(ahd, &ahd_tailq, links) {
struct pci_dev *probed_pdev;
probed_pdev = ahd->dev_softc;
if (probed_pdev->bus->number == pdev->bus->number
&& probed_pdev->devfn == pdev->devfn)
break;
}
if (ahd != NULL) {
/* Skip duplicate. */
return (-ENODEV);
}
pci = pdev; pci = pdev;
entry = ahd_find_pci_device(pci); entry = ahd_find_pci_device(pci);
if (entry == NULL) if (entry == NULL)
@@ -177,15 +165,12 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (memsize >= 0x8000000000ULL if (memsize >= 0x8000000000ULL
&& pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) { && pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
ahd->flags |= AHD_64BIT_ADDRESSING; ahd->flags |= AHD_64BIT_ADDRESSING;
ahd->platform_data->hw_dma_mask = DMA_64BIT_MASK;
} else if (memsize > 0x80000000 } else if (memsize > 0x80000000
&& pci_set_dma_mask(pdev, mask_39bit) == 0) { && pci_set_dma_mask(pdev, mask_39bit) == 0) {
ahd->flags |= AHD_39BIT_ADDRESSING; ahd->flags |= AHD_39BIT_ADDRESSING;
ahd->platform_data->hw_dma_mask = mask_39bit;
} }
} else { } else {
pci_set_dma_mask(pdev, DMA_32BIT_MASK); pci_set_dma_mask(pdev, DMA_32BIT_MASK);
ahd->platform_data->hw_dma_mask = DMA_32BIT_MASK;
} }
ahd->dev_softc = pci; ahd->dev_softc = pci;
error = ahd_pci_config(ahd, entry); error = ahd_pci_config(ahd, entry);
@@ -193,16 +178,17 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahd_free(ahd); ahd_free(ahd);
return (-error); return (-error);
} }
/*
* Second Function PCI devices need to inherit some
* settings from function 0.
*/
if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
ahd_linux_pci_inherit_flags(ahd);
pci_set_drvdata(pdev, ahd); pci_set_drvdata(pdev, ahd);
if (aic79xx_detect_complete) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ahd_linux_register_host(ahd, &aic79xx_driver_template);
ahd_linux_register_host(ahd, &aic79xx_driver_template);
#else
printf("aic79xx: ignoring PCI device found after "
"initialization\n");
return (-ENODEV);
#endif
}
return (0); return (0);
} }

View File

@@ -283,7 +283,6 @@ int
ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry) ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
{ {
struct scb_data *shared_scb_data; struct scb_data *shared_scb_data;
u_long l;
u_int command; u_int command;
uint32_t devconfig; uint32_t devconfig;
uint16_t subvendor; uint16_t subvendor;
@@ -373,16 +372,9 @@ ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
* Allow interrupts now that we are completely setup. * Allow interrupts now that we are completely setup.
*/ */
error = ahd_pci_map_int(ahd); error = ahd_pci_map_int(ahd);
if (error != 0) if (!error)
return (error); ahd->init_level++;
return error;
ahd_list_lock(&l);
/*
* Link this softc in with all other ahd instances.
*/
ahd_softc_insert(ahd);
ahd_list_unlock(&l);
return (0);
} }
/* /*

View File

@@ -49,10 +49,53 @@ static void ahd_dump_target_state(struct ahd_softc *ahd,
u_int our_id, char channel, u_int our_id, char channel,
u_int target_id, u_int target_offset); u_int target_id, u_int target_offset);
static void ahd_dump_device_state(struct info_str *info, static void ahd_dump_device_state(struct info_str *info,
struct ahd_linux_device *dev); struct scsi_device *sdev);
static int ahd_proc_write_seeprom(struct ahd_softc *ahd, static int ahd_proc_write_seeprom(struct ahd_softc *ahd,
char *buffer, int length); char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
static struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
{ 0x08, 625 }, /* FAST-160 */
{ 0x09, 1250 }, /* FAST-80 */
{ 0x0a, 2500 }, /* FAST-40 40MHz */
{ 0x0b, 3030 }, /* FAST-40 33MHz */
{ 0x0c, 5000 } /* FAST-20 */
};
/*
* Return the frequency in kHz corresponding to the given
* sync period factor.
*/
static u_int
ahd_calc_syncsrate(u_int period_factor)
{
int i;
int num_syncrates;
num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
/* See if the period is in the "exception" table */
for (i = 0; i < num_syncrates; i++) {
if (period_factor == scsi_syncrates[i].period_factor) {
/* Period in kHz */
return (100000000 / scsi_syncrates[i].period);
}
}
/*
* Wasn't in the table, so use the standard
* 4 times conversion.
*/
return (10000000 / (period_factor * 4 * 10));
}
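Editorial aside, not part of the patch: two worked conversions make the table and the fallback rule above concrete.
/*
 * Illustrative only: a period factor of 0x0a is in the exception table
 * (period 2500, i.e. 25 ns), so the routine returns
 * 100000000 / 2500 = 40000 kHz (FAST-40).  A factor of 0x19 (25) is not
 * in the table, so the standard rule applies:
 * 10000000 / (25 * 4 * 10) = 10000 kHz (FAST-10).
 */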
static void static void
copy_mem_info(struct info_str *info, char *data, int len) copy_mem_info(struct info_str *info, char *data, int len)
{ {
@@ -109,7 +152,7 @@ ahd_format_transinfo(struct info_str *info, struct ahd_transinfo *tinfo)
speed = 3300; speed = 3300;
freq = 0; freq = 0;
if (tinfo->offset != 0) { if (tinfo->offset != 0) {
freq = aic_calc_syncsrate(tinfo->period); freq = ahd_calc_syncsrate(tinfo->period);
speed = freq; speed = freq;
} }
speed *= (0x01 << tinfo->width); speed *= (0x01 << tinfo->width);
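Editorial aside, not part of the patch: a concrete instance of the calculation above.
/*
 * Illustrative only: a FAST-40 connection (freq = 40000 kHz) on a wide
 * (16-bit, tinfo->width == 1) bus gives speed = 40000 * 2 = 80000,
 * i.e. an 80 MB/s transfer rate in the /proc report.
 */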
@@ -167,6 +210,7 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
u_int target_offset) u_int target_offset)
{ {
struct ahd_linux_target *targ; struct ahd_linux_target *targ;
struct scsi_target *starget;
struct ahd_initiator_tinfo *tinfo; struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate; struct ahd_tmode_tstate *tstate;
int lun; int lun;
@@ -176,20 +220,20 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
copy_info(info, "Target %d Negotiation Settings\n", target_id); copy_info(info, "Target %d Negotiation Settings\n", target_id);
copy_info(info, "\tUser: "); copy_info(info, "\tUser: ");
ahd_format_transinfo(info, &tinfo->user); ahd_format_transinfo(info, &tinfo->user);
targ = ahd->platform_data->targets[target_offset]; starget = ahd->platform_data->starget[target_offset];
if (targ == NULL) if (starget == NULL)
return; return;
targ = scsi_transport_target_data(starget);
copy_info(info, "\tGoal: "); copy_info(info, "\tGoal: ");
ahd_format_transinfo(info, &tinfo->goal); ahd_format_transinfo(info, &tinfo->goal);
copy_info(info, "\tCurr: "); copy_info(info, "\tCurr: ");
ahd_format_transinfo(info, &tinfo->curr); ahd_format_transinfo(info, &tinfo->curr);
copy_info(info, "\tTransmission Errors %ld\n", targ->errors_detected);
for (lun = 0; lun < AHD_NUM_LUNS; lun++) { for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
struct ahd_linux_device *dev; struct scsi_device *dev;
dev = targ->devices[lun]; dev = targ->sdev[lun];
if (dev == NULL) if (dev == NULL)
continue; continue;
@@ -199,10 +243,13 @@ ahd_dump_target_state(struct ahd_softc *ahd, struct info_str *info,
} }
static void static void
ahd_dump_device_state(struct info_str *info, struct ahd_linux_device *dev) ahd_dump_device_state(struct info_str *info, struct scsi_device *sdev)
{ {
struct ahd_linux_device *dev = scsi_transport_device_data(sdev);
copy_info(info, "\tChannel %c Target %d Lun %d Settings\n", copy_info(info, "\tChannel %c Target %d Lun %d Settings\n",
dev->target->channel + 'A', dev->target->target, dev->lun); sdev->sdev_target->channel + 'A',
sdev->sdev_target->id, sdev->lun);
copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued); copy_info(info, "\t\tCommands Queued %ld\n", dev->commands_issued);
copy_info(info, "\t\tCommands Active %d\n", dev->active); copy_info(info, "\t\tCommands Active %d\n", dev->active);
@@ -278,36 +325,16 @@ done:
* Return information to handle /proc support for the driver. * Return information to handle /proc support for the driver.
*/ */
int int
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
ahd_linux_proc_info(char *buffer, char **start, off_t offset,
int length, int hostno, int inout)
#else
ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start, ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
off_t offset, int length, int inout) off_t offset, int length, int inout)
#endif
{ {
struct ahd_softc *ahd; struct ahd_softc *ahd = *(struct ahd_softc **)shost->hostdata;
struct info_str info; struct info_str info;
char ahd_info[256]; char ahd_info[256];
u_long l;
u_int max_targ; u_int max_targ;
u_int i; u_int i;
int retval; int retval;
retval = -EINVAL;
ahd_list_lock(&l);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
TAILQ_FOREACH(ahd, &ahd_tailq, links) {
if (ahd->platform_data->host->host_no == hostno)
break;
}
#else
ahd = ahd_find_softc(*(struct ahd_softc **)shost->hostdata);
#endif
if (ahd == NULL)
goto done;
/* Has data been written to the file? */ /* Has data been written to the file? */
if (inout == TRUE) { if (inout == TRUE) {
retval = ahd_proc_write_seeprom(ahd, buffer, length); retval = ahd_proc_write_seeprom(ahd, buffer, length);
@@ -357,6 +384,5 @@ ahd_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
} }
retval = info.pos > info.offset ? info.pos - info.offset : 0; retval = info.pos > info.offset ? info.pos - info.offset : 0;
done: done:
ahd_list_unlock(&l);
return (retval); return (retval);
} }

View File

@@ -37,7 +37,7 @@
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES. * POSSIBILITY OF SUCH DAMAGES.
* *
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#79 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.h#85 $
* *
* $FreeBSD$ * $FreeBSD$
*/ */
@@ -243,7 +243,7 @@ typedef enum {
*/ */
AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA, AHC_AIC7850_FE = AHC_SPIOCAP|AHC_AUTOPAUSE|AHC_TARGETMODE|AHC_ULTRA,
AHC_AIC7860_FE = AHC_AIC7850_FE, AHC_AIC7860_FE = AHC_AIC7850_FE,
AHC_AIC7870_FE = AHC_TARGETMODE, AHC_AIC7870_FE = AHC_TARGETMODE|AHC_AUTOPAUSE,
AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA, AHC_AIC7880_FE = AHC_AIC7870_FE|AHC_ULTRA,
/* /*
* Although we have space for both the initiator and * Although we have space for both the initiator and

View File

@@ -39,7 +39,7 @@
* *
* $FreeBSD$ * $FreeBSD$
*/ */
VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $" VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
/* /*
* This file is processed by the aic7xxx_asm utility for use in assembling * This file is processed by the aic7xxx_asm utility for use in assembling
@@ -1306,7 +1306,6 @@ scratch_ram {
*/ */
MWI_RESIDUAL { MWI_RESIDUAL {
size 1 size 1
alias TARG_IMMEDIATE_SCB
} }
/* /*
* SCBID of the next SCB to be started by the controller. * SCBID of the next SCB to be started by the controller.
@@ -1461,6 +1460,7 @@ scratch_ram {
*/ */
LAST_MSG { LAST_MSG {
size 1 size 1
alias TARG_IMMEDIATE_SCB
} }
/* /*

View File

@@ -40,7 +40,7 @@
* $FreeBSD$ * $FreeBSD$
*/ */
VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $" VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
PATCH_ARG_LIST = "struct ahc_softc *ahc" PATCH_ARG_LIST = "struct ahc_softc *ahc"
PREFIX = "ahc_" PREFIX = "ahc_"
@@ -679,6 +679,7 @@ await_busfree:
clr SCSIBUSL; /* Prevent bit leakage during SELTO */ clr SCSIBUSL; /* Prevent bit leakage during SELTO */
} }
and SXFRCTL0, ~SPIOEN; and SXFRCTL0, ~SPIOEN;
mvi SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT;
test SSTAT1,REQINIT|BUSFREE jz .; test SSTAT1,REQINIT|BUSFREE jz .;
test SSTAT1, BUSFREE jnz poll_for_work; test SSTAT1, BUSFREE jnz poll_for_work;
mvi MISSED_BUSFREE call set_seqint; mvi MISSED_BUSFREE call set_seqint;
@@ -1097,7 +1098,7 @@ ultra2_dmahalt:
test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg; test SCB_RESIDUAL_DATACNT[3], SG_LAST_SEG jz dma_mid_sg;
if ((ahc->flags & AHC_TARGETROLE) != 0) { if ((ahc->flags & AHC_TARGETROLE) != 0) {
test SSTAT0, TARGET jz dma_last_sg; test SSTAT0, TARGET jz dma_last_sg;
if ((ahc->flags & AHC_TMODE_WIDEODD_BUG) != 0) { if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0) {
test DMAPARAMS, DIRECTION jz dma_mid_sg; test DMAPARAMS, DIRECTION jz dma_mid_sg;
} }
} }

View File

@@ -28,9 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE. * SUCH DAMAGE.
* *
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#17 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_93cx6.c#19 $
*
* $FreeBSD$
*/ */
/* /*
@@ -64,7 +62,6 @@
* is preceded by an initial zero (leading 0, followed by 16-bits, MSB * is preceded by an initial zero (leading 0, followed by 16-bits, MSB
* first). The clock cycling from low to high initiates the next data * first). The clock cycling from low to high initiates the next data
* bit to be sent from the chip. * bit to be sent from the chip.
*
*/ */
#ifdef __linux__ #ifdef __linux__
@@ -81,14 +78,22 @@
* Right now, we only have to read the SEEPROM. But we make it easier to * Right now, we only have to read the SEEPROM. But we make it easier to
* add other 93Cx6 functions. * add other 93Cx6 functions.
*/ */
static struct seeprom_cmd { struct seeprom_cmd {
uint8_t len; uint8_t len;
uint8_t bits[9]; uint8_t bits[11];
} seeprom_read = {3, {1, 1, 0}}; };
/* Short opcodes for the c46 */
static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}}; static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}}; static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Long opcodes for the C56/C66 */
static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
/* Common opcodes */
static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}}; static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
static struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
/* /*
* Wait for the SEERDY to go high; about 800 ns. * Wait for the SEERDY to go high; about 800 ns.
@@ -222,12 +227,25 @@ int
ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf, ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
u_int start_addr, u_int count) u_int start_addr, u_int count)
{ {
struct seeprom_cmd *ewen, *ewds;
uint16_t v; uint16_t v;
uint8_t temp; uint8_t temp;
int i, k; int i, k;
/* Place the chip into write-enable mode */ /* Place the chip into write-enable mode */
send_seeprom_cmd(sd, &seeprom_ewen); if (sd->sd_chip == C46) {
ewen = &seeprom_ewen;
ewds = &seeprom_ewds;
} else if (sd->sd_chip == C56_66) {
ewen = &seeprom_long_ewen;
ewds = &seeprom_long_ewds;
} else {
printf("ahc_write_seeprom: unsupported seeprom type %d\n",
sd->sd_chip);
return (0);
}
send_seeprom_cmd(sd, ewen);
reset_seeprom(sd); reset_seeprom(sd);
/* Write all requested data out to the seeprom. */ /* Write all requested data out to the seeprom. */
@@ -277,7 +295,7 @@ ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
} }
/* Put the chip back into write-protect mode */ /* Put the chip back into write-protect mode */
send_seeprom_cmd(sd, &seeprom_ewds); send_seeprom_cmd(sd, ewds);
reset_seeprom(sd); reset_seeprom(sd);
return (1); return (1);

View File

@@ -37,9 +37,7 @@
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES. * POSSIBILITY OF SUCH DAMAGES.
* *
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#134 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
*
* $FreeBSD$
*/ */
#ifdef __linux__ #ifdef __linux__
@@ -287,10 +285,19 @@ ahc_restart(struct ahc_softc *ahc)
ahc_outb(ahc, SEQ_FLAGS2, ahc_outb(ahc, SEQ_FLAGS2,
ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA); ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
} }
/*
* Clear any pending sequencer interrupt. It is no
* longer relevant since we're resetting the Program
* Counter.
*/
ahc_outb(ahc, CLRINT, CLRSEQINT);
ahc_outb(ahc, MWI_RESIDUAL, 0); ahc_outb(ahc, MWI_RESIDUAL, 0);
ahc_outb(ahc, SEQCTL, ahc->seqctl); ahc_outb(ahc, SEQCTL, ahc->seqctl);
ahc_outb(ahc, SEQADDR0, 0); ahc_outb(ahc, SEQADDR0, 0);
ahc_outb(ahc, SEQADDR1, 0); ahc_outb(ahc, SEQADDR1, 0);
ahc_unpause(ahc); ahc_unpause(ahc);
} }
@@ -1174,19 +1181,20 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
scb_index); scb_index);
} }
#endif #endif
/*
* Force a renegotiation with this target just in
* case the cable was pulled and will later be
* re-attached. The target may forget its negotiation
* settings with us should it attempt to reselect
* during the interruption. The target will not issue
* a unit attention in this case, so we must always
* renegotiate.
*/
ahc_scb_devinfo(ahc, &devinfo, scb); ahc_scb_devinfo(ahc, &devinfo, scb);
ahc_force_renegotiation(ahc, &devinfo);
ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT); ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
ahc_freeze_devq(ahc, scb); ahc_freeze_devq(ahc, scb);
/*
* Cancel any pending transactions on the device
* now that it seems to be missing. This will
* also revert us to async/narrow transfers until
* we can renegotiate with the device.
*/
ahc_handle_devreset(ahc, &devinfo,
CAM_SEL_TIMEOUT,
"Selection Timeout",
/*verbose_level*/1);
} }
ahc_outb(ahc, CLRINT, CLRSCSIINT); ahc_outb(ahc, CLRINT, CLRSCSIINT);
ahc_restart(ahc); ahc_restart(ahc);
@@ -3763,8 +3771,9 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
/*period*/0, /*offset*/0, /*ppr_options*/0, /*period*/0, /*offset*/0, /*ppr_options*/0,
AHC_TRANS_CUR, /*paused*/TRUE); AHC_TRANS_CUR, /*paused*/TRUE);
ahc_send_async(ahc, devinfo->channel, devinfo->target, if (status != CAM_SEL_TIMEOUT)
CAM_LUN_WILDCARD, AC_SENT_BDR, NULL); ahc_send_async(ahc, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR, NULL);
if (message != NULL if (message != NULL
&& (verbose_level <= bootverbose)) && (verbose_level <= bootverbose))
@@ -4003,14 +4012,6 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
* to disturb the integrity of the bus. * to disturb the integrity of the bus.
*/ */
ahc_pause(ahc); ahc_pause(ahc);
if ((ahc_inb(ahc, HCNTRL) & CHIPRST) != 0) {
/*
* The chip has not been initialized since
* PCI/EISA/VLB bus reset. Don't trust
* "left over BIOS data".
*/
ahc->flags |= AHC_NO_BIOS_INIT;
}
sxfrctl1_b = 0; sxfrctl1_b = 0;
if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
u_int sblkctl; u_int sblkctl;
@@ -5036,14 +5037,23 @@ ahc_pause_and_flushwork(struct ahc_softc *ahc)
ahc->flags |= AHC_ALL_INTERRUPTS; ahc->flags |= AHC_ALL_INTERRUPTS;
paused = FALSE; paused = FALSE;
do { do {
if (paused) if (paused) {
ahc_unpause(ahc); ahc_unpause(ahc);
/*
* Give the sequencer some time to service
* any active selections.
*/
ahc_delay(500);
}
ahc_intr(ahc); ahc_intr(ahc);
ahc_pause(ahc); ahc_pause(ahc);
paused = TRUE; paused = TRUE;
ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO); ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
ahc_clear_critical_section(ahc);
intstat = ahc_inb(ahc, INTSTAT); intstat = ahc_inb(ahc, INTSTAT);
if ((intstat & INT_PEND) == 0) {
ahc_clear_critical_section(ahc);
intstat = ahc_inb(ahc, INTSTAT);
}
} while (--maxloops } while (--maxloops
&& (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0) && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
&& ((intstat & INT_PEND) != 0 && ((intstat & INT_PEND) != 0

View File

@@ -125,12 +125,6 @@
static struct scsi_transport_template *ahc_linux_transport_template = NULL; static struct scsi_transport_template *ahc_linux_transport_template = NULL;
/*
* Include aiclib.c as part of our
* "module dependencies are hard" work around.
*/
#include "aiclib.c"
#include <linux/init.h> /* __setup */ #include <linux/init.h> /* __setup */
#include <linux/mm.h> /* For fetching system memory size */ #include <linux/mm.h> /* For fetching system memory size */
#include <linux/blkdev.h> /* For block_size() */ #include <linux/blkdev.h> /* For block_size() */
@@ -391,7 +385,6 @@ static int ahc_linux_run_command(struct ahc_softc*,
struct ahc_linux_device *, struct ahc_linux_device *,
struct scsi_cmnd *); struct scsi_cmnd *);
static void ahc_linux_setup_tag_info_global(char *p); static void ahc_linux_setup_tag_info_global(char *p);
static aic_option_callback_t ahc_linux_setup_tag_info;
static int aic7xxx_setup(char *s); static int aic7xxx_setup(char *s);
static int ahc_linux_unit; static int ahc_linux_unit;
@@ -635,6 +628,8 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
targ->sdev[sdev->lun] = sdev; targ->sdev[sdev->lun] = sdev;
spi_period(starget) = 0;
return 0; return 0;
} }
@@ -918,6 +913,86 @@ ahc_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
} }
} }
static char *
ahc_parse_brace_option(char *opt_name, char *opt_arg, char *end, int depth,
void (*callback)(u_long, int, int, int32_t),
u_long callback_arg)
{
char *tok_end;
char *tok_end2;
int i;
int instance;
int targ;
int done;
char tok_list[] = {'.', ',', '{', '}', '\0'};
/* All options use a ':' name/arg separator */
if (*opt_arg != ':')
return (opt_arg);
opt_arg++;
instance = -1;
targ = -1;
done = FALSE;
/*
* Restore separator that may be in
* the middle of our option argument.
*/
tok_end = strchr(opt_arg, '\0');
if (tok_end < end)
*tok_end = ',';
while (!done) {
switch (*opt_arg) {
case '{':
if (instance == -1) {
instance = 0;
} else {
if (depth > 1) {
if (targ == -1)
targ = 0;
} else {
printf("Malformed Option %s\n",
opt_name);
done = TRUE;
}
}
opt_arg++;
break;
case '}':
if (targ != -1)
targ = -1;
else if (instance != -1)
instance = -1;
opt_arg++;
break;
case ',':
case '.':
if (instance == -1)
done = TRUE;
else if (targ >= 0)
targ++;
else if (instance >= 0)
instance++;
opt_arg++;
break;
case '\0':
done = TRUE;
break;
default:
tok_end = end;
for (i = 0; tok_list[i]; i++) {
tok_end2 = strchr(opt_arg, tok_list[i]);
if ((tok_end2) && (tok_end2 < tok_end))
tok_end = tok_end2;
}
callback(callback_arg, instance, targ,
simple_strtol(opt_arg, NULL, 0));
opt_arg = tok_end;
break;
}
}
return (opt_arg);
}
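Editorial example, not part of the patch: the option syntax this parser accepts, with illustrative values.
/*
 * A boot argument along the lines of
 *
 *	aic7xxx=global_tag_depth:16.tag_info:{{8,8,8,8,8,8,8,8}}
 *
 * would set a global queue depth of 16 and then override targets 0-7 of
 * the first controller to a depth of 8: the outer braces select the
 * controller instance, the inner braces select the target, and ',' or
 * '.' advances to the next one.
 */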
/* /*
* Handle Linux boot parameters. This routine allows for assigning a value * Handle Linux boot parameters. This routine allows for assigning a value
* to a parameter with a ':' between the parameter and the value. * to a parameter with a ':' between the parameter and the value.
@@ -972,7 +1047,7 @@ aic7xxx_setup(char *s)
if (strncmp(p, "global_tag_depth", n) == 0) { if (strncmp(p, "global_tag_depth", n) == 0) {
ahc_linux_setup_tag_info_global(p + n); ahc_linux_setup_tag_info_global(p + n);
} else if (strncmp(p, "tag_info", n) == 0) { } else if (strncmp(p, "tag_info", n) == 0) {
s = aic_parse_brace_option("tag_info", p + n, end, s = ahc_parse_brace_option("tag_info", p + n, end,
2, ahc_linux_setup_tag_info, 0); 2, ahc_linux_setup_tag_info, 0);
} else if (p[n] == ':') { } else if (p[n] == ':') {
*(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
@@ -1612,9 +1687,9 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
if (channel == 'B') if (channel == 'B')
target_offset += 8; target_offset += 8;
starget = ahc->platform_data->starget[target_offset]; starget = ahc->platform_data->starget[target_offset];
targ = scsi_transport_target_data(starget); if (starget == NULL)
if (targ == NULL)
break; break;
targ = scsi_transport_target_data(starget);
target_ppr_options = target_ppr_options =
(spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0) (spi_dt(starget) ? MSG_EXT_PPR_DT_REQ : 0)
@@ -2329,8 +2404,6 @@ ahc_platform_dump_card_state(struct ahc_softc *ahc)
{ {
} }
static void ahc_linux_exit(void);
static void ahc_linux_set_width(struct scsi_target *starget, int width) static void ahc_linux_set_width(struct scsi_target *starget, int width)
{ {
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

View File

@@ -265,7 +265,7 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
/***************************** SMP support ************************************/ /***************************** SMP support ************************************/
#include <linux/spinlock.h> #include <linux/spinlock.h>
#define AIC7XXX_DRIVER_VERSION "6.2.36" #define AIC7XXX_DRIVER_VERSION "7.0"
/*************************** Device Data Structures ***************************/ /*************************** Device Data Structures ***************************/
/* /*

View File

@@ -149,6 +149,27 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
ahc_free(ahc); ahc_free(ahc);
} }
static void
ahc_linux_pci_inherit_flags(struct ahc_softc *ahc)
{
struct pci_dev *pdev = ahc->dev_softc, *master_pdev;
unsigned int master_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
master_pdev = pci_get_slot(pdev->bus, master_devfn);
if (master_pdev) {
struct ahc_softc *master = pci_get_drvdata(master_pdev);
if (master) {
ahc->flags &= ~AHC_BIOS_ENABLED;
ahc->flags |= master->flags & AHC_BIOS_ENABLED;
ahc->flags &= ~AHC_PRIMARY_CHANNEL;
ahc->flags |= master->flags & AHC_PRIMARY_CHANNEL;
} else
printk(KERN_ERR "aic7xxx: no multichannel peer found!\n");
pci_dev_put(master_pdev);
}
}
static int static int
ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
@@ -203,6 +224,14 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahc_free(ahc); ahc_free(ahc);
return (-error); return (-error);
} }
/*
* Second Function PCI devices need to inherit some
* settings from function 0.
*/
if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
ahc_linux_pci_inherit_flags(ahc);
pci_set_drvdata(pdev, ahc); pci_set_drvdata(pdev, ahc);
ahc_linux_register_host(ahc, &aic7xxx_driver_template); ahc_linux_register_host(ahc, &aic7xxx_driver_template);
return (0); return (0);

View File

@@ -54,6 +54,49 @@ static void ahc_dump_device_state(struct info_str *info,
static int ahc_proc_write_seeprom(struct ahc_softc *ahc, static int ahc_proc_write_seeprom(struct ahc_softc *ahc,
char *buffer, int length); char *buffer, int length);
/*
* Table of syncrates that don't follow the "divisible by 4"
* rule. This table will be expanded in future SCSI specs.
*/
static struct {
u_int period_factor;
u_int period; /* in 100ths of ns */
} scsi_syncrates[] = {
{ 0x08, 625 }, /* FAST-160 */
{ 0x09, 1250 }, /* FAST-80 */
{ 0x0a, 2500 }, /* FAST-40 40MHz */
{ 0x0b, 3030 }, /* FAST-40 33MHz */
{ 0x0c, 5000 } /* FAST-20 */
};
/*
* Return the frequency in kHz corresponding to the given
* sync period factor.
*/
static u_int
ahc_calc_syncsrate(u_int period_factor)
{
int i;
int num_syncrates;
num_syncrates = sizeof(scsi_syncrates) / sizeof(scsi_syncrates[0]);
/* See if the period is in the "exception" table */
for (i = 0; i < num_syncrates; i++) {
if (period_factor == scsi_syncrates[i].period_factor) {
/* Period in kHz */
return (100000000 / scsi_syncrates[i].period);
}
}
/*
* Wasn't in the table, so use the standard
* 4 times conversion.
*/
return (10000000 / (period_factor * 4 * 10));
}
static void static void
copy_mem_info(struct info_str *info, char *data, int len) copy_mem_info(struct info_str *info, char *data, int len)
{ {
@@ -106,7 +149,7 @@ ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
speed = 3300; speed = 3300;
freq = 0; freq = 0;
if (tinfo->offset != 0) { if (tinfo->offset != 0) {
freq = aic_calc_syncsrate(tinfo->period); freq = ahc_calc_syncsrate(tinfo->period);
speed = freq; speed = freq;
} }
speed *= (0x01 << tinfo->width); speed *= (0x01 << tinfo->width);

View File

@@ -2,8 +2,8 @@
* DO NOT EDIT - This file is automatically generated * DO NOT EDIT - This file is automatically generated
* from the following source files: * from the following source files:
* *
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
*/ */
typedef int (ahc_reg_print_t)(u_int, u_int *, u_int); typedef int (ahc_reg_print_t)(u_int, u_int *, u_int);
typedef struct ahc_reg_parse_entry { typedef struct ahc_reg_parse_entry {
@@ -1298,7 +1298,6 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
#define CMDSIZE_TABLE_TAIL 0x34 #define CMDSIZE_TABLE_TAIL 0x34
#define MWI_RESIDUAL 0x38 #define MWI_RESIDUAL 0x38
#define TARG_IMMEDIATE_SCB 0x38
#define NEXT_QUEUED_SCB 0x39 #define NEXT_QUEUED_SCB 0x39
@@ -1380,6 +1379,7 @@ ahc_reg_print_t ahc_sg_cache_pre_print;
#define RETURN_2 0x52 #define RETURN_2 0x52
#define LAST_MSG 0x53 #define LAST_MSG 0x53
#define TARG_IMMEDIATE_SCB 0x53
#define SCSISEQ_TEMPLATE 0x54 #define SCSISEQ_TEMPLATE 0x54
#define ENSELO 0x40 #define ENSELO 0x40

View File

@@ -2,8 +2,8 @@
* DO NOT EDIT - This file is automatically generated * DO NOT EDIT - This file is automatically generated
* from the following source files: * from the following source files:
* *
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#56 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#39 $ * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
*/ */
#include "aic7xxx_osm.h" #include "aic7xxx_osm.h"

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -57,121 +57,6 @@
#ifndef _AICLIB_H #ifndef _AICLIB_H
#define _AICLIB_H #define _AICLIB_H
/*
* Linux Interrupt Support.
*/
#ifndef IRQ_RETVAL
typedef void irqreturn_t;
#define IRQ_RETVAL(x)
#endif
/*
* SCSI command format
*/
/*
* Define some bits that are in ALL (or a lot of) scsi commands
*/
#define SCSI_CTL_LINK 0x01
#define SCSI_CTL_FLAG 0x02
#define SCSI_CTL_VENDOR 0xC0
#define SCSI_CMD_LUN 0xA0 /* these two should not be needed */
#define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */
#define SCSI_MAX_CDBLEN 16 /*
* 16 byte commands are in the
* SCSI-3 spec
*/
/* 6byte CDBs special case 0 length to be 256 */
#define SCSI_CDB6_LEN(len) ((len) == 0 ? 256 : len)
/*
* This type defines actions to be taken when a particular sense code is
* received. Right now, these flags are only defined to take up 16 bits,
* but can be expanded in the future if necessary.
*/
typedef enum {
SS_NOP = 0x000000, /* Do nothing */
SS_RETRY = 0x010000, /* Retry the command */
SS_FAIL = 0x020000, /* Bail out */
SS_START = 0x030000, /* Send a Start Unit command to the device,
* then retry the original command.
*/
SS_TUR = 0x040000, /* Send a Test Unit Ready command to the
* device, then retry the original command.
*/
SS_REQSENSE = 0x050000, /* Send a RequestSense command to the
* device, then retry the original command.
*/
SS_INQ_REFRESH = 0x060000,
SS_MASK = 0xff0000
} aic_sense_action;
typedef enum {
SSQ_NONE = 0x0000,
SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */
SSQ_MANY = 0x0200, /* send lots of recovery commands */
SSQ_RANGE = 0x0400, /*
* This table entry represents the
* end of a range of ASCQs that
* have identical error actions
* and text.
*/
SSQ_PRINT_SENSE = 0x0800,
SSQ_DELAY = 0x1000, /* Delay before retry. */
SSQ_DELAY_RANDOM = 0x2000, /* Randomized delay before retry. */
SSQ_FALLBACK = 0x4000, /* Do a speed fallback to recover */
SSQ_MASK = 0xff00
} aic_sense_action_qualifier;
/* Mask for error status values */
#define SS_ERRMASK 0xff
/* The default, retryable, error action */
#define SS_RDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO
/* The retryable, error action, with table specified error code */
#define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
/* Fatal error action, with table specified error code */
#define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE
struct scsi_generic
{
uint8_t opcode;
uint8_t bytes[11];
};
struct scsi_request_sense
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[2];
uint8_t length;
uint8_t control;
};
struct scsi_test_unit_ready
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[3];
uint8_t control;
};
struct scsi_send_diag
{
uint8_t opcode;
uint8_t byte2;
#define SSD_UOL 0x01
#define SSD_DOL 0x02
#define SSD_SELFTEST 0x04
#define SSD_PF 0x10
uint8_t unused[1];
uint8_t paramlen[2];
uint8_t control;
};
struct scsi_sense struct scsi_sense
{ {
uint8_t opcode; uint8_t opcode;
@@ -181,537 +66,12 @@ struct scsi_sense
uint8_t control; uint8_t control;
}; };
struct scsi_inquiry
{
uint8_t opcode;
uint8_t byte2;
#define SI_EVPD 0x01
uint8_t page_code;
uint8_t reserved;
uint8_t length;
uint8_t control;
};
struct scsi_mode_sense_6
{
uint8_t opcode;
uint8_t byte2;
#define SMS_DBD 0x08
uint8_t page;
#define SMS_PAGE_CODE 0x3F
#define SMS_VENDOR_SPECIFIC_PAGE 0x00
#define SMS_DISCONNECT_RECONNECT_PAGE 0x02
#define SMS_PERIPHERAL_DEVICE_PAGE 0x09
#define SMS_CONTROL_MODE_PAGE 0x0A
#define SMS_ALL_PAGES_PAGE 0x3F
#define SMS_PAGE_CTRL_MASK 0xC0
#define SMS_PAGE_CTRL_CURRENT 0x00
#define SMS_PAGE_CTRL_CHANGEABLE 0x40
#define SMS_PAGE_CTRL_DEFAULT 0x80
#define SMS_PAGE_CTRL_SAVED 0xC0
uint8_t unused;
uint8_t length;
uint8_t control;
};
struct scsi_mode_sense_10
{
uint8_t opcode;
uint8_t byte2; /* same bits as small version */
uint8_t page; /* same bits as small version */
uint8_t unused[4];
uint8_t length[2];
uint8_t control;
};
struct scsi_mode_select_6
{
uint8_t opcode;
uint8_t byte2;
#define SMS_SP 0x01
#define SMS_PF 0x10
uint8_t unused[2];
uint8_t length;
uint8_t control;
};
struct scsi_mode_select_10
{
uint8_t opcode;
uint8_t byte2; /* same bits as small version */
uint8_t unused[5];
uint8_t length[2];
uint8_t control;
};
/*
* When sending a mode select to a tape drive, the medium type must be 0.
*/
struct scsi_mode_hdr_6
{
uint8_t datalen;
uint8_t medium_type;
uint8_t dev_specific;
uint8_t block_descr_len;
};
struct scsi_mode_hdr_10
{
uint8_t datalen[2];
uint8_t medium_type;
uint8_t dev_specific;
uint8_t reserved[2];
uint8_t block_descr_len[2];
};
struct scsi_mode_block_descr
{
uint8_t density_code;
uint8_t num_blocks[3];
uint8_t reserved;
uint8_t block_len[3];
};
struct scsi_log_sense
{
uint8_t opcode;
uint8_t byte2;
#define SLS_SP 0x01
#define SLS_PPC 0x02
uint8_t page;
#define SLS_PAGE_CODE 0x3F
#define SLS_ALL_PAGES_PAGE 0x00
#define SLS_OVERRUN_PAGE 0x01
#define SLS_ERROR_WRITE_PAGE 0x02
#define SLS_ERROR_READ_PAGE 0x03
#define SLS_ERROR_READREVERSE_PAGE 0x04
#define SLS_ERROR_VERIFY_PAGE 0x05
#define SLS_ERROR_NONMEDIUM_PAGE 0x06
#define SLS_ERROR_LASTN_PAGE 0x07
#define SLS_PAGE_CTRL_MASK 0xC0
#define SLS_PAGE_CTRL_THRESHOLD 0x00
#define SLS_PAGE_CTRL_CUMULATIVE 0x40
#define SLS_PAGE_CTRL_THRESH_DEFAULT 0x80
#define SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0
uint8_t reserved[2];
uint8_t paramptr[2];
uint8_t length[2];
uint8_t control;
};
struct scsi_log_select
{
uint8_t opcode;
uint8_t byte2;
/* SLS_SP 0x01 */
#define SLS_PCR 0x02
uint8_t page;
/* SLS_PAGE_CTRL_MASK 0xC0 */
/* SLS_PAGE_CTRL_THRESHOLD 0x00 */
/* SLS_PAGE_CTRL_CUMULATIVE 0x40 */
/* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */
/* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */
uint8_t reserved[4];
uint8_t length[2];
uint8_t control;
};
struct scsi_log_header
{
uint8_t page;
uint8_t reserved;
uint8_t datalen[2];
};
struct scsi_log_param_header {
uint8_t param_code[2];
uint8_t param_control;
#define SLP_LP 0x01
#define SLP_LBIN 0x02
#define SLP_TMC_MASK 0x0C
#define SLP_TMC_ALWAYS 0x00
#define SLP_TMC_EQUAL 0x04
#define SLP_TMC_NOTEQUAL 0x08
#define SLP_TMC_GREATER 0x0C
#define SLP_ETC 0x10
#define SLP_TSD 0x20
#define SLP_DS 0x40
#define SLP_DU 0x80
uint8_t param_len;
};
struct scsi_control_page {
uint8_t page_code;
uint8_t page_length;
uint8_t rlec;
#define SCB_RLEC 0x01 /*Report Log Exception Cond*/
uint8_t queue_flags;
#define SCP_QUEUE_ALG_MASK 0xF0
#define SCP_QUEUE_ALG_RESTRICTED 0x00
#define SCP_QUEUE_ALG_UNRESTRICTED 0x10
#define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/
#define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/
uint8_t eca_and_aen;
#define SCP_EECA 0x80 /*Enable Extended CA*/
#define SCP_RAENP 0x04 /*Ready AEN Permission*/
#define SCP_UAAENP 0x02 /*UA AEN Permission*/
#define SCP_EAENP 0x01 /*Error AEN Permission*/
uint8_t reserved;
uint8_t aen_holdoff_period[2];
};
struct scsi_reserve
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[2];
uint8_t length;
uint8_t control;
};
struct scsi_release
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[2];
uint8_t length;
uint8_t control;
};
struct scsi_prevent
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[2];
uint8_t how;
uint8_t control;
};
#define PR_PREVENT 0x01
#define PR_ALLOW 0x00
struct scsi_sync_cache
{
uint8_t opcode;
uint8_t byte2;
uint8_t begin_lba[4];
uint8_t reserved;
uint8_t lb_count[2];
uint8_t control;
};
struct scsi_changedef
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused1;
uint8_t how;
uint8_t unused[4];
uint8_t datalen;
uint8_t control;
};
struct scsi_read_buffer
{
uint8_t opcode;
uint8_t byte2;
#define RWB_MODE 0x07
#define RWB_MODE_HDR_DATA 0x00
#define RWB_MODE_DATA 0x02
#define RWB_MODE_DOWNLOAD 0x04
#define RWB_MODE_DOWNLOAD_SAVE 0x05
uint8_t buffer_id;
uint8_t offset[3];
uint8_t length[3];
uint8_t control;
};
struct scsi_write_buffer
{
uint8_t opcode;
uint8_t byte2;
uint8_t buffer_id;
uint8_t offset[3];
uint8_t length[3];
uint8_t control;
};
struct scsi_rw_6
{
uint8_t opcode;
uint8_t addr[3];
/* only 5 bits are valid in the MSB address byte */
#define SRW_TOPADDR 0x1F
uint8_t length;
uint8_t control;
};
struct scsi_rw_10
{
uint8_t opcode;
#define SRW10_RELADDR 0x01
#define SRW10_FUA 0x08
#define SRW10_DPO 0x10
uint8_t byte2;
uint8_t addr[4];
uint8_t reserved;
uint8_t length[2];
uint8_t control;
};
struct scsi_rw_12
{
uint8_t opcode;
#define SRW12_RELADDR 0x01
#define SRW12_FUA 0x08
#define SRW12_DPO 0x10
uint8_t byte2;
uint8_t addr[4];
uint8_t length[4];
uint8_t reserved;
uint8_t control;
};
struct scsi_start_stop_unit
{
uint8_t opcode;
uint8_t byte2;
#define SSS_IMMED 0x01
uint8_t reserved[2];
uint8_t how;
#define SSS_START 0x01
#define SSS_LOEJ 0x02
uint8_t control;
};
#define SC_SCSI_1 0x01
#define SC_SCSI_2 0x03
/*
* Opcodes
*/
#define TEST_UNIT_READY 0x00
#define REQUEST_SENSE 0x03
#define READ_6 0x08
#define WRITE_6 0x0a
#define INQUIRY 0x12
#define MODE_SELECT_6 0x15
#define MODE_SENSE_6 0x1a
#define START_STOP_UNIT 0x1b
#define START_STOP 0x1b
#define RESERVE 0x16
#define RELEASE 0x17
#define RECEIVE_DIAGNOSTIC 0x1c
#define SEND_DIAGNOSTIC 0x1d
#define PREVENT_ALLOW 0x1e
#define READ_CAPACITY 0x25
#define READ_10 0x28
#define WRITE_10 0x2a
#define POSITION_TO_ELEMENT 0x2b
#define SYNCHRONIZE_CACHE 0x35
#define WRITE_BUFFER 0x3b
#define READ_BUFFER 0x3c
#define CHANGE_DEFINITION 0x40
#define LOG_SELECT 0x4c
#define LOG_SENSE 0x4d
#ifdef XXXCAM
#define MODE_SENSE_10 0x5A
#endif
#define MODE_SELECT_10 0x55
#define MOVE_MEDIUM 0xa5
#define READ_12 0xa8
#define WRITE_12 0xaa
#define READ_ELEMENT_STATUS 0xb8
/*
* Device Types
*/
#define T_DIRECT 0x00
#define T_SEQUENTIAL 0x01
#define T_PRINTER 0x02
#define T_PROCESSOR 0x03
#define T_WORM 0x04
#define T_CDROM 0x05
#define T_SCANNER 0x06
#define T_OPTICAL 0x07
#define T_CHANGER 0x08
#define T_COMM 0x09
#define T_ASC0 0x0a
#define T_ASC1 0x0b
#define T_STORARRAY 0x0c
#define T_ENCLOSURE 0x0d
#define T_RBC 0x0e
#define T_OCRW 0x0f
#define T_NODEVICE 0x1F
#define T_ANY 0xFF /* Used in Quirk table matches */
#define T_REMOV 1
#define T_FIXED 0
/*
* This length is the initial inquiry length used by the probe code, as
* well as the length necessary for aic_print_inquiry() to function
* correctly. If either use requires a different length in the future,
* the two values should be de-coupled.
*/
#define SHORT_INQUIRY_LENGTH 36
struct scsi_inquiry_data
{
uint8_t device;
#define SID_TYPE(inq_data) ((inq_data)->device & 0x1f)
#define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5)
#define SID_QUAL_LU_CONNECTED 0x00 /*
* The specified peripheral device
* type is currently connected to
* logical unit. If the target cannot
* determine whether or not a physical
* device is currently connected, it
* shall also use this peripheral
* qualifier when returning the INQUIRY
* data. This peripheral qualifier
* does not mean that the device is
* ready for access by the initiator.
*/
#define SID_QUAL_LU_OFFLINE 0x01 /*
* The target is capable of supporting
* the specified peripheral device type
* on this logical unit; however, the
* physical device is not currently
* connected to this logical unit.
*/
#define SID_QUAL_RSVD 0x02
#define SID_QUAL_BAD_LU 0x03 /*
* The target is not capable of
* supporting a physical device on
* this logical unit. For this
* peripheral qualifier the peripheral
* device type shall be set to 1Fh to
* provide compatibility with previous
* versions of SCSI. All other
* peripheral device type values are
* reserved for this peripheral
* qualifier.
*/
#define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x08) != 0)
uint8_t dev_qual2;
#define SID_QUAL2 0x7F
#define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & 0x80) != 0)
uint8_t version;
#define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07)
#define SCSI_REV_0 0 #define SCSI_REV_0 0
#define SCSI_REV_CCS 1 #define SCSI_REV_CCS 1
#define SCSI_REV_2 2 #define SCSI_REV_2 2
#define SCSI_REV_SPC 3 #define SCSI_REV_SPC 3
#define SCSI_REV_SPC2 4 #define SCSI_REV_SPC2 4
#define SID_ECMA 0x38
#define SID_ISO 0xC0
uint8_t response_format;
#define SID_AENC 0x80
#define SID_TrmIOP 0x40
uint8_t additional_length;
uint8_t reserved[2];
uint8_t flags;
#define SID_SftRe 0x01
#define SID_CmdQue 0x02
#define SID_Linked 0x08
#define SID_Sync 0x10
#define SID_WBus16 0x20
#define SID_WBus32 0x40
#define SID_RelAdr 0x80
#define SID_VENDOR_SIZE 8
char vendor[SID_VENDOR_SIZE];
#define SID_PRODUCT_SIZE 16
char product[SID_PRODUCT_SIZE];
#define SID_REVISION_SIZE 4
char revision[SID_REVISION_SIZE];
/*
* The following fields were taken from SCSI Primary Commands - 2
* (SPC-2) Revision 14, Dated 11 November 1999
*/
#define SID_VENDOR_SPECIFIC_0_SIZE 20
uint8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE];
/*
* An extension of SCSI Parallel Specific Values
*/
#define SID_SPI_IUS 0x01
#define SID_SPI_QAS 0x02
#define SID_SPI_CLOCK_ST 0x00
#define SID_SPI_CLOCK_DT 0x04
#define SID_SPI_CLOCK_DT_ST 0x0C
#define SID_SPI_MASK 0x0F
uint8_t spi3data;
uint8_t reserved2;
/*
* Version Descriptors, stored 2 byte values.
*/
uint8_t version1[2];
uint8_t version2[2];
uint8_t version3[2];
uint8_t version4[2];
uint8_t version5[2];
uint8_t version6[2];
uint8_t version7[2];
uint8_t version8[2];
uint8_t reserved3[22];
#define SID_VENDOR_SPECIFIC_1_SIZE 160
uint8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE];
};
struct scsi_vpd_unit_serial_number
{
uint8_t device;
uint8_t page_code;
#define SVPD_UNIT_SERIAL_NUMBER 0x80
uint8_t reserved;
uint8_t length; /* serial number length */
#define SVPD_SERIAL_NUM_SIZE 251
uint8_t serial_num[SVPD_SERIAL_NUM_SIZE];
};
struct scsi_read_capacity
{
uint8_t opcode;
uint8_t byte2;
uint8_t addr[4];
uint8_t unused[3];
uint8_t control;
};
struct scsi_read_capacity_data
{
uint8_t addr[4];
uint8_t length[4];
};
struct scsi_report_luns
{
uint8_t opcode;
uint8_t byte2;
uint8_t unused[3];
uint8_t addr[4];
uint8_t control;
};
struct scsi_report_luns_data {
uint8_t length[4]; /* length of LUN inventory, in bytes */
uint8_t reserved[4]; /* unused */
/*
* LUN inventory- we only support the type zero form for now.
*/
struct {
uint8_t lundata[8];
} luns[1];
};
#define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */
#define RPL_LUNDATA_T0LUN 1 /* @ lundata[1] */
struct scsi_sense_data struct scsi_sense_data
{ {
uint8_t error_code; uint8_t error_code;
@@ -757,41 +117,6 @@ struct scsi_sense_data
#define SSD_FULL_SIZE sizeof(struct scsi_sense_data) #define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
}; };
struct scsi_mode_header_6
{
uint8_t data_length; /* Sense data length */
uint8_t medium_type;
uint8_t dev_spec;
uint8_t blk_desc_len;
};
struct scsi_mode_header_10
{
uint8_t data_length[2];/* Sense data length */
uint8_t medium_type;
uint8_t dev_spec;
uint8_t unused[2];
uint8_t blk_desc_len[2];
};
struct scsi_mode_page_header
{
uint8_t page_code;
uint8_t page_length;
};
struct scsi_mode_blk_desc
{
uint8_t density;
uint8_t nblocks[3];
uint8_t reserved;
uint8_t blklen[3];
};
#define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */
#define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */
/* /*
* Status Byte * Status Byte
*/ */
@@ -807,76 +132,7 @@ struct scsi_mode_blk_desc
#define SCSI_STATUS_ACA_ACTIVE 0x30 #define SCSI_STATUS_ACA_ACTIVE 0x30
#define SCSI_STATUS_TASK_ABORTED 0x40 #define SCSI_STATUS_TASK_ABORTED 0x40
struct scsi_inquiry_pattern {
uint8_t type;
uint8_t media_type;
#define SIP_MEDIA_REMOVABLE 0x01
#define SIP_MEDIA_FIXED 0x02
const char *vendor;
const char *product;
const char *revision;
};
struct scsi_static_inquiry_pattern {
uint8_t type;
uint8_t media_type;
char vendor[SID_VENDOR_SIZE+1];
char product[SID_PRODUCT_SIZE+1];
char revision[SID_REVISION_SIZE+1];
};
struct scsi_sense_quirk_entry {
struct scsi_inquiry_pattern inq_pat;
int num_sense_keys;
int num_ascs;
struct sense_key_table_entry *sense_key_info;
struct asc_table_entry *asc_info;
};
struct sense_key_table_entry {
uint8_t sense_key;
uint32_t action;
const char *desc;
};
struct asc_table_entry {
uint8_t asc;
uint8_t ascq;
uint32_t action;
const char *desc;
};
struct op_table_entry {
uint8_t opcode;
uint16_t opmask;
const char *desc;
};
struct scsi_op_quirk_entry {
struct scsi_inquiry_pattern inq_pat;
int num_ops;
struct op_table_entry *op_table;
};
typedef enum {
SSS_FLAG_NONE = 0x00,
SSS_FLAG_PRINT_COMMAND = 0x01
} scsi_sense_string_flags;
extern const char *scsi_sense_key_text[];
/************************* Large Disk Handling ********************************/ /************************* Large Disk Handling ********************************/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
static __inline int aic_sector_div(u_long capacity, int heads, int sectors);
static __inline int
aic_sector_div(u_long capacity, int heads, int sectors)
{
return (capacity / (heads * sectors));
}
#else
static __inline int aic_sector_div(sector_t capacity, int heads, int sectors);
static __inline int static __inline int
aic_sector_div(sector_t capacity, int heads, int sectors) aic_sector_div(sector_t capacity, int heads, int sectors)
{ {
@@ -884,152 +140,6 @@ aic_sector_div(sector_t capacity, int heads, int sectors)
sector_div(capacity, (heads * sectors)); sector_div(capacity, (heads * sectors));
return (int)capacity; return (int)capacity;
} }
#endif
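Editorial aside, not part of the patch: a worked example of the helper above.
/*
 * Illustrative only: with the common translated BIOS geometry of 255
 * heads and 63 sectors per track, a capacity of 16514064 sectors
 * (roughly 8 GB) yields 16514064 / (255 * 63) = 1027 cylinders.
 */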
/**************************** Module Library Hack *****************************/
/*
* What we'd like to do is have a single "scsi library" module that both the
* aic7xxx and aic79xx drivers could load and depend on. A cursory examination
* of implementing module dependencies in Linux (handling the install and
* initrd cases) does not look promising. For now, we just duplicate this
* code in both drivers using a simple symbol renaming scheme that hides this
* hack from the drivers.
*/
#define AIC_LIB_ENTRY_CONCAT(x, prefix) prefix ## x
#define AIC_LIB_ENTRY_EXPAND(x, prefix) AIC_LIB_ENTRY_CONCAT(x, prefix)
#define AIC_LIB_ENTRY(x) AIC_LIB_ENTRY_EXPAND(x, AIC_LIB_PREFIX)
#define aic_sense_desc AIC_LIB_ENTRY(_sense_desc)
#define aic_sense_error_action AIC_LIB_ENTRY(_sense_error_action)
#define aic_error_action AIC_LIB_ENTRY(_error_action)
#define aic_op_desc AIC_LIB_ENTRY(_op_desc)
#define aic_cdb_string AIC_LIB_ENTRY(_cdb_string)
#define aic_print_inquiry AIC_LIB_ENTRY(_print_inquiry)
#define aic_calc_syncsrate AIC_LIB_ENTRY(_calc_syncrate)
#define aic_calc_syncparam AIC_LIB_ENTRY(_calc_syncparam)
#define aic_calc_speed AIC_LIB_ENTRY(_calc_speed)
#define aic_inquiry_match AIC_LIB_ENTRY(_inquiry_match)
#define aic_static_inquiry_match AIC_LIB_ENTRY(_static_inquiry_match)
#define aic_parse_brace_option AIC_LIB_ENTRY(_parse_brace_option)
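Editorial aside, not part of the patch: how the token-pasting scheme above expands, assuming AIC_LIB_PREFIX is defined to the driver's own prefix (e.g. ahc).
/*
 * Illustrative expansion only:
 *   aic_parse_brace_option
 *     -> AIC_LIB_ENTRY(_parse_brace_option)
 *     -> AIC_LIB_ENTRY_EXPAND(_parse_brace_option, AIC_LIB_PREFIX)
 *     -> AIC_LIB_ENTRY_CONCAT(_parse_brace_option, ahc)
 *     -> ahc_parse_brace_option
 * so both drivers can carry a private copy of the shared routines
 * without their symbols colliding.
 */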
/******************************************************************************/
void aic_sense_desc(int /*sense_key*/, int /*asc*/,
int /*ascq*/, struct scsi_inquiry_data*,
const char** /*sense_key_desc*/,
const char** /*asc_desc*/);
aic_sense_action aic_sense_error_action(struct scsi_sense_data*,
struct scsi_inquiry_data*,
uint32_t /*sense_flags*/);
uint32_t aic_error_action(struct scsi_cmnd *,
struct scsi_inquiry_data *,
cam_status, u_int);
#define SF_RETRY_UA 0x01
#define SF_NO_PRINT 0x02
#define SF_QUIET_IR 0x04 /* Be quiet about Illegal Request responses */
#define SF_PRINT_ALWAYS 0x08
const char * aic_op_desc(uint16_t /*opcode*/, struct scsi_inquiry_data*);
char * aic_cdb_string(uint8_t* /*cdb_ptr*/, char* /*cdb_string*/,
size_t /*len*/);
void aic_print_inquiry(struct scsi_inquiry_data*);
u_int aic_calc_syncsrate(u_int /*period_factor*/);
u_int aic_calc_syncparam(u_int /*period*/);
u_int aic_calc_speed(u_int width, u_int period, u_int offset,
u_int min_rate);
int aic_inquiry_match(caddr_t /*inqbuffer*/,
caddr_t /*table_entry*/);
int aic_static_inquiry_match(caddr_t /*inqbuffer*/,
caddr_t /*table_entry*/);
typedef void aic_option_callback_t(u_long, int, int, int32_t);
char * aic_parse_brace_option(char *opt_name, char *opt_arg,
char *end, int depth,
aic_option_callback_t *, u_long);
static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
int *error_code, int *sense_key,
int *asc, int *ascq);
static __inline void scsi_ulto2b(uint32_t val, uint8_t *bytes);
static __inline void scsi_ulto3b(uint32_t val, uint8_t *bytes);
static __inline void scsi_ulto4b(uint32_t val, uint8_t *bytes);
static __inline uint32_t scsi_2btoul(uint8_t *bytes);
static __inline uint32_t scsi_3btoul(uint8_t *bytes);
static __inline int32_t scsi_3btol(uint8_t *bytes);
static __inline uint32_t scsi_4btoul(uint8_t *bytes);
static __inline void scsi_extract_sense(struct scsi_sense_data *sense,
int *error_code, int *sense_key,
int *asc, int *ascq)
{
*error_code = sense->error_code & SSD_ERRCODE;
*sense_key = sense->flags & SSD_KEY;
*asc = (sense->extra_len >= 5) ? sense->add_sense_code : 0;
*ascq = (sense->extra_len >= 6) ? sense->add_sense_code_qual : 0;
}
static __inline void
scsi_ulto2b(uint32_t val, uint8_t *bytes)
{
bytes[0] = (val >> 8) & 0xff;
bytes[1] = val & 0xff;
}
static __inline void
scsi_ulto3b(uint32_t val, uint8_t *bytes)
{
bytes[0] = (val >> 16) & 0xff;
bytes[1] = (val >> 8) & 0xff;
bytes[2] = val & 0xff;
}
static __inline void
scsi_ulto4b(uint32_t val, uint8_t *bytes)
{
bytes[0] = (val >> 24) & 0xff;
bytes[1] = (val >> 16) & 0xff;
bytes[2] = (val >> 8) & 0xff;
bytes[3] = val & 0xff;
}
static __inline uint32_t
scsi_2btoul(uint8_t *bytes)
{
uint32_t rv;
rv = (bytes[0] << 8) |
bytes[1];
return (rv);
}
static __inline uint32_t
scsi_3btoul(uint8_t *bytes)
{
uint32_t rv;
rv = (bytes[0] << 16) |
(bytes[1] << 8) |
bytes[2];
return (rv);
}
static __inline int32_t
scsi_3btol(uint8_t *bytes)
{
uint32_t rc = scsi_3btoul(bytes);
if (rc & 0x00800000)
rc |= 0xff000000;
return (int32_t) rc;
}
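Editorial aside, not part of the patch: worked examples of the byte-packing helpers above.
/*
 * Illustrative only: scsi_ulto3b(0x123456, bytes) stores the big-endian
 * sequence {0x12, 0x34, 0x56} and scsi_3btoul() reverses it.
 * scsi_3btol() additionally sign-extends: {0xff, 0xff, 0xfe} decodes to
 * 0x00fffffe, the 0x00800000 bit is set, so the value becomes
 * 0xfffffffe, i.e. -2.
 */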
static __inline uint32_t static __inline uint32_t
scsi_4btoul(uint8_t *bytes) scsi_4btoul(uint8_t *bytes)

View File

@@ -20,7 +20,6 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ioctl32.h> #include <linux/ioctl32.h>
#include <linux/compat.h> #include <linux/compat.h>
#include <linux/chio.h> /* here are all the ioctls */ #include <linux/chio.h> /* here are all the ioctls */
@@ -31,7 +30,7 @@
#include <scsi/scsi_ioctl.h> #include <scsi/scsi_ioctl.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_request.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h> #include <scsi/scsi_dbg.h>
#define CH_DT_MAX 16 #define CH_DT_MAX 16
@@ -181,17 +180,17 @@ static struct {
/* ------------------------------------------------------------------- */ /* ------------------------------------------------------------------- */
static int ch_find_errno(unsigned char *sense_buffer) static int ch_find_errno(struct scsi_sense_hdr *sshdr)
{ {
int i,errno = 0; int i,errno = 0;
/* Check to see if additional sense information is available */ /* Check to see if additional sense information is available */
if (sense_buffer[7] > 5 && if (scsi_sense_valid(sshdr) &&
sense_buffer[12] != 0) { sshdr->asc != 0) {
for (i = 0; err[i].errno != 0; i++) { for (i = 0; err[i].errno != 0; i++) {
if (err[i].sense == sense_buffer[ 2] && if (err[i].sense == sshdr->sense_key &&
err[i].asc == sense_buffer[12] && err[i].asc == sshdr->asc &&
err[i].ascq == sense_buffer[13]) { err[i].ascq == sshdr->ascq) {
errno = -err[i].errno; errno = -err[i].errno;
break; break;
} }
@@ -207,13 +206,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
void *buffer, unsigned buflength, void *buffer, unsigned buflength,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
int errno, retries = 0, timeout; int errno, retries = 0, timeout, result;
struct scsi_request *sr; struct scsi_sense_hdr sshdr;
sr = scsi_allocate_request(ch->device, GFP_KERNEL);
if (NULL == sr)
return -ENOMEM;
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS) timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move; ? timeout_init : timeout_move;
@@ -224,16 +219,17 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
__scsi_print_command(cmd); __scsi_print_command(cmd);
} }
scsi_wait_req(sr, cmd, buffer, buflength, result = scsi_execute_req(ch->device, cmd, direction, buffer,
timeout * HZ, MAX_RETRIES); buflength, &sshdr, timeout * HZ,
MAX_RETRIES);
dprintk("result: 0x%x\n",sr->sr_result); dprintk("result: 0x%x\n",result);
if (driver_byte(sr->sr_result) & DRIVER_SENSE) { if (driver_byte(result) & DRIVER_SENSE) {
if (debug) if (debug)
scsi_print_req_sense(ch->name, sr); scsi_print_sense_hdr(ch->name, &sshdr);
errno = ch_find_errno(sr->sr_sense_buffer); errno = ch_find_errno(&sshdr);
switch(sr->sr_sense_buffer[2] & 0xf) { switch(sshdr.sense_key) {
case UNIT_ATTENTION: case UNIT_ATTENTION:
ch->unit_attention = 1; ch->unit_attention = 1;
if (retries++ < 3) if (retries++ < 3)
@@ -241,7 +237,6 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
break; break;
} }
} }
scsi_release_request(sr);
return errno; return errno;
} }
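Editorial sketch, not part of the patch: the calling pattern ch_do_scsi() now follows, shown with an illustrative TEST UNIT READY. The function name, CDB and retry policy are assumptions; scsi_execute_req(), scsi_sense_valid() and the sense-header fields are the same ones used above.
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* No data phase, 30 second timeout, up to 3 retries. */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
				  &sshdr, 30 * HZ, 3);
	if ((driver_byte(result) & DRIVER_SENSE) &&
	    scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION)
		return -EAGAIN;	/* device changed state; caller may retry */
	return result ? -EIO : 0;
}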
@@ -940,8 +935,6 @@ static int ch_probe(struct device *dev)
if (init) if (init)
ch_init_elem(ch); ch_init_elem(ch);
devfs_mk_cdev(MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
S_IFCHR | S_IRUGO | S_IWUGO, ch->name);
class_device_create(ch_sysfs_class, class_device_create(ch_sysfs_class,
MKDEV(SCSI_CHANGER_MAJOR,ch->minor), MKDEV(SCSI_CHANGER_MAJOR,ch->minor),
dev, "s%s", ch->name); dev, "s%s", ch->name);
@@ -974,7 +967,6 @@ static int ch_remove(struct device *dev)
class_device_destroy(ch_sysfs_class, class_device_destroy(ch_sysfs_class,
MKDEV(SCSI_CHANGER_MAJOR,ch->minor)); MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
devfs_remove(ch->name);
kfree(ch->dt); kfree(ch->dt);
kfree(ch); kfree(ch);
ch_devcount--; ch_devcount--;

View File

@@ -17,6 +17,7 @@
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_request.h> #include <scsi/scsi_request.h>
#include <scsi/scsi_eh.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
@@ -1155,6 +1156,31 @@ scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
} }
} }
void
scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr)
{
const char *sense_txt;
/* An example of deferred is when an earlier write to disk cache
* succeeded, but now the disk discovers that it cannot write the
* data to the magnetic media.
*/
const char *error = scsi_sense_is_deferred(sshdr) ?
"<<DEFERRED>>" : "Current";
printk(KERN_INFO "%s: %s", name, error);
if (sshdr->response_code >= 0x72)
printk(" [descriptor]");
sense_txt = scsi_sense_key_string(sshdr->sense_key);
if (sense_txt)
printk(": sense key: %s\n", sense_txt);
else
printk(": sense key=0x%x\n", sshdr->sense_key);
printk(KERN_INFO " ");
scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
printk("\n");
}
EXPORT_SYMBOL(scsi_print_sense_hdr);
/* Print sense information */ /* Print sense information */
void void
__scsi_print_sense(const char *name, const unsigned char *sense_buffer, __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
@@ -1162,8 +1188,6 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
{ {
int k, num, res; int k, num, res;
unsigned int info; unsigned int info;
const char *error;
const char *sense_txt;
struct scsi_sense_hdr ssh; struct scsi_sense_hdr ssh;
res = scsi_normalize_sense(sense_buffer, sense_len, &ssh); res = scsi_normalize_sense(sense_buffer, sense_len, &ssh);
@@ -1181,26 +1205,7 @@ __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
printk("\n"); printk("\n");
return; return;
} }
scsi_print_sense_hdr(name, &ssh);
/* An example of deferred is when an earlier write to disk cache
* succeeded, but now the disk discovers that it cannot write the
* data to the magnetic media.
*/
error = scsi_sense_is_deferred(&ssh) ?
"<<DEFERRED>>" : "Current";
printk(KERN_INFO "%s: %s", name, error);
if (ssh.response_code >= 0x72)
printk(" [descriptor]");
sense_txt = scsi_sense_key_string(ssh.sense_key);
if (sense_txt)
printk(": sense key: %s\n", sense_txt);
else
printk(": sense key=0x%x\n", ssh.sense_key);
printk(KERN_INFO " ");
scsi_show_extd_sense(ssh.asc, ssh.ascq);
printk("\n");
if (ssh.response_code < 0x72) { if (ssh.response_code < 0x72) {
/* only decode extras for "fixed" format now */ /* only decode extras for "fixed" format now */
char buff[80]; char buff[80];

View File

@@ -24,6 +24,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
@@ -52,21 +53,80 @@ static struct class shost_class = {
}; };
/** /**
* scsi_host_cancel - cancel outstanding IO to this host * scsi_host_set_state - Take the given host through the host
* @shost: pointer to struct Scsi_Host * state model.
* recovery: recovery requested to run. * @shost: scsi host to change the state of.
* @state: state to change to.
*
* Returns zero if successful or an error if the requested
* transition is illegal.
**/ **/
static void scsi_host_cancel(struct Scsi_Host *shost, int recovery) int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{ {
struct scsi_device *sdev; enum scsi_host_state oldstate = shost->shost_state;
if (state == oldstate)
return 0;
switch (state) {
case SHOST_CREATED:
/* There are no legal states that come back to
* created. This is the manually initialised start
* state */
goto illegal;
case SHOST_RUNNING:
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RECOVERY:
break;
default:
goto illegal;
}
break;
case SHOST_RECOVERY:
switch (oldstate) {
case SHOST_RUNNING:
break;
default:
goto illegal;
}
break;
case SHOST_CANCEL:
switch (oldstate) {
case SHOST_CREATED:
case SHOST_RUNNING:
break;
default:
goto illegal;
}
break;
case SHOST_DEL:
switch (oldstate) {
case SHOST_CANCEL:
break;
default:
goto illegal;
}
break;
set_bit(SHOST_CANCEL, &shost->shost_state);
shost_for_each_device(sdev, shost) {
scsi_device_cancel(sdev, recovery);
} }
wait_event(shost->host_wait, (!test_bit(SHOST_RECOVERY, shost->shost_state = state;
&shost->shost_state))); return 0;
illegal:
SCSI_LOG_ERROR_RECOVERY(1,
dev_printk(KERN_ERR, &shost->shost_gendev,
"Illegal host state transition"
"%s->%s\n",
scsi_host_state_name(oldstate),
scsi_host_state_name(state)));
return -EINVAL;
} }
EXPORT_SYMBOL(scsi_host_set_state);
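A short sketch of the transitions the switch above allows, limited to the states visible in this hunk; the helper itself is illustrative, only scsi_host_set_state() is real API:

#include <linux/kernel.h>
#include <scsi/scsi_host.h>

static void example_walk_host_states(struct Scsi_Host *shost)
{
	/* CREATED -> RUNNING: legal, this is what scsi_add_host() does */
	WARN_ON(scsi_host_set_state(shost, SHOST_RUNNING));

	/* RUNNING -> DEL: illegal, DEL is only reachable from CANCEL */
	WARN_ON(scsi_host_set_state(shost, SHOST_DEL) != -EINVAL);

	/* the tear-down order scsi_remove_host() uses below */
	scsi_host_set_state(shost, SHOST_CANCEL);
	scsi_host_set_state(shost, SHOST_DEL);
}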
/** /**
* scsi_remove_host - remove a scsi host * scsi_remove_host - remove a scsi host
@@ -74,11 +134,13 @@ static void scsi_host_cancel(struct Scsi_Host *shost, int recovery)
**/ **/
void scsi_remove_host(struct Scsi_Host *shost) void scsi_remove_host(struct Scsi_Host *shost)
{ {
down(&shost->scan_mutex);
scsi_host_set_state(shost, SHOST_CANCEL);
up(&shost->scan_mutex);
scsi_forget_host(shost); scsi_forget_host(shost);
scsi_host_cancel(shost, 0);
scsi_proc_host_rm(shost); scsi_proc_host_rm(shost);
set_bit(SHOST_DEL, &shost->shost_state); scsi_host_set_state(shost, SHOST_DEL);
transport_unregister_device(&shost->shost_gendev); transport_unregister_device(&shost->shost_gendev);
class_device_unregister(&shost->shost_classdev); class_device_unregister(&shost->shost_classdev);
@@ -115,7 +177,7 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
if (error) if (error)
goto out; goto out;
set_bit(SHOST_ADD, &shost->shost_state); scsi_host_set_state(shost, SHOST_RUNNING);
get_device(shost->shost_gendev.parent); get_device(shost->shost_gendev.parent);
error = class_device_add(&shost->shost_classdev); error = class_device_add(&shost->shost_classdev);
@@ -164,15 +226,8 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev); struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent; struct device *parent = dev->parent;
if (shost->ehandler) { if (shost->ehandler)
DECLARE_COMPLETION(sem); kthread_stop(shost->ehandler);
shost->eh_notify = &sem;
shost->eh_kill = 1;
up(shost->eh_wait);
wait_for_completion(&sem);
shost->eh_notify = NULL;
}
if (shost->work_q) if (shost->work_q)
destroy_workqueue(shost->work_q); destroy_workqueue(shost->work_q);
@@ -202,7 +257,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{ {
struct Scsi_Host *shost; struct Scsi_Host *shost;
int gfp_mask = GFP_KERNEL, rval; int gfp_mask = GFP_KERNEL, rval;
DECLARE_COMPLETION(complete);
if (sht->unchecked_isa_dma && privsize) if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA; gfp_mask |= __GFP_DMA;
@@ -226,6 +280,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
spin_lock_init(&shost->default_lock); spin_lock_init(&shost->default_lock);
scsi_assign_lock(shost, &shost->default_lock); scsi_assign_lock(shost, &shost->default_lock);
shost->shost_state = SHOST_CREATED;
INIT_LIST_HEAD(&shost->__devices); INIT_LIST_HEAD(&shost->__devices);
INIT_LIST_HEAD(&shost->__targets); INIT_LIST_HEAD(&shost->__targets);
INIT_LIST_HEAD(&shost->eh_cmd_q); INIT_LIST_HEAD(&shost->eh_cmd_q);
@@ -307,12 +362,12 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d", snprintf(shost->shost_classdev.class_id, BUS_ID_SIZE, "host%d",
shost->host_no); shost->host_no);
shost->eh_notify = &complete; shost->ehandler = kthread_run(scsi_error_handler, shost,
rval = kernel_thread(scsi_error_handler, shost, 0); "scsi_eh_%d", shost->host_no);
if (rval < 0) if (IS_ERR(shost->ehandler)) {
rval = PTR_ERR(shost->ehandler);
goto fail_destroy_freelist; goto fail_destroy_freelist;
wait_for_completion(&complete); }
shost->eh_notify = NULL;
scsi_proc_hostdir_add(shost->hostt); scsi_proc_hostdir_add(shost->hostt);
return shost; return shost;
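A hedged sketch of the thread lifecycle the hunk above switches to; the loop body and names are placeholders standing in for scsi_error_handler(), only the kthread_* calls are real API:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <scsi/scsi_host.h>

static int example_eh_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* ... service error-recovery work for the host ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static struct task_struct *example_start_eh(struct Scsi_Host *shost)
{
	struct task_struct *tsk;

	tsk = kthread_run(example_eh_thread, shost, "scsi_eh_%d",
			  shost->host_no);
	if (IS_ERR(tsk))
		return NULL;
	return tsk;		/* shut down later with kthread_stop(tsk) */
}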
@@ -382,7 +437,7 @@ EXPORT_SYMBOL(scsi_host_lookup);
**/ **/
struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
{ {
if (test_bit(SHOST_DEL, &shost->shost_state) || if ((shost->shost_state == SHOST_DEL) ||
!get_device(&shost->shost_gendev)) !get_device(&shost->shost_gendev))
return NULL; return NULL;
return shost; return shost;

View File

@@ -87,7 +87,7 @@ static int max_channel = 3;
static int init_timeout = 5; static int init_timeout = 5;
static int max_requests = 50; static int max_requests = 50;
#define IBMVSCSI_VERSION "1.5.6" #define IBMVSCSI_VERSION "1.5.7"
MODULE_DESCRIPTION("IBM Virtual SCSI"); MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher"); MODULE_AUTHOR("Dave Boutcher");
@@ -145,6 +145,8 @@ static int initialize_event_pool(struct event_pool *pool,
sizeof(*evt->xfer_iu) * i; sizeof(*evt->xfer_iu) * i;
evt->xfer_iu = pool->iu_storage + i; evt->xfer_iu = pool->iu_storage + i;
evt->hostdata = hostdata; evt->hostdata = hostdata;
evt->ext_list = NULL;
evt->ext_list_token = 0;
} }
return 0; return 0;
@@ -161,9 +163,16 @@ static void release_event_pool(struct event_pool *pool,
struct ibmvscsi_host_data *hostdata) struct ibmvscsi_host_data *hostdata)
{ {
int i, in_use = 0; int i, in_use = 0;
for (i = 0; i < pool->size; ++i) for (i = 0; i < pool->size; ++i) {
if (atomic_read(&pool->events[i].free) != 1) if (atomic_read(&pool->events[i].free) != 1)
++in_use; ++in_use;
if (pool->events[i].ext_list) {
dma_free_coherent(hostdata->dev,
SG_ALL * sizeof(struct memory_descriptor),
pool->events[i].ext_list,
pool->events[i].ext_list_token);
}
}
if (in_use) if (in_use)
printk(KERN_WARNING printk(KERN_WARNING
"ibmvscsi: releasing event pool with %d " "ibmvscsi: releasing event pool with %d "
@@ -286,24 +295,41 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
} else { } else {
if (cmd->sc_data_direction == DMA_TO_DEVICE) { if (cmd->sc_data_direction == DMA_TO_DEVICE) {
srp_cmd->data_out_format = SRP_INDIRECT_BUFFER; srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_out_count = numbuf; srp_cmd->data_out_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
} else { } else {
srp_cmd->data_in_format = SRP_INDIRECT_BUFFER; srp_cmd->data_in_format = SRP_INDIRECT_BUFFER;
srp_cmd->data_in_count = numbuf; srp_cmd->data_in_count =
numbuf < MAX_INDIRECT_BUFS ?
numbuf: MAX_INDIRECT_BUFS;
} }
} }
} }
static void unmap_sg_list(int num_entries,
struct device *dev,
struct memory_descriptor *md)
{
int i;
for (i = 0; i < num_entries; ++i) {
dma_unmap_single(dev,
md[i].virtual_address,
md[i].length, DMA_BIDIRECTIONAL);
}
}
/** /**
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
* @cmd: srp_cmd whose additional_data member will be unmapped * @cmd: srp_cmd whose additional_data member will be unmapped
* @dev: device for which the memory is mapped * @dev: device for which the memory is mapped
* *
*/ */
static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev) static void unmap_cmd_data(struct srp_cmd *cmd,
struct srp_event_struct *evt_struct,
struct device *dev)
{ {
int i;
if ((cmd->data_out_format == SRP_NO_BUFFER) && if ((cmd->data_out_format == SRP_NO_BUFFER) &&
(cmd->data_in_format == SRP_NO_BUFFER)) (cmd->data_in_format == SRP_NO_BUFFER))
return; return;
@@ -318,15 +344,34 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
(struct indirect_descriptor *)cmd->additional_data; (struct indirect_descriptor *)cmd->additional_data;
int num_mapped = indirect->head.length / int num_mapped = indirect->head.length /
sizeof(indirect->list[0]); sizeof(indirect->list[0]);
for (i = 0; i < num_mapped; ++i) {
struct memory_descriptor *data = &indirect->list[i]; if (num_mapped <= MAX_INDIRECT_BUFS) {
dma_unmap_single(dev, unmap_sg_list(num_mapped, dev, &indirect->list[0]);
data->virtual_address, return;
data->length, DMA_BIDIRECTIONAL);
} }
unmap_sg_list(num_mapped, dev, evt_struct->ext_list);
} }
} }
static int map_sg_list(int num_entries,
struct scatterlist *sg,
struct memory_descriptor *md)
{
int i;
u64 total_length = 0;
for (i = 0; i < num_entries; ++i) {
struct memory_descriptor *descr = md + i;
struct scatterlist *sg_entry = &sg[i];
descr->virtual_address = sg_dma_address(sg_entry);
descr->length = sg_dma_len(sg_entry);
descr->memory_handle = 0;
total_length += sg_dma_len(sg_entry);
}
return total_length;
}
/** /**
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields * @cmd: Scsi_Cmnd with the scatterlist
* @cmd: Scsi_Cmnd with the scatterlist * @cmd: Scsi_Cmnd with the scatterlist
@@ -337,10 +382,11 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct device *dev)
* Returns 1 on success. * Returns 1 on success.
*/ */
static int map_sg_data(struct scsi_cmnd *cmd, static int map_sg_data(struct scsi_cmnd *cmd,
struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev) struct srp_cmd *srp_cmd, struct device *dev)
{ {
int i, sg_mapped; int sg_mapped;
u64 total_length = 0; u64 total_length = 0;
struct scatterlist *sg = cmd->request_buffer; struct scatterlist *sg = cmd->request_buffer;
struct memory_descriptor *data = struct memory_descriptor *data =
@@ -363,27 +409,46 @@ static int map_sg_data(struct scsi_cmnd *cmd,
return 1; return 1;
} }
if (sg_mapped > MAX_INDIRECT_BUFS) { if (sg_mapped > SG_ALL) {
printk(KERN_ERR printk(KERN_ERR
"ibmvscsi: More than %d mapped sg entries, got %d\n", "ibmvscsi: More than %d mapped sg entries, got %d\n",
MAX_INDIRECT_BUFS, sg_mapped); SG_ALL, sg_mapped);
return 0; return 0;
} }
indirect->head.virtual_address = 0; indirect->head.virtual_address = 0;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]); indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
indirect->head.memory_handle = 0; indirect->head.memory_handle = 0;
for (i = 0; i < sg_mapped; ++i) {
struct memory_descriptor *descr = &indirect->list[i];
struct scatterlist *sg_entry = &sg[i];
descr->virtual_address = sg_dma_address(sg_entry);
descr->length = sg_dma_len(sg_entry);
descr->memory_handle = 0;
total_length += sg_dma_len(sg_entry);
}
indirect->total_length = total_length;
return 1; if (sg_mapped <= MAX_INDIRECT_BUFS) {
total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]);
indirect->total_length = total_length;
return 1;
}
/* get indirect table */
if (!evt_struct->ext_list) {
evt_struct->ext_list =(struct memory_descriptor*)
dma_alloc_coherent(dev,
SG_ALL * sizeof(struct memory_descriptor),
&evt_struct->ext_list_token, 0);
if (!evt_struct->ext_list) {
printk(KERN_ERR
"ibmvscsi: Can't allocate memory for indirect table\n");
return 0;
}
}
total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list);
indirect->total_length = total_length;
indirect->head.virtual_address = evt_struct->ext_list_token;
indirect->head.length = sg_mapped * sizeof(indirect->list[0]);
memcpy(indirect->list, evt_struct->ext_list,
MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor));
return 1;
} }
/** /**
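A reduced sketch of the two-tier descriptor layout introduced here, assuming scatterlists up to MAX_INDIRECT_BUFS stay inline in the SRP command while larger ones spill into a per-event DMA-coherent table sized for SG_ALL entries; the struct and helper names are illustrative:

#include <linux/dma-mapping.h>
#include <scsi/scsi.h>		/* SG_ALL */

struct example_desc {			/* stands in for struct memory_descriptor */
	u64 virtual_address;
	u32 memory_handle;
	u32 length;
};

/* Lazily allocate the external descriptor table; it is kept for the
 * life of the event struct and freed in release_event_pool(). */
static struct example_desc *example_get_ext_list(struct device *dev,
						 struct example_desc **cached,
						 dma_addr_t *token)
{
	if (!*cached)
		*cached = dma_alloc_coherent(dev,
					     SG_ALL * sizeof(**cached),
					     token, GFP_KERNEL);
	return *cached;
}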
@@ -428,6 +493,7 @@ static int map_single_data(struct scsi_cmnd *cmd,
* Returns 1 on success. * Returns 1 on success.
*/ */
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd, static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
struct srp_event_struct *evt_struct,
struct srp_cmd *srp_cmd, struct device *dev) struct srp_cmd *srp_cmd, struct device *dev)
{ {
switch (cmd->sc_data_direction) { switch (cmd->sc_data_direction) {
@@ -450,7 +516,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
if (!cmd->request_buffer) if (!cmd->request_buffer)
return 1; return 1;
if (cmd->use_sg) if (cmd->use_sg)
return map_sg_data(cmd, srp_cmd, dev); return map_sg_data(cmd, evt_struct, srp_cmd, dev);
return map_single_data(cmd, srp_cmd, dev); return map_single_data(cmd, srp_cmd, dev);
} }
@@ -486,6 +552,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
printk(KERN_WARNING printk(KERN_WARNING
"ibmvscsi: Warning, request_limit exceeded\n"); "ibmvscsi: Warning, request_limit exceeded\n");
unmap_cmd_data(&evt_struct->iu.srp.cmd, unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
hostdata->dev); hostdata->dev);
free_event_struct(&hostdata->pool, evt_struct); free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
@@ -513,7 +580,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
return 0; return 0;
send_error: send_error:
unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev); unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
if ((cmnd = evt_struct->cmnd) != NULL) { if ((cmnd = evt_struct->cmnd) != NULL) {
cmnd->result = DID_ERROR << 16; cmnd->result = DID_ERROR << 16;
@@ -551,6 +618,7 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
rsp->sense_and_response_data, rsp->sense_and_response_data,
rsp->sense_data_list_length); rsp->sense_data_list_length);
unmap_cmd_data(&evt_struct->iu.srp.cmd, unmap_cmd_data(&evt_struct->iu.srp.cmd,
evt_struct,
evt_struct->hostdata->dev); evt_struct->hostdata->dev);
if (rsp->doover) if (rsp->doover)
@@ -583,6 +651,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
{ {
struct srp_cmd *srp_cmd; struct srp_cmd *srp_cmd;
struct srp_event_struct *evt_struct; struct srp_event_struct *evt_struct;
struct indirect_descriptor *indirect;
struct ibmvscsi_host_data *hostdata = struct ibmvscsi_host_data *hostdata =
(struct ibmvscsi_host_data *)&cmnd->device->host->hostdata; (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata;
u16 lun = lun_from_dev(cmnd->device); u16 lun = lun_from_dev(cmnd->device);
@@ -591,14 +660,6 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
if (!evt_struct) if (!evt_struct)
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
cmnd->timeout);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
/* Set up the actual SRP IU */ /* Set up the actual SRP IU */
srp_cmd = &evt_struct->iu.srp.cmd; srp_cmd = &evt_struct->iu.srp.cmd;
memset(srp_cmd, 0x00, sizeof(*srp_cmd)); memset(srp_cmd, 0x00, sizeof(*srp_cmd));
@@ -606,17 +667,25 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd));
srp_cmd->lun = ((u64) lun) << 48; srp_cmd->lun = ((u64) lun) << 48;
if (!map_data_for_srp_cmd(cmnd, srp_cmd, hostdata->dev)) { if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n"); printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n");
free_event_struct(&hostdata->pool, evt_struct); free_event_struct(&hostdata->pool, evt_struct);
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
} }
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
cmnd->timeout_per_command/HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
/* Fix up dma address of the buffer itself */ /* Fix up dma address of the buffer itself */
if ((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) || indirect = (struct indirect_descriptor *)srp_cmd->additional_data;
(srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) { if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) ||
struct indirect_descriptor *indirect = (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) &&
(struct indirect_descriptor *)srp_cmd->additional_data; (indirect->head.virtual_address == 0)) {
indirect->head.virtual_address = evt_struct->crq.IU_data_ptr + indirect->head.virtual_address = evt_struct->crq.IU_data_ptr +
offsetof(struct srp_cmd, additional_data) + offsetof(struct srp_cmd, additional_data) +
offsetof(struct indirect_descriptor, list); offsetof(struct indirect_descriptor, list);
@@ -826,11 +895,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
struct srp_event_struct *tmp_evt, *found_evt; struct srp_event_struct *tmp_evt, *found_evt;
union viosrp_iu srp_rsp; union viosrp_iu srp_rsp;
int rsp_rc; int rsp_rc;
unsigned long flags;
u16 lun = lun_from_dev(cmd->device); u16 lun = lun_from_dev(cmd->device);
/* First, find this command in our sent list so we can figure /* First, find this command in our sent list so we can figure
* out the correct tag * out the correct tag
*/ */
spin_lock_irqsave(hostdata->host->host_lock, flags);
found_evt = NULL; found_evt = NULL;
list_for_each_entry(tmp_evt, &hostdata->sent, list) { list_for_each_entry(tmp_evt, &hostdata->sent, list) {
if (tmp_evt->cmnd == cmd) { if (tmp_evt->cmnd == cmd) {
@@ -839,11 +910,14 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
} }
} }
if (!found_evt) if (!found_evt) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return FAILED; return FAILED;
}
evt = get_event_struct(&hostdata->pool); evt = get_event_struct(&hostdata->pool);
if (evt == NULL) { if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n"); printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n");
return FAILED; return FAILED;
} }
@@ -867,7 +941,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp; evt->sync_srp = &srp_rsp;
init_completion(&evt->comp); init_completion(&evt->comp);
if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
printk(KERN_ERR "ibmvscsi: failed to send abort() event\n"); printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
return FAILED; return FAILED;
} }
@@ -901,6 +977,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
* The event is no longer in our list. Make sure it didn't * The event is no longer in our list. Make sure it didn't
* complete while we were aborting * complete while we were aborting
*/ */
spin_lock_irqsave(hostdata->host->host_lock, flags);
found_evt = NULL; found_evt = NULL;
list_for_each_entry(tmp_evt, &hostdata->sent, list) { list_for_each_entry(tmp_evt, &hostdata->sent, list) {
if (tmp_evt->cmnd == cmd) { if (tmp_evt->cmnd == cmd) {
@@ -910,6 +987,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
} }
if (found_evt == NULL) { if (found_evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_INFO printk(KERN_INFO
"ibmvscsi: aborted task tag 0x%lx completed\n", "ibmvscsi: aborted task tag 0x%lx completed\n",
tsk_mgmt->managed_task_tag); tsk_mgmt->managed_task_tag);
@@ -922,8 +1000,10 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
cmd->result = (DID_ABORT << 16); cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list); list_del(&found_evt->list);
unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev); unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
found_evt->hostdata->dev);
free_event_struct(&found_evt->hostdata->pool, found_evt); free_event_struct(&found_evt->hostdata->pool, found_evt);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
atomic_inc(&hostdata->request_limit); atomic_inc(&hostdata->request_limit);
return SUCCESS; return SUCCESS;
} }
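A schematic sketch of the locking rule these abort-handler hunks adopt: host_lock guards only the walks and edits of the sent list and is always dropped before the blocking send-and-wait; apart from the spin_lock_* helpers and the SUCCESS/FAILED return codes, everything here is illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

static int example_abort_pattern(struct Scsi_Host *host,
				 struct list_head *sent_list,
				 int (*send_and_wait)(void *), void *arg)
{
	unsigned long flags;
	struct list_head *pos;
	int found = 0;

	/* walk the sent list only while holding host_lock */
	spin_lock_irqsave(host->host_lock, flags);
	list_for_each(pos, sent_list)
		found = 1;	/* real code matches entries against the cmd */
	spin_unlock_irqrestore(host->host_lock, flags);

	if (!found)
		return FAILED;

	/* the lock must not be held across the blocking send */
	return send_and_wait(arg) ? FAILED : SUCCESS;
}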
@@ -943,10 +1023,13 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
struct srp_event_struct *tmp_evt, *pos; struct srp_event_struct *tmp_evt, *pos;
union viosrp_iu srp_rsp; union viosrp_iu srp_rsp;
int rsp_rc; int rsp_rc;
unsigned long flags;
u16 lun = lun_from_dev(cmd->device); u16 lun = lun_from_dev(cmd->device);
spin_lock_irqsave(hostdata->host->host_lock, flags);
evt = get_event_struct(&hostdata->pool); evt = get_event_struct(&hostdata->pool);
if (evt == NULL) { if (evt == NULL) {
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n"); printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n");
return FAILED; return FAILED;
} }
@@ -969,7 +1052,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
evt->sync_srp = &srp_rsp; evt->sync_srp = &srp_rsp;
init_completion(&evt->comp); init_completion(&evt->comp);
if (ibmvscsi_send_srp_event(evt, hostdata) != 0) { rsp_rc = ibmvscsi_send_srp_event(evt, hostdata);
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
if (rsp_rc != 0) {
printk(KERN_ERR "ibmvscsi: failed to send reset event\n"); printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
return FAILED; return FAILED;
} }
@@ -1002,12 +1087,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* We need to find all commands for this LUN that have not yet been /* We need to find all commands for this LUN that have not yet been
* responded to, and fail them with DID_RESET * responded to, and fail them with DID_RESET
*/ */
spin_lock_irqsave(hostdata->host->host_lock, flags);
list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) { if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
if (tmp_evt->cmnd) if (tmp_evt->cmnd)
tmp_evt->cmnd->result = (DID_RESET << 16); tmp_evt->cmnd->result = (DID_RESET << 16);
list_del(&tmp_evt->list); list_del(&tmp_evt->list);
unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt->hostdata->dev); unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
tmp_evt->hostdata->dev);
free_event_struct(&tmp_evt->hostdata->pool, free_event_struct(&tmp_evt->hostdata->pool,
tmp_evt); tmp_evt);
atomic_inc(&hostdata->request_limit); atomic_inc(&hostdata->request_limit);
@@ -1017,6 +1104,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
tmp_evt->done(tmp_evt); tmp_evt->done(tmp_evt);
} }
} }
spin_unlock_irqrestore(hostdata->host->host_lock, flags);
return SUCCESS; return SUCCESS;
} }
@@ -1035,6 +1123,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata)
if (tmp_evt->cmnd) { if (tmp_evt->cmnd) {
tmp_evt->cmnd->result = (DID_ERROR << 16); tmp_evt->cmnd->result = (DID_ERROR << 16);
unmap_cmd_data(&tmp_evt->iu.srp.cmd, unmap_cmd_data(&tmp_evt->iu.srp.cmd,
tmp_evt,
tmp_evt->hostdata->dev); tmp_evt->hostdata->dev);
if (tmp_evt->cmnd_done) if (tmp_evt->cmnd_done)
tmp_evt->cmnd_done(tmp_evt->cmnd); tmp_evt->cmnd_done(tmp_evt->cmnd);
@@ -1339,7 +1428,7 @@ static struct scsi_host_template driver_template = {
.cmd_per_lun = 16, .cmd_per_lun = 16,
.can_queue = 1, /* Updated after SRP_LOGIN */ .can_queue = 1, /* Updated after SRP_LOGIN */
.this_id = -1, .this_id = -1,
.sg_tablesize = MAX_INDIRECT_BUFS, .sg_tablesize = SG_ALL,
.use_clustering = ENABLE_CLUSTERING, .use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvscsi_attrs, .shost_attrs = ibmvscsi_attrs,
}; };

View File

@@ -68,6 +68,8 @@ struct srp_event_struct {
void (*cmnd_done) (struct scsi_cmnd *); void (*cmnd_done) (struct scsi_cmnd *);
struct completion comp; struct completion comp;
union viosrp_iu *sync_srp; union viosrp_iu *sync_srp;
struct memory_descriptor *ext_list;
dma_addr_t ext_list_token;
}; };
/* a pool of event structs for use */ /* a pool of event structs for use */

View File

@@ -342,9 +342,6 @@ struct lpfc_hba {
#define VPD_MASK 0xf /* mask for any vpd data */ #define VPD_MASK 0xf /* mask for any vpd data */
struct timer_list els_tmofunc; struct timer_list els_tmofunc;
void *link_stats;
/* /*
* stat counters * stat counters
*/ */
@@ -370,6 +367,8 @@ struct lpfc_hba {
struct list_head freebufList; struct list_head freebufList;
struct list_head ctrspbuflist; struct list_head ctrspbuflist;
struct list_head rnidrspbuflist; struct list_head rnidrspbuflist;
struct fc_host_statistics link_stats;
}; };

View File

@@ -23,6 +23,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h> #include <scsi/scsi_tcq.h>
@@ -988,8 +989,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
{ {
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0]; struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
struct lpfc_sli *psli = &phba->sli; struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = struct fc_host_statistics *hs = &phba->link_stats;
(struct fc_host_statistics *)phba->link_stats;
LPFC_MBOXQ_t *pmboxq; LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb; MAILBOX_t *pmb;
int rc=0; int rc=0;
@@ -1020,6 +1020,8 @@ lpfc_get_stats(struct Scsi_Host *shost)
return NULL; return NULL;
} }
memset(hs, 0, sizeof (struct fc_host_statistics));
hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256); hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256);
hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
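A one-struct sketch of the allocation change behind these lpfc hunks: the statistics block is embedded in the hostdata instead of being carved out of extra bytes allocated after it, so the memset() and assignments above operate on storage the compiler can see. The struct name is illustrative; fc_host_statistics is the real transport-class type:

#include <scsi/scsi_transport_fc.h>

struct example_hba {
	/* ... driver state ... */
	struct fc_host_statistics link_stats;	/* was: void *link_stats */
};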

View File

@@ -27,8 +27,10 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h" #include "lpfc_hw.h"
#include "lpfc_sli.h" #include "lpfc_sli.h"

View File

@@ -23,6 +23,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_fc.h>

View File

@@ -24,6 +24,7 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_fc.h>
@@ -1135,6 +1136,8 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
switch(list) { switch(list) {
case NLP_NO_LIST: /* No list, just remove it */ case NLP_NO_LIST: /* No list, just remove it */
lpfc_nlp_remove(phba, nlp); lpfc_nlp_remove(phba, nlp);
/* as node removed - stop further transport calls */
rport_del = none;
break; break;
case NLP_UNUSED_LIST: case NLP_UNUSED_LIST:
spin_lock_irq(phba->host->host_lock); spin_lock_irq(phba->host->host_lock);

View File

@@ -28,6 +28,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_fc.h>
@@ -1339,14 +1340,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
goto out_disable_device; goto out_disable_device;
host = scsi_host_alloc(&lpfc_template, host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba));
sizeof (struct lpfc_hba) + sizeof (unsigned long));
if (!host) if (!host)
goto out_release_regions; goto out_release_regions;
phba = (struct lpfc_hba*)host->hostdata; phba = (struct lpfc_hba*)host->hostdata;
memset(phba, 0, sizeof (struct lpfc_hba)); memset(phba, 0, sizeof (struct lpfc_hba));
phba->link_stats = (void *)&phba[1];
phba->host = host; phba->host = host;
phba->fc_flag |= FC_LOADING; phba->fc_flag |= FC_LOADING;

View File

@@ -23,6 +23,11 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi.h>
#include "lpfc_hw.h" #include "lpfc_hw.h"
#include "lpfc_sli.h" #include "lpfc_sli.h"
#include "lpfc_disc.h" #include "lpfc_disc.h"

View File

@@ -23,6 +23,11 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi.h>
#include "lpfc_hw.h" #include "lpfc_hw.h"
#include "lpfc_sli.h" #include "lpfc_sli.h"
#include "lpfc_disc.h" #include "lpfc_disc.h"

View File

@@ -23,6 +23,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_fc.h>

View File

@@ -40,11 +40,6 @@
#define LPFC_RESET_WAIT 2 #define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2 #define LPFC_ABORT_WAIT 2
static inline void lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
{
fcmd->fcpLunLsl = 0;
fcmd->fcpLunMsl = swab16((uint16_t)lun);
}
/* /*
* This routine allocates a scsi buffer, which contains all the necessary * This routine allocates a scsi buffer, which contains all the necessary
@@ -238,6 +233,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
if (datadir == DMA_TO_DEVICE) if (datadir == DMA_TO_DEVICE)
bpl->tus.f.bdeFlags = 0; bpl->tus.f.bdeFlags = 0;
else
bpl->tus.f.bdeFlags = BUFF_USE_RCV;
bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde = 1; num_bde = 1;
bpl++; bpl++;
@@ -245,8 +242,11 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
/* /*
* Finish initializing those IOCB fields that are dependent on the * Finish initializing those IOCB fields that are dependent on the
* scsi_cmnd request_buffer * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
* reinitialized since all iocb memory resources are used many times
* for transmit, receive, and continuation bpl's.
*/ */
iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
iocb_cmd->un.fcpi64.bdl.bdeSize += iocb_cmd->un.fcpi64.bdl.bdeSize +=
(num_bde * sizeof (struct ulp_bde64)); (num_bde * sizeof (struct ulp_bde64));
iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpBdeCount = 1;
@@ -445,8 +445,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
int datadir = scsi_cmnd->sc_data_direction; int datadir = scsi_cmnd->sc_data_direction;
lpfc_cmd->fcp_rsp->rspSnsLen = 0; lpfc_cmd->fcp_rsp->rspSnsLen = 0;
/* clear task management bits */
lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun); int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16); memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
@@ -545,7 +548,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
piocb = &piocbq->iocb; piocb = &piocbq->iocb;
fcp_cmnd = lpfc_cmd->fcp_cmnd; fcp_cmnd = lpfc_cmd->fcp_cmnd;
lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun); int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
fcp_cmnd->fcpCntl2 = task_mgmt_cmd; fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
piocb->ulpCommand = CMD_FCP_ICMND64_CR; piocb->ulpCommand = CMD_FCP_ICMND64_CR;
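A hedged illustration of the midlayer helper both hunks switch to (declaration assumed to be in <scsi/scsi_device.h>): int_to_scsilun() packs an integer LUN into the 8-byte SAM LUN field two bytes at a time, most significant byte first, replacing the hand-rolled lpfc_put_lun()/FC_LUN_SHIFT arithmetic this patch removes:

#include <scsi/scsi_device.h>

static void example_show_lun_encoding(void)
{
	struct scsi_lun wire;

	int_to_scsilun(5, &wire);
	/* wire.scsi_lun now holds 00 05 00 00 00 00 00 00 */
}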
@@ -746,6 +750,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = ScsiResult(DID_NO_CONNECT, 0); cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
goto out_fail_command; goto out_fail_command;
} }
else if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
goto out_fail_command;
}
/* /*
* The device is most likely recovered and the driver * The device is most likely recovered and the driver
* needs a bit more time to finish. Ask the midlayer * needs a bit more time to finish. Ask the midlayer

View File

@@ -78,18 +78,7 @@ struct fcp_rsp {
}; };
struct fcp_cmnd { struct fcp_cmnd {
uint32_t fcpLunMsl; /* most significant lun word (32 bits) */ struct scsi_lun fcp_lun;
uint32_t fcpLunLsl; /* least significant lun word (32 bits) */
/* # of bits to shift lun id to end up in right
* payload word, little endian = 8, big = 16.
*/
#ifdef __BIG_ENDIAN
#define FC_LUN_SHIFT 16
#define FC_ADDR_MODE_SHIFT 24
#else /* __LITTLE_ENDIAN */
#define FC_LUN_SHIFT 8
#define FC_ADDR_MODE_SHIFT 0
#endif
uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */ uint8_t fcpCntl0; /* FCP_CNTL byte 0 (reserved) */
uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */ uint8_t fcpCntl1; /* FCP_CNTL byte 1 task codes */

View File

@@ -24,9 +24,11 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h> #include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw.h" #include "lpfc_hw.h"
#include "lpfc_sli.h" #include "lpfc_sli.h"

View File

@@ -18,7 +18,7 @@
* included with this package. * * included with this package. *
*******************************************************************/ *******************************************************************/
#define LPFC_DRIVER_VERSION "8.0.29" #define LPFC_DRIVER_VERSION "8.0.30"
#define LPFC_DRIVER_NAME "lpfc" #define LPFC_DRIVER_NAME "lpfc"

View File

@@ -996,7 +996,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
break; break;
case ABORT_DEVICE: case ABORT_DEVICE:
ha->flags.in_reset = 1;
if (qla1280_verbose) if (qla1280_verbose)
printk(KERN_INFO printk(KERN_INFO
"scsi(%ld:%d:%d:%d): Queueing abort device " "scsi(%ld:%d:%d:%d): Queueing abort device "
@@ -1010,7 +1009,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
printk(KERN_INFO printk(KERN_INFO
"scsi(%ld:%d:%d:%d): Queueing device reset " "scsi(%ld:%d:%d:%d): Queueing device reset "
"command.\n", ha->host_no, bus, target, lun); "command.\n", ha->host_no, bus, target, lun);
ha->flags.in_reset = 1;
if (qla1280_device_reset(ha, bus, target) == 0) if (qla1280_device_reset(ha, bus, target) == 0)
result = SUCCESS; result = SUCCESS;
break; break;
@@ -1019,7 +1017,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
if (qla1280_verbose) if (qla1280_verbose)
printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS " printk(KERN_INFO "qla1280(%ld:%d): Issuing BUS "
"DEVICE RESET\n", ha->host_no, bus); "DEVICE RESET\n", ha->host_no, bus);
ha->flags.in_reset = 1;
if (qla1280_bus_reset(ha, bus == 0)) if (qla1280_bus_reset(ha, bus == 0))
result = SUCCESS; result = SUCCESS;
@@ -1047,7 +1044,6 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
if (!list_empty(&ha->done_q)) if (!list_empty(&ha->done_q))
qla1280_done(ha); qla1280_done(ha);
ha->flags.in_reset = 0;
/* If we didn't manage to issue the action, or we have no /* If we didn't manage to issue the action, or we have no
* command to wait for, exit here */ * command to wait for, exit here */
@@ -1269,6 +1265,22 @@ qla1280_biosparam_old(Disk * disk, kdev_t dev, int geom[])
return qla1280_biosparam(disk->device, NULL, disk->capacity, geom); return qla1280_biosparam(disk->device, NULL, disk->capacity, geom);
} }
#endif #endif
/* disable risc and host interrupts */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
WRT_REG_WORD(&ha->iobase->ictrl, 0);
RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
}
/* enable risc and host interrupts */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
}
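A generic sketch of the idiom both helpers above rely on: reading the register back forces the preceding MMIO write out of the PCI bridge's posting buffers before execution continues. The helper name is illustrative:

#include <asm/io.h>

static inline void example_write_then_flush(void __iomem *reg, u16 val)
{
	writew(val, reg);	/* the write may sit in a posting buffer */
	(void)readw(reg);	/* readback forces it to reach the device */
}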
/************************************************************************** /**************************************************************************
* qla1280_intr_handler * qla1280_intr_handler
@@ -1290,7 +1302,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
ha->isr_count++; ha->isr_count++;
reg = ha->iobase; reg = ha->iobase;
WRT_REG_WORD(&reg->ictrl, 0); /* disable our interrupt. */ qla1280_disable_intrs(ha);
data = qla1280_debounce_register(&reg->istatus); data = qla1280_debounce_register(&reg->istatus);
/* Check for pending interrupts. */ /* Check for pending interrupts. */
@@ -1303,8 +1315,7 @@ qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
spin_unlock(HOST_LOCK); spin_unlock(HOST_LOCK);
/* enable our interrupt. */ qla1280_enable_intrs(ha);
WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
LEAVE_INTR("qla1280_intr_handler"); LEAVE_INTR("qla1280_intr_handler");
return IRQ_RETVAL(handled); return IRQ_RETVAL(handled);
@@ -1317,7 +1328,7 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
uint8_t mr; uint8_t mr;
uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t mb[MAILBOX_REGISTER_COUNT];
struct nvram *nv; struct nvram *nv;
int status; int status, lun;
nv = &ha->nvram; nv = &ha->nvram;
@@ -1325,24 +1336,38 @@ qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
/* Set Target Parameters. */ /* Set Target Parameters. */
mb[0] = MBC_SET_TARGET_PARAMETERS; mb[0] = MBC_SET_TARGET_PARAMETERS;
mb[1] = (uint16_t) (bus ? target | BIT_7 : target); mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
mb[1] <<= 8; mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
mb[2] = (nv->bus[bus].target[target].parameter.c << 8); mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
if (IS_ISP1x160(ha)) { if (IS_ISP1x160(ha)) {
mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5; mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8) | mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
nv->bus[bus].target[target].sync_period;
mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) | mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width; nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
mr |= BIT_6; mr |= BIT_6;
} else { } else {
mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8) | mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
nv->bus[bus].target[target].sync_period;
} }
mb[3] |= nv->bus[bus].target[target].sync_period;
status = qla1280_mailbox_command(ha, mr, &mb[0]); status = qla1280_mailbox_command(ha, mr, mb);
/* Set Device Queue Parameters. */
for (lun = 0; lun < MAX_LUNS; lun++) {
mb[0] = MBC_SET_DEVICE_QUEUE;
mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
mb[1] |= lun;
mb[2] = nv->bus[bus].max_queue_depth;
mb[3] = nv->bus[bus].target[target].execution_throttle;
status |= qla1280_mailbox_command(ha, 0x0f, mb);
}
if (status) if (status)
printk(KERN_WARNING "scsi(%ld:%i:%i): " printk(KERN_WARNING "scsi(%ld:%i:%i): "
@@ -1389,19 +1414,19 @@ qla1280_slave_configure(struct scsi_device *device)
} }
#if LINUX_VERSION_CODE > 0x020500 #if LINUX_VERSION_CODE > 0x020500
nv->bus[bus].target[target].parameter.f.enable_sync = device->sdtr; nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
nv->bus[bus].target[target].parameter.f.enable_wide = device->wdtr; nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr; nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
#endif #endif
if (driver_setup.no_sync || if (driver_setup.no_sync ||
(driver_setup.sync_mask && (driver_setup.sync_mask &&
(~driver_setup.sync_mask & (1 << target)))) (~driver_setup.sync_mask & (1 << target))))
nv->bus[bus].target[target].parameter.f.enable_sync = 0; nv->bus[bus].target[target].parameter.enable_sync = 0;
if (driver_setup.no_wide || if (driver_setup.no_wide ||
(driver_setup.wide_mask && (driver_setup.wide_mask &&
(~driver_setup.wide_mask & (1 << target)))) (~driver_setup.wide_mask & (1 << target))))
nv->bus[bus].target[target].parameter.f.enable_wide = 0; nv->bus[bus].target[target].parameter.enable_wide = 0;
if (IS_ISP1x160(ha)) { if (IS_ISP1x160(ha)) {
if (driver_setup.no_ppr || if (driver_setup.no_ppr ||
(driver_setup.ppr_mask && (driver_setup.ppr_mask &&
@@ -1410,7 +1435,7 @@ qla1280_slave_configure(struct scsi_device *device)
} }
spin_lock_irqsave(HOST_LOCK, flags); spin_lock_irqsave(HOST_LOCK, flags);
if (nv->bus[bus].target[target].parameter.f.enable_sync) if (nv->bus[bus].target[target].parameter.enable_sync)
status = qla1280_set_target_parameters(ha, bus, target); status = qla1280_set_target_parameters(ha, bus, target);
qla1280_get_target_parameters(ha, device); qla1280_get_target_parameters(ha, device);
spin_unlock_irqrestore(HOST_LOCK, flags); spin_unlock_irqrestore(HOST_LOCK, flags);
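A reduced sketch of the nvram cleanup running through these qla1280 hunks, assuming the bit layout visible in the new mailbox packing (bits 8..15 of mailbox word 2): the old union { uint8_t c; struct ... f; } accessor pair becomes a plain bitfield struct, and the raw byte is rebuilt explicitly where a mailbox needs it. The struct is trimmed to the fields used here:

#include <linux/types.h>

struct example_target_param {		/* stands in for the nvram parameter field */
	uint8_t renegotiate_on_error:1;
	uint8_t stop_queue_on_check:1;
	uint8_t auto_request_sense:1;
	uint8_t tag_queuing:1;
	uint8_t enable_sync:1;
	uint8_t enable_wide:1;
	uint8_t parity_checking:1;
	uint8_t disconnect_allowed:1;
};

/* mailbox word 2 carries the same bits shifted into the high byte */
static inline uint16_t example_param_to_mb2(struct example_target_param p)
{
	return (p.renegotiate_on_error << 8) |
	       (p.stop_queue_on_check  << 9) |
	       (p.auto_request_sense   << 10) |
	       (p.tag_queuing          << 11) |
	       (p.enable_sync          << 12) |
	       (p.enable_wide          << 13) |
	       (p.parity_checking      << 14) |
	       (p.disconnect_allowed   << 15);
}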
@@ -1448,7 +1473,6 @@ qla1280_select_queue_depth(struct Scsi_Host *host, struct scsi_device *sdev_q)
* *
* Input: * Input:
* ha = adapter block pointer. * ha = adapter block pointer.
* done_q = done queue.
*/ */
static void static void
qla1280_done(struct scsi_qla_host *ha) qla1280_done(struct scsi_qla_host *ha)
@@ -1522,7 +1546,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
int host_status = DID_ERROR; int host_status = DID_ERROR;
uint16_t comp_status = le16_to_cpu(sts->comp_status); uint16_t comp_status = le16_to_cpu(sts->comp_status);
uint16_t state_flags = le16_to_cpu(sts->state_flags); uint16_t state_flags = le16_to_cpu(sts->state_flags);
uint16_t residual_length = le16_to_cpu(sts->residual_length); uint16_t residual_length = le32_to_cpu(sts->residual_length);
uint16_t scsi_status = le16_to_cpu(sts->scsi_status); uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR #if DEBUG_QLA1280_INTR
static char *reason[] = { static char *reason[] = {
@@ -1582,7 +1606,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
case CS_DATA_OVERRUN: case CS_DATA_OVERRUN:
dprintk(2, "Data overrun 0x%x\n", residual_length); dprintk(2, "Data overrun 0x%x\n", residual_length);
dprintk(2, "qla1280_isr: response packet data\n"); dprintk(2, "qla1280_return_status: response packet data\n");
qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE); qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
host_status = DID_ERROR; host_status = DID_ERROR;
break; break;
@@ -1617,40 +1641,6 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
/* QLogic ISP1280 Hardware Support Functions. */ /* QLogic ISP1280 Hardware Support Functions. */
/****************************************************************************/ /****************************************************************************/
/*
* qla2100_enable_intrs
* qla2100_disable_intrs
*
* Input:
* ha = adapter block pointer.
*
* Returns:
* None
*/
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
struct device_reg __iomem *reg;
reg = ha->iobase;
/* enable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, (ISP_EN_INT | ISP_EN_RISC));
RD_REG_WORD(&reg->ictrl); /* PCI Posted Write flush */
ha->flags.ints_enabled = 1;
}
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
struct device_reg __iomem *reg;
reg = ha->iobase;
/* disable risc and host interrupts */
WRT_REG_WORD(&reg->ictrl, 0);
RD_REG_WORD(&reg->ictrl); /* PCI Posted Write flush */
ha->flags.ints_enabled = 0;
}
/* /*
* qla1280_initialize_adapter * qla1280_initialize_adapter
* Initialize board. * Initialize board.
@@ -1679,7 +1669,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
ha->flags.reset_active = 0; ha->flags.reset_active = 0;
ha->flags.abort_isp_active = 0; ha->flags.abort_isp_active = 0;
ha->flags.ints_enabled = 0;
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
if (ia64_platform_is("sn2")) { if (ia64_platform_is("sn2")) {
printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
@@ -1758,69 +1747,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha)
return status; return status;
} }
/*
* ISP Firmware Test
* Checks if present version of RISC firmware is older than
* driver firmware.
*
* Input:
* ha = adapter block pointer.
*
* Returns:
* 0 = firmware does not need to be loaded.
*/
static int
qla1280_isp_firmware(struct scsi_qla_host *ha)
{
struct nvram *nv = (struct nvram *) ha->response_ring;
int status = 0; /* dg 2/27 always loads RISC */
uint16_t mb[MAILBOX_REGISTER_COUNT];
ENTER("qla1280_isp_firmware");
dprintk(1, "scsi(%li): Determining if RISC is loaded\n", ha->host_no);
/* Bad NVRAM data, load RISC code. */
if (!ha->nvram_valid) {
ha->flags.disable_risc_code_load = 0;
} else
ha->flags.disable_risc_code_load =
nv->cntr_flags_1.disable_loading_risc_code;
if (ha->flags.disable_risc_code_load) {
dprintk(3, "qla1280_isp_firmware: Telling RISC to verify "
"checksum of loaded BIOS code.\n");
/* Verify checksum of loaded RISC code. */
mb[0] = MBC_VERIFY_CHECKSUM;
/* mb[1] = ql12_risc_code_addr01; */
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
if (!(status =
qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]))) {
/* Start firmware execution. */
dprintk(3, "qla1280_isp_firmware: Startng F/W "
"execution.\n");
mb[0] = MBC_EXECUTE_FIRMWARE;
/* mb[1] = ql12_risc_code_addr01; */
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
} else
printk(KERN_INFO "qla1280: RISC checksum failed.\n");
} else {
dprintk(1, "qla1280: NVRAM configured to load RISC load.\n");
status = 1;
}
if (status)
dprintk(2, "qla1280_isp_firmware: **** Load RISC code ****\n");
LEAVE("qla1280_isp_firmware");
return status;
}
/* /*
* Chip diagnostics * Chip diagnostics
* Test chip for proper operation. * Test chip for proper operation.
@@ -2006,7 +1932,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
"%d,%d(0x%x)\n", "%d,%d(0x%x)\n",
risc_code_address, cnt, num, risc_address); risc_code_address, cnt, num, risc_address);
for(i = 0; i < cnt; i++) for(i = 0; i < cnt; i++)
((uint16_t *)ha->request_ring)[i] = ((__le16 *)ha->request_ring)[i] =
cpu_to_le16(risc_code_address[i]); cpu_to_le16(risc_code_address[i]);
mb[0] = MBC_LOAD_RAM; mb[0] = MBC_LOAD_RAM;
@@ -2085,7 +2011,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
if (err) { if (err) {
printk(KERN_ERR "scsi(%li): Failed checksum\n", ha->host_no); printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
return err; return err;
} }
@@ -2105,14 +2031,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
static int static int
qla1280_load_firmware(struct scsi_qla_host *ha) qla1280_load_firmware(struct scsi_qla_host *ha)
{ {
int err = -ENODEV; int err;
/* If firmware needs to be loaded */
if (!qla1280_isp_firmware(ha)) {
printk(KERN_ERR "scsi(%li): isp_firmware() failed!\n",
ha->host_no);
goto out;
}
err = qla1280_chip_diag(ha); err = qla1280_chip_diag(ha);
if (err) if (err)
@@ -2246,17 +2165,17 @@ qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
{ {
struct nvram *nv = &ha->nvram; struct nvram *nv = &ha->nvram;
nv->bus[bus].target[target].parameter.f.renegotiate_on_error = 1; nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
nv->bus[bus].target[target].parameter.f.auto_request_sense = 1; nv->bus[bus].target[target].parameter.auto_request_sense = 1;
nv->bus[bus].target[target].parameter.f.tag_queuing = 1; nv->bus[bus].target[target].parameter.tag_queuing = 1;
nv->bus[bus].target[target].parameter.f.enable_sync = 1; nv->bus[bus].target[target].parameter.enable_sync = 1;
#if 1 /* Some SCSI Processors do not seem to like this */ #if 1 /* Some SCSI Processors do not seem to like this */
nv->bus[bus].target[target].parameter.f.enable_wide = 1; nv->bus[bus].target[target].parameter.enable_wide = 1;
#endif #endif
nv->bus[bus].target[target].parameter.f.parity_checking = 1;
nv->bus[bus].target[target].parameter.f.disconnect_allowed = 1;
nv->bus[bus].target[target].execution_throttle = nv->bus[bus].target[target].execution_throttle =
nv->bus[bus].max_queue_depth - 1; nv->bus[bus].max_queue_depth - 1;
nv->bus[bus].target[target].parameter.parity_checking = 1;
nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
if (IS_ISP1x160(ha)) { if (IS_ISP1x160(ha)) {
nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
@@ -2284,9 +2203,9 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
/* nv->cntr_flags_1.disable_loading_risc_code = 1; */ /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
nv->firmware_feature.f.enable_fast_posting = 1; nv->firmware_feature.f.enable_fast_posting = 1;
nv->firmware_feature.f.disable_synchronous_backoff = 1; nv->firmware_feature.f.disable_synchronous_backoff = 1;
nv->termination.f.scsi_bus_0_control = 3; nv->termination.scsi_bus_0_control = 3;
nv->termination.f.scsi_bus_1_control = 3; nv->termination.scsi_bus_1_control = 3;
nv->termination.f.auto_term_support = 1; nv->termination.auto_term_support = 1;
/* /*
* Set default FIFO magic - What appropriate values would be here * Set default FIFO magic - What appropriate values would be here
@@ -2296,7 +2215,12 @@ qla1280_set_defaults(struct scsi_qla_host *ha)
* header file provided by QLogic seems to be bogus or incomplete * header file provided by QLogic seems to be bogus or incomplete
* at best. * at best.
*/ */
nv->isp_config.c = ISP_CFG1_BENAB|ISP_CFG1_F128; nv->isp_config.burst_enable = 1;
if (IS_ISP1040(ha))
nv->isp_config.fifo_threshold |= 3;
else
nv->isp_config.fifo_threshold |= 4;
if (IS_ISP1x160(ha)) if (IS_ISP1x160(ha))
nv->isp_parameter = 0x01; /* fast memory enable */ nv->isp_parameter = 0x01; /* fast memory enable */
@@ -2327,66 +2251,53 @@ qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
struct nvram *nv = &ha->nvram; struct nvram *nv = &ha->nvram;
uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t mb[MAILBOX_REGISTER_COUNT];
int status, lun; int status, lun;
uint16_t flag;
/* Set Target Parameters. */ /* Set Target Parameters. */
mb[0] = MBC_SET_TARGET_PARAMETERS; mb[0] = MBC_SET_TARGET_PARAMETERS;
mb[1] = (uint16_t) (bus ? target | BIT_7 : target); mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
mb[1] <<= 8;
/* /*
* Do not enable wide, sync, and ppr for the initial * Do not enable sync and ppr for the initial INQUIRY run. We
* INQUIRY run. We enable this later if we determine * enable this later if we determine the target actually
* the target actually supports it. * supports it.
*/ */
nv->bus[bus].target[target].parameter.f. mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
auto_request_sense = 1; | TP_WIDE | TP_PARITY | TP_DISCONNECT);
nv->bus[bus].target[target].parameter.f.
stop_queue_on_check = 0;
if (IS_ISP1x160(ha))
nv->bus[bus].target[target].ppr_1x160.
flags.enable_ppr = 0;
/*
* No sync, wide, etc. while probing
*/
mb[2] = (nv->bus[bus].target[target].parameter.c << 8) &
~(TP_SYNC /*| TP_WIDE | TP_PPR*/);
if (IS_ISP1x160(ha)) if (IS_ISP1x160(ha))
mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
else else
mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
mb[3] |= nv->bus[bus].target[target].sync_period; mb[3] |= nv->bus[bus].target[target].sync_period;
status = qla1280_mailbox_command(ha, 0x0f, mb);
status = qla1280_mailbox_command(ha, BIT_3 | BIT_2 | BIT_1 | BIT_0, &mb[0]);
/* Save Tag queuing enable flag. */ /* Save Tag queuing enable flag. */
mb[0] = BIT_0 << target; flag = (BIT_0 << target) & mb[0];
if (nv->bus[bus].target[target].parameter.f.tag_queuing) if (nv->bus[bus].target[target].parameter.tag_queuing)
ha->bus_settings[bus].qtag_enables |= mb[0]; ha->bus_settings[bus].qtag_enables |= flag;
/* Save Device enable flag. */ /* Save Device enable flag. */
if (IS_ISP1x160(ha)) { if (IS_ISP1x160(ha)) {
if (nv->bus[bus].target[target].flags.flags1x160.device_enable) if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
ha->bus_settings[bus].device_enables |= mb[0]; ha->bus_settings[bus].device_enables |= flag;
ha->bus_settings[bus].lun_disables |= 0; ha->bus_settings[bus].lun_disables |= 0;
} else { } else {
if (nv->bus[bus].target[target].flags.flags1x80.device_enable) if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
ha->bus_settings[bus].device_enables |= mb[0]; ha->bus_settings[bus].device_enables |= flag;
/* Save LUN disable flag. */ /* Save LUN disable flag. */
if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
ha->bus_settings[bus].lun_disables |= mb[0]; ha->bus_settings[bus].lun_disables |= flag;
} }
/* Set Device Queue Parameters. */ /* Set Device Queue Parameters. */
for (lun = 0; lun < MAX_LUNS; lun++) { for (lun = 0; lun < MAX_LUNS; lun++) {
mb[0] = MBC_SET_DEVICE_QUEUE; mb[0] = MBC_SET_DEVICE_QUEUE;
mb[1] = (uint16_t)(bus ? target | BIT_7 : target); mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
mb[1] = mb[1] << 8 | lun; mb[1] |= lun;
mb[2] = nv->bus[bus].max_queue_depth; mb[2] = nv->bus[bus].max_queue_depth;
mb[3] = nv->bus[bus].target[target].execution_throttle; mb[3] = nv->bus[bus].target[target].execution_throttle;
status |= qla1280_mailbox_command(ha, 0x0f, &mb[0]); status |= qla1280_mailbox_command(ha, 0x0f, mb);
} }
return status; return status;
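
The mailbox mask passed to qla1280_mailbox_command() above (BIT_3 | BIT_2 | BIT_1 | BIT_0, replacing the literal 0x0f) appears to select which mb[] words the routine loads into the ISP's outgoing mailbox registers, and mb[1] packs the bus/target selector into its high byte (BIT_7 marking bus 1), with the LUN in the low byte for MBC_SET_DEVICE_QUEUE. A minimal standalone sketch of that packing, assuming only the driver's BIT_n == (1 << n) convention (plain arithmetic, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_0 (1 << 0)
	#define BIT_1 (1 << 1)
	#define BIT_2 (1 << 2)
	#define BIT_3 (1 << 3)
	#define BIT_7 (1 << 7)

	int main(void)
	{
		int bus = 1, target = 5, lun = 2;
		/* High byte: target id, BIT_7 set for the second bus; low byte: LUN. */
		uint16_t mb1 = (uint16_t)((bus ? target | BIT_7 : target) << 8) | lun;

		printf("mailbox mask = 0x%02x\n", BIT_3 | BIT_2 | BIT_1 | BIT_0); /* 0x0f */
		printf("mb[1]        = 0x%04x\n", mb1);                           /* 0x8502 */
		return 0;
	}
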
@@ -2431,7 +2342,6 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
struct nvram *nv = &ha->nvram; struct nvram *nv = &ha->nvram;
int bus, target, status = 0; int bus, target, status = 0;
uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t mask;
ENTER("qla1280_nvram_config"); ENTER("qla1280_nvram_config");
@@ -2439,7 +2349,7 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
/* Always force AUTO sense for LINUX SCSI */ /* Always force AUTO sense for LINUX SCSI */
for (bus = 0; bus < MAX_BUSES; bus++) for (bus = 0; bus < MAX_BUSES; bus++)
for (target = 0; target < MAX_TARGETS; target++) { for (target = 0; target < MAX_TARGETS; target++) {
nv->bus[bus].target[target].parameter.f. nv->bus[bus].target[target].parameter.
auto_request_sense = 1; auto_request_sense = 1;
} }
} else { } else {
@@ -2457,31 +2367,40 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK; hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
cfg1 = RD_REG_WORD(&reg->cfg_1); cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
cdma_conf = RD_REG_WORD(&reg->cdma_cfg); cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
ddma_conf = RD_REG_WORD(&reg->ddma_cfg); ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
/* Busted fifo, says mjacob. */ /* Busted fifo, says mjacob. */
if (hwrev == ISP_CFG0_1040A) if (hwrev != ISP_CFG0_1040A)
WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64); cfg1 |= nv->isp_config.fifo_threshold << 4;
else
WRT_REG_WORD(&reg->cfg_1, cfg1 | ISP_CFG1_F64 | ISP_CFG1_BENAB); cfg1 |= nv->isp_config.burst_enable << 2;
WRT_REG_WORD(&reg->cfg_1, cfg1);
WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB); WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB); WRT_REG_WORD(&reg->ddma_cfg, cdma_conf | DDMA_CONF_BENAB);
} else { } else {
uint16_t cfg1, term;
/* Set ISP hardware DMA burst */ /* Set ISP hardware DMA burst */
mb[0] = nv->isp_config.c; cfg1 = nv->isp_config.fifo_threshold << 4;
cfg1 |= nv->isp_config.burst_enable << 2;
/* Enable DMA arbitration on dual channel controllers */ /* Enable DMA arbitration on dual channel controllers */
if (ha->ports > 1) if (ha->ports > 1)
mb[0] |= BIT_13; cfg1 |= BIT_13;
WRT_REG_WORD(&reg->cfg_1, mb[0]); WRT_REG_WORD(&reg->cfg_1, cfg1);
/* Set SCSI termination. */ /* Set SCSI termination. */
WRT_REG_WORD(&reg->gpio_enable, (BIT_3 + BIT_2 + BIT_1 + BIT_0)); WRT_REG_WORD(&reg->gpio_enable,
mb[0] = nv->termination.c & (BIT_3 + BIT_2 + BIT_1 + BIT_0); BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
WRT_REG_WORD(&reg->gpio_data, mb[0]); term = nv->termination.scsi_bus_1_control;
term |= nv->termination.scsi_bus_0_control << 2;
term |= nv->termination.auto_term_support << 7;
RD_REG_WORD(&reg->id_l); /* Flush PCI write */
WRT_REG_WORD(&reg->gpio_data, term);
} }
RD_REG_WORD(&reg->id_l); /* Flush PCI write */
/* ISP parameter word. */ /* ISP parameter word. */
mb[0] = MBC_SET_SYSTEM_PARAMETER; mb[0] = MBC_SET_SYSTEM_PARAMETER;
@@ -2497,16 +2416,17 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
/* Firmware feature word. */ /* Firmware feature word. */
mb[0] = MBC_SET_FIRMWARE_FEATURES; mb[0] = MBC_SET_FIRMWARE_FEATURES;
mask = BIT_5 | BIT_1 | BIT_0; mb[1] = nv->firmware_feature.f.enable_fast_posting;
mb[1] = le16_to_cpu(nv->firmware_feature.w) & (mask); mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2) #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
if (ia64_platform_is("sn2")) { if (ia64_platform_is("sn2")) {
printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
"workaround\n", ha->host_no); "workaround\n", ha->host_no);
mb[1] |= BIT_9; mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
} }
#endif #endif
status |= qla1280_mailbox_command(ha, mask, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* Retry count and delay. */ /* Retry count and delay. */
mb[0] = MBC_SET_RETRY_COUNT; mb[0] = MBC_SET_RETRY_COUNT;
@@ -2535,27 +2455,27 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
mb[2] |= BIT_5; mb[2] |= BIT_5;
if (nv->bus[1].config_2.data_line_active_negation) if (nv->bus[1].config_2.data_line_active_negation)
mb[2] |= BIT_4; mb[2] |= BIT_4;
status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* thingy */ /* thingy */
mb[0] = MBC_SET_PCI_CONTROL; mb[0] = MBC_SET_PCI_CONTROL;
mb[1] = 2; /* Data DMA Channel Burst Enable */ mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
mb[2] = 2; /* Command DMA Channel Burst Enable */ mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
mb[0] = MBC_SET_TAG_AGE_LIMIT; mb[0] = MBC_SET_TAG_AGE_LIMIT;
mb[1] = 8; mb[1] = 8;
status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
/* Selection timeout. */ /* Selection timeout. */
mb[0] = MBC_SET_SELECTION_TIMEOUT; mb[0] = MBC_SET_SELECTION_TIMEOUT;
mb[1] = nv->bus[0].selection_timeout; mb[1] = nv->bus[0].selection_timeout;
mb[2] = nv->bus[1].selection_timeout; mb[2] = nv->bus[1].selection_timeout;
status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
for (bus = 0; bus < ha->ports; bus++) for (bus = 0; bus < ha->ports; bus++)
status |= qla1280_config_bus(ha, bus); status |= qla1280_config_bus(ha, bus);
@@ -3066,7 +2986,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
struct scsi_cmnd *cmd = sp->cmd; struct scsi_cmnd *cmd = sp->cmd;
cmd_a64_entry_t *pkt; cmd_a64_entry_t *pkt;
struct scatterlist *sg = NULL; struct scatterlist *sg = NULL;
u32 *dword_ptr; __le32 *dword_ptr;
dma_addr_t dma_handle; dma_addr_t dma_handle;
int status = 0; int status = 0;
int cnt; int cnt;
@@ -3104,10 +3024,13 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
} }
dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
ha->req_q_cnt, seg_cnt);
/* If room for request in request ring. */ /* If room for request in request ring. */
if ((req_cnt + 2) >= ha->req_q_cnt) { if ((req_cnt + 2) >= ha->req_q_cnt) {
status = 1; status = 1;
dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt=" dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
"0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
req_cnt); req_cnt);
goto out; goto out;
@@ -3119,7 +3042,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
if (cnt >= MAX_OUTSTANDING_COMMANDS) { if (cnt >= MAX_OUTSTANDING_COMMANDS) {
status = 1; status = 1;
dprintk(2, "qla1280_64bit_start_scsi: NO ROOM IN " dprintk(2, "qla1280_start_scsi: NO ROOM IN "
"OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
goto out; goto out;
} }
@@ -3128,7 +3051,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
ha->req_q_cnt -= req_cnt; ha->req_q_cnt -= req_cnt;
CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
dprintk(2, "64bit_start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
dprintk(2, " bus %i, target %i, lun %i\n", dprintk(2, " bus %i, target %i, lun %i\n",
SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
@@ -3350,7 +3273,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
struct scsi_cmnd *cmd = sp->cmd; struct scsi_cmnd *cmd = sp->cmd;
struct cmd_entry *pkt; struct cmd_entry *pkt;
struct scatterlist *sg = NULL; struct scatterlist *sg = NULL;
uint32_t *dword_ptr; __le32 *dword_ptr;
int status = 0; int status = 0;
int cnt; int cnt;
int req_cnt; int req_cnt;
@@ -3993,21 +3916,21 @@ qla1280_get_target_options(struct scsi_cmnd *cmd, struct scsi_qla_host *ha)
result = cmd->request_buffer; result = cmd->request_buffer;
n = &ha->nvram; n = &ha->nvram;
n->bus[bus].target[target].parameter.f.enable_wide = 0; n->bus[bus].target[target].parameter.enable_wide = 0;
n->bus[bus].target[target].parameter.f.enable_sync = 0; n->bus[bus].target[target].parameter.enable_sync = 0;
n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0; n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
if (result[7] & 0x60) if (result[7] & 0x60)
n->bus[bus].target[target].parameter.f.enable_wide = 1; n->bus[bus].target[target].parameter.enable_wide = 1;
if (result[7] & 0x10) if (result[7] & 0x10)
n->bus[bus].target[target].parameter.f.enable_sync = 1; n->bus[bus].target[target].parameter.enable_sync = 1;
if ((result[2] >= 3) && (result[4] + 5 > 56) && if ((result[2] >= 3) && (result[4] + 5 > 56) &&
(result[56] & 0x4)) (result[56] & 0x4))
n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; n->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n", dprintk(2, "get_target_options(): wide %i, sync %i, ppr %i\n",
n->bus[bus].target[target].parameter.f.enable_wide, n->bus[bus].target[target].parameter.enable_wide,
n->bus[bus].target[target].parameter.f.enable_sync, n->bus[bus].target[target].parameter.enable_sync,
n->bus[bus].target[target].ppr_1x160.flags.enable_ppr); n->bus[bus].target[target].ppr_1x160.flags.enable_ppr);
} }
#endif #endif
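
The capability checks above read standard INQUIRY data: byte 7 carries the wide (0x60, WBus16/WBus32) and synchronous (0x10) bits, byte 2 the ANSI version, byte 4 the additional length, and byte 56, when present, the SPI clocking bits the driver uses to decide whether PPR is worth negotiating. A standalone decode of those bytes, assuming a caller that already holds the raw INQUIRY buffer (illustrative only, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	static void decode_inquiry_caps(const uint8_t *inq, int len)
	{
		int wide = (inq[7] & 0x60) != 0;          /* WBus16 or WBus32 */
		int sync = (inq[7] & 0x10) != 0;          /* synchronous transfers */
		int ppr  = 0;

		/* Byte 56 is only meaningful on SCSI-3+ devices reporting enough data. */
		if (inq[2] >= 3 && inq[4] + 5 > 56 && len > 56)
			ppr = (inq[56] & 0x04) != 0;      /* DT clocking -> try PPR */

		printf("wide %d, sync %d, ppr %d\n", wide, sync, ppr);
	}

	int main(void)
	{
		uint8_t inq[64] = { 0 };

		inq[2]  = 4;    /* ANSI version */
		inq[4]  = 59;   /* additional length: bytes 5..63 present */
		inq[7]  = 0x72; /* wide + sync capable */
		inq[56] = 0x04; /* DT clocking supported */
		decode_inquiry_caps(inq, sizeof(inq));
		return 0;
	}
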
@@ -4071,7 +3994,7 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
/* Save ISP completion status */ /* Save ISP completion status */
CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd); CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
if (scsi_status & SS_CHECK_CONDITION) { if (scsi_status & SAM_STAT_CHECK_CONDITION) {
if (comp_status != CS_ARS_FAILED) { if (comp_status != CS_ARS_FAILED) {
uint16_t req_sense_length = uint16_t req_sense_length =
le16_to_cpu(pkt->req_sense_length); le16_to_cpu(pkt->req_sense_length);
@@ -4650,7 +4573,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) { if (pci_set_dma_mask(ha->pdev, (dma_addr_t) ~ 0ULL)) {
if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
printk(KERN_WARNING "scsi(%li): Unable to set a " printk(KERN_WARNING "scsi(%li): Unable to set a "
" suitable DMA mask - aboring\n", ha->host_no); "suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV; error = -ENODEV;
goto error_free_irq; goto error_free_irq;
} }
@@ -4660,14 +4583,14 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
#else #else
if (pci_set_dma_mask(ha->pdev, 0xffffffff)) { if (pci_set_dma_mask(ha->pdev, 0xffffffff)) {
printk(KERN_WARNING "scsi(%li): Unable to set a " printk(KERN_WARNING "scsi(%li): Unable to set a "
" suitable DMA mask - aboring\n", ha->host_no); "suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV; error = -ENODEV;
goto error_free_irq; goto error_free_irq;
} }
#endif #endif
ha->request_ring = pci_alloc_consistent(ha->pdev, ha->request_ring = pci_alloc_consistent(ha->pdev,
((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
&ha->request_dma); &ha->request_dma);
if (!ha->request_ring) { if (!ha->request_ring) {
printk(KERN_INFO "qla1280: Failed to get request memory\n"); printk(KERN_INFO "qla1280: Failed to get request memory\n");
@@ -4675,7 +4598,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
} }
ha->response_ring = pci_alloc_consistent(ha->pdev, ha->response_ring = pci_alloc_consistent(ha->pdev,
((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
&ha->response_dma); &ha->response_dma);
if (!ha->response_ring) { if (!ha->response_ring) {
printk(KERN_INFO "qla1280: Failed to get response memory\n"); printk(KERN_INFO "qla1280: Failed to get response memory\n");
@@ -4758,7 +4681,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
#if LINUX_VERSION_CODE >= 0x020600 #if LINUX_VERSION_CODE >= 0x020600
error_disable_adapter: error_disable_adapter:
WRT_REG_WORD(&ha->iobase->ictrl, 0); qla1280_disable_intrs(ha);
#endif #endif
error_free_irq: error_free_irq:
free_irq(pdev->irq, ha); free_irq(pdev->irq, ha);
@@ -4770,11 +4693,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
#endif #endif
error_free_response_ring: error_free_response_ring:
pci_free_consistent(ha->pdev, pci_free_consistent(ha->pdev,
((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))), ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
ha->response_ring, ha->response_dma); ha->response_ring, ha->response_dma);
error_free_request_ring: error_free_request_ring:
pci_free_consistent(ha->pdev, pci_free_consistent(ha->pdev,
((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))), ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
ha->request_ring, ha->request_dma); ha->request_ring, ha->request_dma);
error_put_host: error_put_host:
scsi_host_put(host); scsi_host_put(host);
@@ -4795,7 +4718,7 @@ qla1280_remove_one(struct pci_dev *pdev)
scsi_remove_host(host); scsi_remove_host(host);
#endif #endif
WRT_REG_WORD(&ha->iobase->ictrl, 0); qla1280_disable_intrs(ha);
free_irq(pdev->irq, ha); free_irq(pdev->irq, ha);

View File

@@ -94,9 +94,6 @@
#define REQUEST_ENTRY_CNT 256 /* Number of request entries. */ #define REQUEST_ENTRY_CNT 256 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT 16 /* Number of response entries. */ #define RESPONSE_ENTRY_CNT 16 /* Number of response entries. */
/* Number of segments 1 - 65535 */
#define SG_SEGMENTS 32 /* Cmd entry + 6 continuations */
/* /*
* SCSI Request Block structure (sp) that is placed * SCSI Request Block structure (sp) that is placed
* on cmd->SCp location of every I/O * on cmd->SCp location of every I/O
@@ -378,29 +375,23 @@ struct nvram {
uint16_t unused_12; /* 12, 13 */ uint16_t unused_12; /* 12, 13 */
uint16_t unused_14; /* 14, 15 */ uint16_t unused_14; /* 14, 15 */
union { struct {
uint8_t c; uint8_t reserved:2;
struct { uint8_t burst_enable:1;
uint8_t reserved:2; uint8_t reserved_1:1;
uint8_t burst_enable:1; uint8_t fifo_threshold:4;
uint8_t reserved_1:1;
uint8_t fifo_threshold:4;
} f;
} isp_config; /* 16 */ } isp_config; /* 16 */
/* Termination /* Termination
* 0 = Disable, 1 = high only, 3 = Auto term * 0 = Disable, 1 = high only, 3 = Auto term
*/ */
union { struct {
uint8_t c; uint8_t scsi_bus_1_control:2;
struct { uint8_t scsi_bus_0_control:2;
uint8_t scsi_bus_1_control:2; uint8_t unused_0:1;
uint8_t scsi_bus_0_control:2; uint8_t unused_1:1;
uint8_t unused_0:1; uint8_t unused_2:1;
uint8_t unused_1:1; uint8_t auto_term_support:1;
uint8_t unused_2:1;
uint8_t auto_term_support:1;
} f;
} termination; /* 17 */ } termination; /* 17 */
uint16_t isp_parameter; /* 18, 19 */ uint16_t isp_parameter; /* 18, 19 */
@@ -460,18 +451,15 @@ struct nvram {
uint16_t unused_38; /* 38, 39 */ uint16_t unused_38; /* 38, 39 */
struct { struct {
union { struct {
uint8_t c; uint8_t renegotiate_on_error:1;
struct { uint8_t stop_queue_on_check:1;
uint8_t renegotiate_on_error:1; uint8_t auto_request_sense:1;
uint8_t stop_queue_on_check:1; uint8_t tag_queuing:1;
uint8_t auto_request_sense:1; uint8_t enable_sync:1;
uint8_t tag_queuing:1; uint8_t enable_wide:1;
uint8_t enable_sync:1; uint8_t parity_checking:1;
uint8_t enable_wide:1; uint8_t disconnect_allowed:1;
uint8_t parity_checking:1;
uint8_t disconnect_allowed:1;
} f;
} parameter; /* 40 */ } parameter; /* 40 */
uint8_t execution_throttle; /* 41 */ uint8_t execution_throttle; /* 41 */
@@ -528,23 +516,23 @@ struct cmd_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */ __le32 handle; /* System handle. */
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t target; /* SCSI ID */ uint8_t target; /* SCSI ID */
uint16_t cdb_len; /* SCSI command length. */ __le16 cdb_len; /* SCSI command length. */
uint16_t control_flags; /* Control flags. */ __le16 control_flags; /* Control flags. */
uint16_t reserved; __le16 reserved;
uint16_t timeout; /* Command timeout. */ __le16 timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t dseg_0_address; /* Data segment 0 address. */ __le32 dseg_0_address; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address; /* Data segment 1 address. */ __le32 dseg_1_address; /* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
uint32_t dseg_2_address; /* Data segment 2 address. */ __le32 dseg_2_address; /* Data segment 2 address. */
uint32_t dseg_2_length; /* Data segment 2 length. */ __le32 dseg_2_length; /* Data segment 2 length. */
uint32_t dseg_3_address; /* Data segment 3 address. */ __le32 dseg_3_address; /* Data segment 3 address. */
uint32_t dseg_3_length; /* Data segment 3 length. */ __le32 dseg_3_length; /* Data segment 3 length. */
}; };
/* /*
@@ -556,21 +544,21 @@ struct cont_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved; /* Reserved */ __le32 reserved; /* Reserved */
uint32_t dseg_0_address; /* Data segment 0 address. */ __le32 dseg_0_address; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address; /* Data segment 1 address. */ __le32 dseg_1_address; /* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
uint32_t dseg_2_address; /* Data segment 2 address. */ __le32 dseg_2_address; /* Data segment 2 address. */
uint32_t dseg_2_length; /* Data segment 2 length. */ __le32 dseg_2_length; /* Data segment 2 length. */
uint32_t dseg_3_address; /* Data segment 3 address. */ __le32 dseg_3_address; /* Data segment 3 address. */
uint32_t dseg_3_length; /* Data segment 3 length. */ __le32 dseg_3_length; /* Data segment 3 length. */
uint32_t dseg_4_address; /* Data segment 4 address. */ __le32 dseg_4_address; /* Data segment 4 address. */
uint32_t dseg_4_length; /* Data segment 4 length. */ __le32 dseg_4_length; /* Data segment 4 length. */
uint32_t dseg_5_address; /* Data segment 5 address. */ __le32 dseg_5_address; /* Data segment 5 address. */
uint32_t dseg_5_length; /* Data segment 5 length. */ __le32 dseg_5_length; /* Data segment 5 length. */
uint32_t dseg_6_address; /* Data segment 6 address. */ __le32 dseg_6_address; /* Data segment 6 address. */
uint32_t dseg_6_length; /* Data segment 6 length. */ __le32 dseg_6_length; /* Data segment 6 length. */
}; };
/* /*
@@ -586,22 +574,22 @@ struct response {
#define RF_FULL BIT_1 /* Full */ #define RF_FULL BIT_1 /* Full */
#define RF_BAD_HEADER BIT_2 /* Bad header. */ #define RF_BAD_HEADER BIT_2 /* Bad header. */
#define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */ #define RF_BAD_PAYLOAD BIT_3 /* Bad payload. */
uint32_t handle; /* System handle. */ __le32 handle; /* System handle. */
uint16_t scsi_status; /* SCSI status. */ __le16 scsi_status; /* SCSI status. */
uint16_t comp_status; /* Completion status. */ __le16 comp_status; /* Completion status. */
uint16_t state_flags; /* State flags. */ __le16 state_flags; /* State flags. */
#define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */ #define SF_TRANSFER_CMPL BIT_14 /* Transfer Complete. */
#define SF_GOT_SENSE BIT_13 /* Got Sense */ #define SF_GOT_SENSE BIT_13 /* Got Sense */
#define SF_GOT_STATUS BIT_12 /* Got Status */ #define SF_GOT_STATUS BIT_12 /* Got Status */
#define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */ #define SF_TRANSFERRED_DATA BIT_11 /* Transferred data */
#define SF_SENT_CDB BIT_10 /* Send CDB */ #define SF_SENT_CDB BIT_10 /* Send CDB */
#define SF_GOT_TARGET BIT_9 /* */ #define SF_GOT_TARGET BIT_9 /* */
#define SF_GOT_BUS BIT_8 /* */ #define SF_GOT_BUS BIT_8 /* */
uint16_t status_flags; /* Status flags. */ __le16 status_flags; /* Status flags. */
uint16_t time; /* Time. */ __le16 time; /* Time. */
uint16_t req_sense_length; /* Request sense data length. */ __le16 req_sense_length;/* Request sense data length. */
uint32_t residual_length; /* Residual transfer length. */ __le32 residual_length; /* Residual transfer length. */
uint16_t reserved[4]; __le16 reserved[4];
uint8_t req_sense_data[32]; /* Request sense data. */ uint8_t req_sense_data[32]; /* Request sense data. */
}; };
@@ -614,7 +602,7 @@ struct mrk_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved; __le32 reserved;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t target; /* SCSI ID */ uint8_t target; /* SCSI ID */
uint8_t modifier; /* Modifier (7-0). */ uint8_t modifier; /* Modifier (7-0). */
@@ -638,11 +626,11 @@ struct ecmd_entry {
uint32_t handle; /* System handle. */ uint32_t handle; /* System handle. */
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t target; /* SCSI ID */ uint8_t target; /* SCSI ID */
uint16_t cdb_len; /* SCSI command length. */ __le16 cdb_len; /* SCSI command length. */
uint16_t control_flags; /* Control flags. */ __le16 control_flags; /* Control flags. */
uint16_t reserved; __le16 reserved;
uint16_t timeout; /* Command timeout. */ __le16 timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[88]; /* SCSI command words. */ uint8_t scsi_cdb[88]; /* SCSI command words. */
}; };
@@ -655,20 +643,20 @@ typedef struct {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t handle; /* System handle. */ __le32 handle; /* System handle. */
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t target; /* SCSI ID */ uint8_t target; /* SCSI ID */
uint16_t cdb_len; /* SCSI command length. */ __le16 cdb_len; /* SCSI command length. */
uint16_t control_flags; /* Control flags. */ __le16 control_flags; /* Control flags. */
uint16_t reserved; __le16 reserved;
uint16_t timeout; /* Command timeout. */ __le16 timeout; /* Command timeout. */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */
uint32_t reserved_1[2]; /* unused */ __le32 reserved_1[2]; /* unused */
uint32_t dseg_0_address[2]; /* Data segment 0 address. */ __le32 dseg_0_address[2]; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address[2]; /* Data segment 1 address. */ __le32 dseg_1_address[2]; /* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
} cmd_a64_entry_t, request_t; } cmd_a64_entry_t, request_t;
/* /*
@@ -680,16 +668,16 @@ struct cont_a64_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */ uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t dseg_0_address[2]; /* Data segment 0 address. */ __le32 dseg_0_address[2]; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address[2]; /* Data segment 1 address. */ __le32 dseg_1_address[2]; /* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
uint32_t dseg_2_address[2]; /* Data segment 2 address. */ __le32 dseg_2_address[2]; /* Data segment 2 address. */
uint32_t dseg_2_length; /* Data segment 2 length. */ __le32 dseg_2_length; /* Data segment 2 length. */
uint32_t dseg_3_address[2]; /* Data segment 3 address. */ __le32 dseg_3_address[2]; /* Data segment 3 address. */
uint32_t dseg_3_length; /* Data segment 3 length. */ __le32 dseg_3_length; /* Data segment 3 length. */
uint32_t dseg_4_address[2]; /* Data segment 4 address. */ __le32 dseg_4_address[2]; /* Data segment 4 address. */
uint32_t dseg_4_length; /* Data segment 4 length. */ __le32 dseg_4_length; /* Data segment 4 length. */
}; };
/* /*
@@ -701,10 +689,10 @@ struct elun_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status not used. */ uint8_t entry_status; /* Entry Status not used. */
uint32_t reserved_2; __le32 reserved_2;
uint16_t lun; /* Bit 15 is bus number. */ __le16 lun; /* Bit 15 is bus number. */
uint16_t reserved_4; __le16 reserved_4;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t reserved_5; uint8_t reserved_5;
uint8_t command_count; /* Number of ATIOs allocated. */ uint8_t command_count; /* Number of ATIOs allocated. */
@@ -714,8 +702,8 @@ struct elun_entry {
/* commands (2-26). */ /* commands (2-26). */
uint8_t group_7_length; /* SCSI CDB length for group 7 */ uint8_t group_7_length; /* SCSI CDB length for group 7 */
/* commands (2-26). */ /* commands (2-26). */
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t reserved_6[20]; __le16 reserved_6[20];
}; };
/* /*
@@ -729,20 +717,20 @@ struct modify_lun_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t reserved_3; uint8_t reserved_3;
uint8_t operators; uint8_t operators;
uint8_t reserved_4; uint8_t reserved_4;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t reserved_5; uint8_t reserved_5;
uint8_t command_count; /* Number of ATIOs allocated. */ uint8_t command_count; /* Number of ATIOs allocated. */
uint8_t immed_notify_count; /* Number of Immediate Notify */ uint8_t immed_notify_count; /* Number of Immediate Notify */
/* entries allocated. */ /* entries allocated. */
uint16_t reserved_6; __le16 reserved_6;
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t reserved_7[20]; __le16 reserved_7[20];
}; };
/* /*
@@ -754,20 +742,20 @@ struct notify_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; uint8_t lun;
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t reserved_4; uint8_t reserved_4;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
uint8_t tag_type; /* Received queue tag message type */ uint8_t tag_type; /* Received queue tag message type */
/* entries allocated. */ /* entries allocated. */
uint16_t seq_id; __le16 seq_id;
uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */ uint8_t scsi_msg[8]; /* SCSI message not handled by ISP */
uint16_t reserved_5[8]; __le16 reserved_5[8];
uint8_t sense_data[18]; uint8_t sense_data[18];
}; };
@@ -780,16 +768,16 @@ struct nack_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; uint8_t lun;
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t event; uint8_t event;
uint16_t seq_id; __le16 seq_id;
uint16_t reserved_4[22]; __le16 reserved_4[22];
}; };
/* /*
@@ -801,12 +789,12 @@ struct atio_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; uint8_t lun;
uint8_t initiator_id; uint8_t initiator_id;
uint8_t cdb_len; uint8_t cdb_len;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t scsi_status; uint8_t scsi_status;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
@@ -824,28 +812,28 @@ struct ctio_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t scsi_status; uint8_t scsi_status;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
uint8_t tag_type; /* Received queue tag message type */ uint8_t tag_type; /* Received queue tag message type */
uint32_t transfer_length; __le32 transfer_length;
uint32_t residual; __le32 residual;
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint32_t dseg_0_address; /* Data segment 0 address. */ __le32 dseg_0_address; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address; /* Data segment 1 address. */ __le32 dseg_1_address; /* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
uint32_t dseg_2_address; /* Data segment 2 address. */ __le32 dseg_2_address; /* Data segment 2 address. */
uint32_t dseg_2_length; /* Data segment 2 length. */ __le32 dseg_2_length; /* Data segment 2 length. */
uint32_t dseg_3_address; /* Data segment 3 address. */ __le32 dseg_3_address; /* Data segment 3 address. */
uint32_t dseg_3_length; /* Data segment 3 length. */ __le32 dseg_3_length; /* Data segment 3 length. */
}; };
/* /*
@@ -857,24 +845,24 @@ struct ctio_ret_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t scsi_status; uint8_t scsi_status;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
uint8_t tag_type; /* Received queue tag message type */ uint8_t tag_type; /* Received queue tag message type */
uint32_t transfer_length; __le32 transfer_length;
uint32_t residual; __le32 residual;
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint32_t dseg_0_address; /* Data segment 0 address. */ __le32 dseg_0_address; /* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address; /* Data segment 1 address. */ __le32 dseg_1_address; /* Data segment 1 address. */
uint16_t dseg_1_length; /* Data segment 1 length. */ __le16 dseg_1_length; /* Data segment 1 length. */
uint8_t sense_data[18]; uint8_t sense_data[18];
}; };
@@ -887,25 +875,25 @@ struct ctio_a64_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t scsi_status; uint8_t scsi_status;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
uint8_t tag_type; /* Received queue tag message type */ uint8_t tag_type; /* Received queue tag message type */
uint32_t transfer_length; __le32 transfer_length;
uint32_t residual; __le32 residual;
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint32_t reserved_4[2]; __le32 reserved_4[2];
uint32_t dseg_0_address[2]; /* Data segment 0 address. */ __le32 dseg_0_address[2];/* Data segment 0 address. */
uint32_t dseg_0_length; /* Data segment 0 length. */ __le32 dseg_0_length; /* Data segment 0 length. */
uint32_t dseg_1_address[2]; /* Data segment 1 address. */ __le32 dseg_1_address[2];/* Data segment 1 address. */
uint32_t dseg_1_length; /* Data segment 1 length. */ __le32 dseg_1_length; /* Data segment 1 length. */
}; };
/* /*
@@ -917,21 +905,21 @@ struct ctio_a64_ret_entry {
uint8_t entry_count; /* Entry count. */ uint8_t entry_count; /* Entry count. */
uint8_t reserved_1; uint8_t reserved_1;
uint8_t entry_status; /* Entry Status. */ uint8_t entry_status; /* Entry Status. */
uint32_t reserved_2; __le32 reserved_2;
uint8_t lun; /* SCSI LUN */ uint8_t lun; /* SCSI LUN */
uint8_t initiator_id; uint8_t initiator_id;
uint8_t reserved_3; uint8_t reserved_3;
uint8_t target_id; uint8_t target_id;
uint32_t option_flags; __le32 option_flags;
uint8_t status; uint8_t status;
uint8_t scsi_status; uint8_t scsi_status;
uint8_t tag_value; /* Received queue tag message value */ uint8_t tag_value; /* Received queue tag message value */
uint8_t tag_type; /* Received queue tag message type */ uint8_t tag_type; /* Received queue tag message type */
uint32_t transfer_length; __le32 transfer_length;
uint32_t residual; __le32 residual;
uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */
uint16_t dseg_count; /* Data segment count. */ __le16 dseg_count; /* Data segment count. */
uint16_t reserved_4[7]; __le16 reserved_4[7];
uint8_t sense_data[18]; uint8_t sense_data[18];
}; };
@@ -978,14 +966,6 @@ struct ctio_a64_ret_entry {
#define CS_UNKNOWN 0x81 /* Driver defined */ #define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */ #define CS_RETRY 0x82 /* Driver defined */
/*
* ISP status entry - SCSI status byte bit definitions.
*/
#define SS_CHECK_CONDITION BIT_1
#define SS_CONDITION_MET BIT_2
#define SS_BUSY_CONDITION BIT_3
#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
/* /*
* ISP target entries - Option flags bit definitions. * ISP target entries - Option flags bit definitions.
*/ */
@@ -1082,10 +1062,6 @@ struct scsi_qla_host {
uint32_t reset_active:1; /* 3 */ uint32_t reset_active:1; /* 3 */
uint32_t abort_isp_active:1; /* 4 */ uint32_t abort_isp_active:1; /* 4 */
uint32_t disable_risc_code_load:1; /* 5 */ uint32_t disable_risc_code_load:1; /* 5 */
uint32_t enable_64bit_addressing:1; /* 6 */
uint32_t in_reset:1; /* 7 */
uint32_t ints_enabled:1;
uint32_t ignore_nvram:1;
#ifdef __ia64__ #ifdef __ia64__
uint32_t use_pci_vchannel:1; uint32_t use_pci_vchannel:1;
#endif #endif

View File

@@ -211,6 +211,138 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr); sysfs_remove_bin_file(&host->shost_gendev.kobj, &sysfs_nvram_attr);
} }
/* Scsi_Host attributes. */
static ssize_t
qla2x00_drvr_version_show(struct class_device *cdev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}
static ssize_t
qla2x00_fw_version_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
char fw_str[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.fw_version_str(ha, fw_str));
}
static ssize_t
qla2x00_serial_num_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
uint32_t sn;
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
sn % 100000);
}
static ssize_t
qla2x00_isp_name_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->brd_info->isp_name);
}
static ssize_t
qla2x00_isp_id_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
ha->product_id[0], ha->product_id[1], ha->product_id[2],
ha->product_id[3]);
}
static ssize_t
qla2x00_model_name_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number);
}
static ssize_t
qla2x00_model_desc_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->model_desc ? ha->model_desc: "");
}
static ssize_t
qla2x00_pci_info_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
char pci_info[30];
return snprintf(buf, PAGE_SIZE, "%s\n",
ha->isp_ops.pci_info_str(ha, pci_info));
}
static ssize_t
qla2x00_state_show(struct class_device *cdev, char *buf)
{
scsi_qla_host_t *ha = to_qla_host(class_to_shost(cdev));
int len = 0;
if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
atomic_read(&ha->loop_state) == LOOP_DEAD)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&ha->loop_state) != LOOP_READY ||
test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags))
len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = snprintf(buf, PAGE_SIZE, "Link Up - ");
switch (ha->current_topology) {
case ISP_CFG_NL:
len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
case ISP_CFG_FL:
len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
break;
case ISP_CFG_N:
len += snprintf(buf + len, PAGE_SIZE-len,
"N_Port to N_Port\n");
break;
case ISP_CFG_F:
len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
break;
default:
len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
break;
}
}
return len;
}
static CLASS_DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show,
NULL);
static CLASS_DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static CLASS_DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static CLASS_DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static CLASS_DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static CLASS_DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static CLASS_DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static CLASS_DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static CLASS_DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
struct class_device_attribute *qla2x00_host_attrs[] = {
&class_device_attr_driver_version,
&class_device_attr_fw_version,
&class_device_attr_serial_num,
&class_device_attr_isp_name,
&class_device_attr_isp_id,
&class_device_attr_model_name,
&class_device_attr_model_desc,
&class_device_attr_pci_info,
&class_device_attr_state,
NULL,
};
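
Hooked up through the host template's shost_attrs pointer, these entries appear as read-only files on the Scsi_Host class device, normally under /sys/class/scsi_host/hostN/ on a 2.6 kernel. A small userspace sketch that reads one of them; the host number and the assumption that a qla2xxx adapter is bound there are placeholders:

	#include <stdio.h>

	int main(void)
	{
		/* Path assumes host0 is a qla2xxx adapter; adjust as needed. */
		const char *path = "/sys/class/scsi_host/host0/fw_version";
		char buf[64];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("firmware: %s", buf);
		fclose(f);
		return 0;
	}
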
/* Host attributes. */ /* Host attributes. */
static void static void
@@ -304,10 +436,13 @@ struct fc_function_template qla2xxx_transport_functions = {
.show_host_node_name = 1, .show_host_node_name = 1,
.show_host_port_name = 1, .show_host_port_name = 1,
.show_host_supported_classes = 1,
.get_host_port_id = qla2x00_get_host_port_id, .get_host_port_id = qla2x00_get_host_port_id,
.show_host_port_id = 1, .show_host_port_id = 1,
.dd_fcrport_size = sizeof(struct fc_port *), .dd_fcrport_size = sizeof(struct fc_port *),
.show_rport_supported_classes = 1,
.get_starget_node_name = qla2x00_get_starget_node_name, .get_starget_node_name = qla2x00_get_starget_node_name,
.show_starget_node_name = 1, .show_starget_node_name = 1,
@@ -329,4 +464,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
be64_to_cpu(*(uint64_t *)ha->init_cb->node_name); be64_to_cpu(*(uint64_t *)ha->init_cb->node_name);
fc_host_port_name(ha->host) = fc_host_port_name(ha->host) =
be64_to_cpu(*(uint64_t *)ha->init_cb->port_name); be64_to_cpu(*(uint64_t *)ha->init_cb->port_name);
fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
} }

View File

@@ -81,6 +81,7 @@
#define DEBUG2_3_11(x) do {x;} while (0); #define DEBUG2_3_11(x) do {x;} while (0);
#define DEBUG2_9_10(x) do {x;} while (0); #define DEBUG2_9_10(x) do {x;} while (0);
#define DEBUG2_11(x) do {x;} while (0); #define DEBUG2_11(x) do {x;} while (0);
#define DEBUG2_13(x) do {x;} while (0);
#else #else
#define DEBUG2(x) do {} while (0); #define DEBUG2(x) do {} while (0);
#endif #endif
@@ -169,8 +170,14 @@
#if defined(QL_DEBUG_LEVEL_13) #if defined(QL_DEBUG_LEVEL_13)
#define DEBUG13(x) do {x;} while (0) #define DEBUG13(x) do {x;} while (0)
#if !defined(DEBUG2_13)
#define DEBUG2_13(x) do {x;} while(0)
#endif
#else #else
#define DEBUG13(x) do {} while (0) #define DEBUG13(x) do {} while (0)
#if !defined(QL_DEBUG_LEVEL_2)
#define DEBUG2_13(x) do {} while(0)
#endif
#endif #endif
#if defined(QL_DEBUG_LEVEL_14) #if defined(QL_DEBUG_LEVEL_14)

View File

@@ -214,6 +214,7 @@
* valid range of an N-PORT id is 0 through 0x7ef. * valid range of an N-PORT id is 0 through 0x7ef.
*/ */
#define NPH_LAST_HANDLE 0x7ef #define NPH_LAST_HANDLE 0x7ef
#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
#define NPH_SNS 0x7fc /* FFFFFC */ #define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */ #define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */ #define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -630,6 +631,7 @@ typedef struct {
#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */ #define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */
#define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */ #define MBC_READ_RAM_EXTENDED 0xf /* Read RAM extended. */
#define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */ #define MBC_IOCB_COMMAND 0x12 /* Execute IOCB command. */
#define MBC_STOP_FIRMWARE 0x14 /* Stop firmware. */
#define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */ #define MBC_ABORT_COMMAND 0x15 /* Abort IOCB command. */
#define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */ #define MBC_ABORT_DEVICE 0x16 /* Abort device (ID/LUN). */
#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */ #define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
@@ -913,7 +915,7 @@ typedef struct {
* MSB BIT 1 = * MSB BIT 1 =
* MSB BIT 2 = * MSB BIT 2 =
* MSB BIT 3 = * MSB BIT 3 =
* MSB BIT 4 = * MSB BIT 4 = LED mode
* MSB BIT 5 = enable 50 ohm termination * MSB BIT 5 = enable 50 ohm termination
* MSB BIT 6 = Data Rate (2300 only) * MSB BIT 6 = Data Rate (2300 only)
* MSB BIT 7 = Data Rate (2300 only) * MSB BIT 7 = Data Rate (2300 only)
@@ -1035,7 +1037,7 @@ typedef struct {
* MSB BIT 1 = * MSB BIT 1 =
* MSB BIT 2 = * MSB BIT 2 =
* MSB BIT 3 = * MSB BIT 3 =
* MSB BIT 4 = * MSB BIT 4 = LED mode
* MSB BIT 5 = enable 50 ohm termination * MSB BIT 5 = enable 50 ohm termination
* MSB BIT 6 = Data Rate (2300 only) * MSB BIT 6 = Data Rate (2300 only)
* MSB BIT 7 = Data Rate (2300 only) * MSB BIT 7 = Data Rate (2300 only)
@@ -1131,10 +1133,7 @@ typedef struct {
uint8_t link_down_timeout; uint8_t link_down_timeout;
uint8_t adapter_id_0[4]; uint8_t adapter_id[16];
uint8_t adapter_id_1[4];
uint8_t adapter_id_2[4];
uint8_t adapter_id_3[4];
uint8_t alt1_boot_node_name[WWN_SIZE]; uint8_t alt1_boot_node_name[WWN_SIZE];
uint16_t alt1_boot_lun_number; uint16_t alt1_boot_lun_number;
@@ -1673,6 +1672,7 @@ typedef struct fc_port {
uint8_t cur_path; /* current path id */ uint8_t cur_path; /* current path id */
struct fc_rport *rport; struct fc_rport *rport;
u32 supported_classes;
} fc_port_t; } fc_port_t;
/* /*
@@ -1727,6 +1727,8 @@ typedef struct fc_port {
#define CT_REJECT_RESPONSE 0x8001 #define CT_REJECT_RESPONSE 0x8001
#define CT_ACCEPT_RESPONSE 0x8002 #define CT_ACCEPT_RESPONSE 0x8002
#define CT_REASON_CANNOT_PERFORM 0x09
#define CT_EXPL_ALREADY_REGISTERED 0x10
#define NS_N_PORT_TYPE 0x01 #define NS_N_PORT_TYPE 0x01
#define NS_NL_PORT_TYPE 0x02 #define NS_NL_PORT_TYPE 0x02
@@ -1768,6 +1770,100 @@ typedef struct fc_port {
#define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255) #define RSNN_NN_REQ_SIZE (16 + 8 + 1 + 255)
#define RSNN_NN_RSP_SIZE 16 #define RSNN_NN_RSP_SIZE 16
/*
* HBA attribute types.
*/
#define FDMI_HBA_ATTR_COUNT 9
#define FDMI_HBA_NODE_NAME 1
#define FDMI_HBA_MANUFACTURER 2
#define FDMI_HBA_SERIAL_NUMBER 3
#define FDMI_HBA_MODEL 4
#define FDMI_HBA_MODEL_DESCRIPTION 5
#define FDMI_HBA_HARDWARE_VERSION 6
#define FDMI_HBA_DRIVER_VERSION 7
#define FDMI_HBA_OPTION_ROM_VERSION 8
#define FDMI_HBA_FIRMWARE_VERSION 9
#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
struct ct_fdmi_hba_attr {
uint16_t type;
uint16_t len;
union {
uint8_t node_name[WWN_SIZE];
uint8_t manufacturer[32];
uint8_t serial_num[8];
uint8_t model[16];
uint8_t model_desc[80];
uint8_t hw_version[16];
uint8_t driver_version[32];
uint8_t orom_version[16];
uint8_t fw_version[16];
uint8_t os_version[128];
uint8_t max_ct_len[4];
} a;
};
struct ct_fdmi_hba_attributes {
uint32_t count;
struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
};
/*
* Port attribute types.
*/
#define FDMI_PORT_ATTR_COUNT 5
#define FDMI_PORT_FC4_TYPES 1
#define FDMI_PORT_SUPPORT_SPEED 2
#define FDMI_PORT_CURRENT_SPEED 3
#define FDMI_PORT_MAX_FRAME_SIZE 4
#define FDMI_PORT_OS_DEVICE_NAME 5
#define FDMI_PORT_HOST_NAME 6
struct ct_fdmi_port_attr {
uint16_t type;
uint16_t len;
union {
uint8_t fc4_types[32];
uint32_t sup_speed;
uint32_t cur_speed;
uint32_t max_frame_size;
uint8_t os_dev_name[32];
uint8_t host_name[32];
} a;
};
/*
* Port Attribute Block.
*/
struct ct_fdmi_port_attributes {
uint32_t count;
struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
};
/* FDMI definitions. */
#define GRHL_CMD 0x100
#define GHAT_CMD 0x101
#define GRPL_CMD 0x102
#define GPAT_CMD 0x110
#define RHBA_CMD 0x200
#define RHBA_RSP_SIZE 16
#define RHAT_CMD 0x201
#define RPRT_CMD 0x210
#define RPA_CMD 0x211
#define RPA_RSP_SIZE 16
#define DHBA_CMD 0x300
#define DHBA_REQ_SIZE (16 + 8)
#define DHBA_RSP_SIZE 16
#define DHAT_CMD 0x301
#define DPRT_CMD 0x310
#define DPA_CMD 0x311
/* CT command header -- request/response common fields */ /* CT command header -- request/response common fields */
struct ct_cmd_hdr { struct ct_cmd_hdr {
uint8_t revision; uint8_t revision;
@@ -1825,6 +1921,43 @@ struct ct_sns_req {
uint8_t name_len; uint8_t name_len;
uint8_t sym_node_name[255]; uint8_t sym_node_name[255];
} rsnn_nn; } rsnn_nn;
struct {
uint8_t hba_indentifier[8];
} ghat;
struct {
uint8_t hba_identifier[8];
uint32_t entry_count;
uint8_t port_name[8];
struct ct_fdmi_hba_attributes attrs;
} rhba;
struct {
uint8_t hba_identifier[8];
struct ct_fdmi_hba_attributes attrs;
} rhat;
struct {
uint8_t port_name[8];
struct ct_fdmi_port_attributes attrs;
} rpa;
struct {
uint8_t port_name[8];
} dhba;
struct {
uint8_t port_name[8];
} dhat;
struct {
uint8_t port_name[8];
} dprt;
struct {
uint8_t port_name[8];
} dpa;
} req; } req;
}; };
@@ -1882,6 +2015,12 @@ struct ct_sns_rsp {
struct { struct {
uint8_t fc4_types[32]; uint8_t fc4_types[32];
} gft_id; } gft_id;
struct {
uint32_t entry_count;
uint8_t port_name[8];
struct ct_fdmi_hba_attributes attrs;
} ghat;
} rsp; } rsp;
}; };
@@ -2032,6 +2171,8 @@ struct isp_operations {
uint16_t (*calc_req_entries) (uint16_t); uint16_t (*calc_req_entries) (uint16_t);
void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t); void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
uint32_t);
uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *, uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t); uint32_t, uint32_t);
@@ -2111,6 +2252,7 @@ typedef struct scsi_qla_host {
#define IOCTL_ERROR_RECOVERY 23 #define IOCTL_ERROR_RECOVERY 23
#define LOOP_RESET_NEEDED 24 #define LOOP_RESET_NEEDED 24
#define BEACON_BLINK_NEEDED 25 #define BEACON_BLINK_NEEDED 25
#define REGISTER_FDMI_NEEDED 26
uint32_t device_flags; uint32_t device_flags;
#define DFLG_LOCAL_DEVICES BIT_0 #define DFLG_LOCAL_DEVICES BIT_0
@@ -2204,6 +2346,7 @@ typedef struct scsi_qla_host {
int port_down_retry_count; int port_down_retry_count;
uint8_t mbx_count; uint8_t mbx_count;
uint16_t last_loop_id; uint16_t last_loop_id;
uint16_t mgmt_svr_loop_id;
uint32_t login_retry_count; uint32_t login_retry_count;
@@ -2318,6 +2461,7 @@ typedef struct scsi_qla_host {
uint8_t model_number[16+1]; uint8_t model_number[16+1];
#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" #define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
char *model_desc; char *model_desc;
uint8_t adapter_id[16+1];
uint8_t *node_name; uint8_t *node_name;
uint8_t *port_name; uint8_t *port_name;
@@ -2377,6 +2521,7 @@ typedef struct scsi_qla_host {
#define QLA_SUSPENDED 0x106 #define QLA_SUSPENDED 0x106
#define QLA_BUSY 0x107 #define QLA_BUSY 0x107
#define QLA_RSCNS_HANDLED 0x108 #define QLA_RSCNS_HANDLED 0x108
#define QLA_ALREADY_REGISTERED 0x109
/* /*
* Stat info for all adapters * Stat info for all adapters

View File

@@ -79,6 +79,7 @@ extern int ql2xplogiabsentdevice;
extern int ql2xenablezio; extern int ql2xenablezio;
extern int ql2xintrdelaytimer; extern int ql2xintrdelaytimer;
extern int ql2xloginretrycount; extern int ql2xloginretrycount;
extern int ql2xfdmienable;
extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
@@ -146,9 +147,6 @@ extern int
qla2x00_abort_target(fc_port_t *); qla2x00_abort_target(fc_port_t *);
#endif #endif
extern int
qla2x00_target_reset(scsi_qla_host_t *, struct fc_port *);
extern int extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
uint8_t *, uint16_t *); uint8_t *, uint16_t *);
@@ -215,6 +213,9 @@ qla2x00_get_serdes_params(scsi_qla_host_t *, uint16_t *, uint16_t *,
extern int extern int
qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
extern int
qla2x00_stop_firmware(scsi_qla_host_t *);
/* /*
* Global Function Prototypes in qla_isr.c source file. * Global Function Prototypes in qla_isr.c source file.
*/ */
@@ -269,6 +270,9 @@ extern int qla2x00_rft_id(scsi_qla_host_t *);
extern int qla2x00_rff_id(scsi_qla_host_t *); extern int qla2x00_rff_id(scsi_qla_host_t *);
extern int qla2x00_rnn_id(scsi_qla_host_t *); extern int qla2x00_rnn_id(scsi_qla_host_t *);
extern int qla2x00_rsnn_nn(scsi_qla_host_t *); extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla2x00_fdmi_register(scsi_qla_host_t *);
/* /*
* Global Function Prototypes in qla_rscn.c source file. * Global Function Prototypes in qla_rscn.c source file.
@@ -289,6 +293,8 @@ extern void qla2x00_cancel_io_descriptors(scsi_qla_host_t *);
/* /*
* Global Function Prototypes in qla_attr.c source file. * Global Function Prototypes in qla_attr.c source file.
*/ */
struct class_device_attribute;
extern struct class_device_attribute *qla2x00_host_attrs[];
struct fc_function_template; struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions; extern struct fc_function_template qla2xxx_transport_functions;
extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);

View File

@@ -1099,3 +1099,567 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
return (rval); return (rval);
} }
/**
* qla2x00_mgmt_svr_login() - Login to fabric Management Service.
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
{
int ret;
uint16_t mb[MAILBOX_REGISTER_COUNT];
ret = QLA_SUCCESS;
if (ha->flags.management_server_logged_in)
return ret;
ha->isp_ops.fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
__func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]));
ret = QLA_FUNCTION_FAILED;
} else
ha->flags.management_server_logged_in = 1;
return ret;
}
/**
* qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
* @ha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
* Returns a pointer to the @ha's ms_iocb.
*/
void *
qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
uint32_t rsp_size)
{
ms_iocb_entry_t *ms_pkt;
ms_pkt = ha->ms_iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
ms_pkt->entry_type = MS_IOCB_TYPE;
ms_pkt->entry_count = 1;
SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
ms_pkt->timeout = __constant_cpu_to_le16(59);
ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
ms_pkt->req_bytecount = cpu_to_le32(req_size);
ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
return ms_pkt;
}
/**
* qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
* @ha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
* Returns a pointer to the @ha's ms_iocb.
*/
void *
qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
uint32_t rsp_size)
{
struct ct_entry_24xx *ct_pkt;
ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
ct_pkt->timeout = __constant_cpu_to_le16(59);
ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
return ct_pkt;
}
static inline ms_iocb_entry_t *
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
{
ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
} else {
ms_pkt->req_bytecount = cpu_to_le32(req_size);
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
}
return ms_pkt;
}
/**
* qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
* @ct_req: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
* Returns a pointer to the initialized @ct_req.
*/
static inline struct ct_sns_req *
qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
uint16_t rsp_size)
{
memset(ct_req, 0, sizeof(struct ct_sns_pkt));
ct_req->header.revision = 0x01;
ct_req->header.gs_type = 0xFA;
ct_req->header.gs_subtype = 0x10;
ct_req->command = cpu_to_be16(cmd);
ct_req->max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
return ct_req;
}
/**
* qla2x00_fdmi_rhba() - Register HBA attributes with the fabric (FDMI RHBA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
{
int rval, alen;
uint32_t size, sn;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
uint8_t *entries;
struct ct_fdmi_hba_attr *eiter;
/* Issue RHBA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
RHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE);
ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE);
size = 2 * WWN_SIZE + 4 + 4;
/* Attributes */
ct_req->req.rhba.attrs.count =
__constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
entries = ct_req->req.rhba.hba_identifier;
/* Nodename. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
__func__, ha->host_no,
eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
eiter->a.node_name[6], eiter->a.node_name[7]));
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
strcpy(eiter->a.manufacturer, "QLogic Corporation");
alen = strlen(eiter->a.manufacturer);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no,
eiter->a.manufacturer));
/* Serial number. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
alen = strlen(eiter->a.serial_num);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no,
eiter->a.serial_num));
/* Model name. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
strcpy(eiter->a.model, ha->model_number);
alen = strlen(eiter->a.model);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no,
eiter->a.model));
/* Model description. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
if (ha->model_desc)
strncpy(eiter->a.model_desc, ha->model_desc, 80);
alen = strlen(eiter->a.model_desc);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no,
eiter->a.model_desc));
/* Hardware version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
strcpy(eiter->a.hw_version, ha->adapter_id);
alen = strlen(eiter->a.hw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no,
eiter->a.hw_version));
/* Driver version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
strcpy(eiter->a.driver_version, qla2x00_version_str);
alen = strlen(eiter->a.driver_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no,
eiter->a.driver_version));
/* Option ROM version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
strcpy(eiter->a.orom_version, "0.00");
alen = strlen(eiter->a.orom_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no,
eiter->a.orom_version));
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
ha->isp_ops.fw_version_str(ha, eiter->a.fw_version);
alen = strlen(eiter->a.fw_version);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no,
eiter->a.fw_version));
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(ha, size + 16);
DEBUG13(printk("%s(%ld): RHBA identifier="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
ha->host_no, ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
ct_req->req.rhba.hba_identifier[4],
ct_req->req.rhba.hba_identifier[5],
ct_req->req.rhba.hba_identifier[6],
ct_req->req.rhba.hba_identifier[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
__func__, ha->host_no));
rval = QLA_ALREADY_REGISTERED;
}
} else {
DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_dhba() - De-register HBA attributes from the fabric (FDMI DHBA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
{
int rval;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
/* Issue DHBA */
/* Prepare common MS IOCB */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE,
DHBA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, DHBA_CMD,
DHBA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE);
DEBUG13(printk("%s(%ld): DHBA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no,
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_rpa() - Register port attributes with the fabric (FDMI RPA).
* @ha: HA context
*
* Returns 0 on success.
*/
static int
qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
{
int rval, alen;
uint32_t size, max_frame_size;
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
uint8_t *entries;
struct ct_fdmi_port_attr *eiter;
struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
/* Issue RPA */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
RPA_RSP_SIZE);
ct_rsp = &ha->ct_sns->p.rsp;
/* Prepare FDMI command arguments -- attribute block, attributes. */
memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
size = WWN_SIZE + 4;
/* Attributes */
ct_req->req.rpa.attrs.count =
__constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
entries = ct_req->req.rpa.port_name;
/* FC4 types. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
eiter->len = __constant_cpu_to_be16(4 + 32);
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
eiter->a.fc4_types[2], eiter->a.fc4_types[1]));
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
if (IS_QLA25XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(4);
else if (IS_QLA24XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(8);
else if (IS_QLA23XX(ha))
eiter->a.sup_speed = __constant_cpu_to_be32(2);
else
eiter->a.sup_speed = __constant_cpu_to_be32(1);
size += 4 + 4;
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
eiter->a.sup_speed));
/* Current speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
eiter->len = __constant_cpu_to_be16(4 + 4);
switch (ha->link_data_rate) {
case 0:
eiter->a.cur_speed = __constant_cpu_to_be32(1);
break;
case 1:
eiter->a.cur_speed = __constant_cpu_to_be32(2);
break;
case 3:
eiter->a.cur_speed = __constant_cpu_to_be32(8);
break;
case 4:
eiter->a.cur_speed = __constant_cpu_to_be32(4);
break;
}
size += 4 + 4;
DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
eiter->a.cur_speed));
/* Max frame size. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
eiter->len = __constant_cpu_to_be16(4 + 4);
max_frame_size = IS_QLA24XX(ha) || IS_QLA25XX(ha) ?
(uint32_t) icb24->frame_payload_size:
(uint32_t) ha->init_cb->frame_payload_size;
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
eiter->a.max_frame_size));
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
sprintf(eiter->a.os_dev_name, "/proc/scsi/qla2xxx/%ld", ha->host_no);
alen = strlen(eiter->a.os_dev_name);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
eiter->a.os_dev_name));
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(ha, size + 16);
DEBUG13(printk("%s(%ld): RPA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
ha->host_no, ct_req->req.rpa.port_name[0],
ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
ct_req->req.rpa.port_name[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
ha->host_no, rval));
} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
ha->host_no));
}
return rval;
}
/**
* qla2x00_fdmi_register() - Perform FDMI registration with the fabric management server.
* @ha: HA context
*
* Returns 0 on success.
*/
int
qla2x00_fdmi_register(scsi_qla_host_t *ha)
{
int rval;
rval = qla2x00_mgmt_svr_login(ha);
if (rval)
return rval;
rval = qla2x00_fdmi_rhba(ha);
if (rval) {
if (rval != QLA_ALREADY_REGISTERED)
return rval;
rval = qla2x00_fdmi_dhba(ha);
if (rval)
return rval;
rval = qla2x00_fdmi_rhba(ha);
if (rval)
return rval;
}
rval = qla2x00_fdmi_rpa(ha);
return rval;
}


@@ -88,6 +88,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
ha->mbx_flags = 0;
ha->isp_abort_cnt = 0;
ha->beacon_blink_led = 0;
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
rval = ha->isp_ops.pci_config(ha);
@@ -1563,7 +1564,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
ha->flags.enable_led_scheme = ((nv->efi_parameters & BIT_3) ? 1 : 0);
ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
ha->operating_mode =
(icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
@@ -1697,6 +1698,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags)
fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
atomic_set(&fcport->state, FCS_UNCONFIGURED);
fcport->flags = FCF_RLC_SUPPORT;
fcport->supported_classes = FC_COS_UNSPECIFIED;
return (fcport);
}
@@ -1898,7 +1900,8 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
continue;
/* Bypass if not same domain and area of adapter. */
if (area != ha->d_id.b.area || domain != ha->d_id.b.domain)
if (area && domain &&
(area != ha->d_id.b.area || domain != ha->d_id.b.domain))
continue;
/* Bypass invalid local loop ID. */
@@ -2075,6 +2078,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
return;
}
rport->dd_data = fcport;
rport->supported_classes = fcport->supported_classes;
rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (fcport->port_type == FCT_INITIATOR)
@@ -2130,6 +2134,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
return (QLA_SUCCESS);
}
do {
/* FDMI support. */
if (ql2xfdmienable &&
test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags))
qla2x00_fdmi_register(ha);
/* Ensure we are logged into the SNS. */
if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
loop_id = NPH_SNS;
@@ -2392,6 +2401,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
if (new_fcport->d_id.b24 == ha->d_id.b24)
continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
(ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
ISP_CFG_FL)
continue;
/* Bypass reserved domain fields. */
if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
continue;
@@ -2794,6 +2809,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
}
}
if (mb[10] & BIT_0)
fcport->supported_classes |= FC_COS_CLASS2;
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {


@@ -810,12 +810,8 @@ qla24xx_start_scsi(srb_t *sp)
ha->req_q_cnt = ha->request_q_length -
(ha->req_ring_index - cnt);
}
if (ha->req_q_cnt < (req_cnt + 2)) {
if (ha->req_q_cnt < (req_cnt + 2))
if (cmd->use_sg)
pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
cmd->sc_data_direction);
goto queuing_error;
}
/* Build command packet. */
ha->current_outstanding_cmd = handle;


@@ -451,6 +451,8 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
ha->flags.management_server_logged_in = 0;
ha->link_data_rate = 0;
if (ql2xfdmienable)
set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
/* Update AEN queue. */
qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);


@@ -19,6 +19,7 @@
#include "qla_def.h" #include "qla_def.h"
#include <linux/delay.h> #include <linux/delay.h>
#include <scsi/scsi_transport_fc.h>
static void static void
qla2x00_mbx_sem_timeout(unsigned long data) qla2x00_mbx_sem_timeout(unsigned long data)
@@ -251,7 +252,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp)
mb0 = RD_REG_WORD(&reg->isp24.mailbox0); mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl); ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
} else { } else {
mb0 = RD_MAILBOX_REG(ha, reg->isp, 0); mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl); ictrl = RD_REG_WORD(&reg->isp.ictrl);
} }
printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
@@ -982,58 +983,6 @@ qla2x00_abort_target(fc_port_t *fcport)
}
#endif
/*
* qla2x00_target_reset
* Issue target reset mailbox command.
*
* Input:
* ha = adapter block pointer.
* TARGET_QUEUE_LOCK must be released.
* ADAPTER_STATE_LOCK must be released.
*
* Returns:
* qla2x00 local function return status code.
*
* Context:
* Kernel context.
*/
int
qla2x00_target_reset(scsi_qla_host_t *ha, struct fc_port *fcport)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
DEBUG11(printk("qla2x00_target_reset(%ld): entered.\n", ha->host_no);)
if (atomic_read(&fcport->state) != FCS_ONLINE)
return 0;
mcp->mb[0] = MBC_TARGET_RESET;
if (HAS_EXTENDED_IDS(ha))
mcp->mb[1] = fcport->loop_id;
else
mcp->mb[1] = fcport->loop_id << 8;
mcp->mb[2] = ha->loop_reset_delay;
mcp->out_mb = MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 30;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3_11(printk("qla2x00_target_reset(%ld): failed=%x.\n",
ha->host_no, rval);)
} else {
/*EMPTY*/
DEBUG11(printk("qla2x00_target_reset(%ld): done.\n",
ha->host_no);)
}
return rval;
}
/*
* qla2x00_get_adapter_id
* Get adapter ID and topology.
@@ -1326,6 +1275,10 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
fcport->port_type = FCT_INITIATOR;
else
fcport->port_type = FCT_TARGET;
/* Passback COS information. */
fcport->supported_classes = (pd->options & BIT_4) ?
FC_COS_CLASS2: FC_COS_CLASS3;
}
gpd_error_out:
@@ -1661,6 +1614,13 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mb[1] |= BIT_1;
} else
mb[1] = BIT_0;
/* Passback COS information. */
mb[10] = 0;
if (lg->io_parameter[7] || lg->io_parameter[8])
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1723,6 +1683,8 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
mb[2] = mcp->mb[2];
mb[6] = mcp->mb[6];
mb[7] = mcp->mb[7];
/* COS retrieved from Get-Port-Database mailbox command. */
mb[10] = 0;
}
if (rval != QLA_SUCCESS) {
@@ -2465,3 +2427,32 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
return rval;
}
int
qla2x00_stop_firmware(scsi_qla_host_t *ha)
{
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
if (!IS_QLA24XX(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
rval = qla2x00_mailbox_command(ha, mcp);
if (rval != QLA_SUCCESS) {
DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
ha->host_no, rval));
} else {
DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
}
return rval;
}


@@ -79,7 +79,7 @@ module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xloginretrycount,
"Specify an alternate value for the NVRAM login retry count.");
int ql2xfwloadbin;
int ql2xfwloadbin=1;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
"Load ISP2xxx firmware image via hotplug.");
@@ -88,6 +88,12 @@ static void qla2x00_free_device(scsi_qla_host_t *);
static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
int ql2xfdmienable;
module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registratons "
"Default is 0 - no FDMI. 1 - perfom FDMI.");
/*
* SCSI host template entry points
*/
@@ -105,6 +111,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static int qla2x00_loop_reset(scsi_qla_host_t *ha);
static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_change_queue_depth(struct scsi_device *, int);
static int qla2x00_change_queue_type(struct scsi_device *, int);
static struct scsi_host_template qla2x00_driver_template = {
.module = THIS_MODULE,
.name = "qla2xxx",
@@ -119,6 +128,8 @@ static struct scsi_host_template qla2x00_driver_template = {
.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -129,6 +140,7 @@ static struct scsi_host_template qla2x00_driver_template = {
* which equates to 0x800000 sectors.
*/
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
};
static struct scsi_host_template qla24xx_driver_template = {
@@ -145,12 +157,15 @@ static struct scsi_host_template qla24xx_driver_template = {
.slave_alloc = qla2xxx_slave_alloc,
.slave_destroy = qla2xxx_slave_destroy,
.change_queue_depth = qla2x00_change_queue_depth,
.change_queue_type = qla2x00_change_queue_type,
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = SG_ALL,
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -487,14 +502,13 @@ qc24_fail_command:
static int
qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD HZ
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * HZ) / (ABORT_POLLING_PERIOD))
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
int ret = QLA_SUCCESS;
while (CMD_SP(cmd)) {
set_current_state(TASK_UNINTERRUPTIBLE);
msleep(ABORT_POLLING_PERIOD);
schedule_timeout(ABORT_POLLING_PERIOD);
if (--wait_iter)
break;
@@ -1016,7 +1030,7 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
if (fcport->port_type != FCT_TARGET)
continue;
status = qla2x00_target_reset(ha, fcport);
status = qla2x00_device_reset(ha, fcport);
if (status != QLA_SUCCESS)
break;
}
@@ -1103,6 +1117,28 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
sdev->hostdata = NULL;
}
static int
qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
static int
qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
if (tag_type)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag_type = 0;
return tag_type;
}
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -1113,36 +1149,23 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
static void
qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
{
/* Assume 32bit DMA address */
/* Assume a 32bit DMA mask. */
ha->flags.enable_64bit_addressing = 0;
/*
* Given the two variants pci_set_dma_mask(), allow the compiler to
* assist in setting the proper dma mask.
*/
if (sizeof(dma_addr_t) > 4) {
if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
/* Ok, a 64bit DMA mask is applicable. */
if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_64;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_64;
return;
if (pci_set_consistent_dma_mask(ha->pdev,
DMA_64BIT_MASK)) {
qla_printk(KERN_DEBUG, ha,
"Failed to set 64 bit PCI consistent mask; "
"using 32 bit.\n");
pci_set_consistent_dma_mask(ha->pdev,
DMA_32BIT_MASK);
}
} else {
qla_printk(KERN_DEBUG, ha,
"Failed to set 64 bit PCI DMA mask, falling back "
"to 32 bit MASK.\n");
pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
} }
} else {
pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK);
} }
dma_set_mask(&ha->pdev->dev, DMA_32BIT_MASK);
pci_set_consistent_dma_mask(ha->pdev, DMA_32BIT_MASK);
}
static int
@@ -1316,6 +1339,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->prev_topology = 0;
ha->ports = MAX_BUSES;
ha->init_cb_size = sizeof(init_cb_t);
ha->mgmt_svr_loop_id = MANAGEMENT_SERVER;
/* Assign ISP specific operations. */
ha->isp_ops.pci_config = qla2100_pci_config;
@@ -1338,6 +1362,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->isp_ops.calc_req_entries = qla2x00_calc_iocbs_32;
ha->isp_ops.build_iocbs = qla2x00_build_scsi_iocbs_32;
ha->isp_ops.prep_ms_iocb = qla2x00_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla2x00_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla2x00_read_nvram_data;
ha->isp_ops.write_nvram = qla2x00_write_nvram_data;
ha->isp_ops.fw_dump = qla2100_fw_dump;
@@ -1375,6 +1400,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct init_cb_24xx);
ha->mgmt_svr_loop_id = 10;
ha->isp_ops.pci_config = qla24xx_pci_config;
ha->isp_ops.reset_chip = qla24xx_reset_chip;
ha->isp_ops.chip_diag = qla24xx_chip_diag;
@@ -1395,6 +1421,7 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
ha->isp_ops.fabric_login = qla24xx_login_fabric;
ha->isp_ops.fabric_logout = qla24xx_fabric_logout;
ha->isp_ops.prep_ms_iocb = qla24xx_prep_ms_iocb;
ha->isp_ops.prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb;
ha->isp_ops.read_nvram = qla24xx_read_nvram_data;
ha->isp_ops.write_nvram = qla24xx_write_nvram_data;
ha->isp_ops.fw_dump = qla24xx_fw_dump;
@@ -1558,8 +1585,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info)
return 0;
probe_failed:
fc_remove_host(ha->host);
qla2x00_free_device(ha);
scsi_host_put(host);
@@ -1601,10 +1626,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
qla2x00_cancel_io_descriptors(ha);
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops.disable_intrs(ha);
/* Disable timer */
if (ha->timer_active)
qla2x00_stop_timer(ha);
@@ -1624,8 +1645,14 @@ qla2x00_free_device(scsi_qla_host_t *ha)
}
}
qla2x00_mem_free(ha);
/* Stop currently executing firmware. */
qla2x00_stop_firmware(ha);
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops.disable_intrs(ha);
qla2x00_mem_free(ha);
ha->flags.online = 0;
@@ -1934,7 +1961,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
{
struct list_head *fcpl, *fcptemp;
fc_port_t *fcport;
unsigned long wtime;/* max wait time if mbx cmd is busy. */
unsigned int wtime;/* max wait time if mbx cmd is busy. */
if (ha == NULL) {
/* error */
@@ -1943,11 +1970,9 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
}
/* Make sure all other threads are stopped. */
wtime = 60 * HZ;
wtime = 60 * 1000;
while (ha->dpc_wait && wtime) {
while (ha->dpc_wait && wtime)
set_current_state(TASK_INTERRUPTIBLE);
wtime = msleep_interruptible(wtime);
wtime = schedule_timeout(wtime);
}
/* free ioctl memory */
qla2x00_free_ioctl_mem(ha);
@@ -2478,15 +2503,15 @@ qla2x00_timer(scsi_qla_host_t *ha)
int
qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout)
{
const unsigned int step = HZ/10;
const unsigned int step = 100; /* msecs */
unsigned int iterations = jiffies_to_msecs(timeout)/100;
do {
if (!down_trylock(sema))
return 0;
set_current_state(TASK_INTERRUPTIBLE);
if (msleep_interruptible(step))
if (schedule_timeout(step))
break;
} while ((timeout -= step) > 0);
} while (--iterations >= 0);
return -ETIMEDOUT;
}


@@ -468,21 +468,12 @@ qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
uint32_t dwords)
{
uint32_t i;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Dword reads to flash. */
for (i = 0; i < dwords; i++, faddr++)
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
flash_data_to_access_addr(faddr)));
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return dwptr;
}
@@ -532,10 +523,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
ret = QLA_SUCCESS;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
qla24xx_get_flash_manufacturer(ha, &man_id, &flash_id);
DEBUG9(printk("%s(%ld): Flash man_id=%d flash_id=%d\n", __func__,
ha->host_no, man_id, flash_id));
@@ -599,10 +586,6 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return ret;
}
@@ -630,11 +613,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
{
uint32_t i;
uint32_t *dwptr;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Dword reads to flash. */
dwptr = (uint32_t *)buf;
@@ -642,10 +620,6 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha,
nvram_data_to_access_addr(naddr)));
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return buf;
}
@@ -690,10 +664,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
/* Pause RISC. */
WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
/* Enable flash write. */
WRT_REG_DWORD(&reg->ctrl_status,
RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -728,9 +698,5 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
/* Release RISC pause. */
WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
return ret;
}


@@ -19,9 +19,9 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.01.00b5-k"
#define QLA2XXX_VERSION "8.01.00-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 5
#define QLA_DRIVER_BETA_VER 0

drivers/scsi/raid_class.c Normal file

@@ -0,0 +1,250 @@
/*
* RAID Attributes
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/raid_class.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#define RAID_NUM_ATTRS 3
struct raid_internal {
struct raid_template r;
struct raid_function_template *f;
/* The actual attributes */
struct class_device_attribute private_attrs[RAID_NUM_ATTRS];
/* The array of null terminated pointers to attributes
* needed by scsi_sysfs.c */
struct class_device_attribute *attrs[RAID_NUM_ATTRS + 1];
};
struct raid_component {
struct list_head node;
struct device *dev;
int num;
};
#define to_raid_internal(tmpl) container_of(tmpl, struct raid_internal, r)
#define tc_to_raid_internal(tcont) ({ \
struct raid_template *r = \
container_of(tcont, struct raid_template, raid_attrs); \
to_raid_internal(r); \
})
#define ac_to_raid_internal(acont) ({ \
struct transport_container *tc = \
container_of(acont, struct transport_container, ac); \
tc_to_raid_internal(tc); \
})
#define class_device_to_raid_internal(cdev) ({ \
struct attribute_container *ac = \
attribute_container_classdev_to_container(cdev); \
ac_to_raid_internal(ac); \
})
static int raid_match(struct attribute_container *cont, struct device *dev)
{
/* We have to look for every subsystem that could house
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
if (scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
return 0;
return i->f->is_raid(dev);
}
/* FIXME: look at other subsystems too */
return 0;
}
static int raid_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct raid_data *rd;
BUG_ON(class_get_devdata(cdev));
rd = kmalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
memset(rd, 0, sizeof(*rd));
INIT_LIST_HEAD(&rd->component_list);
class_set_devdata(cdev, rd);
return 0;
}
static int raid_remove(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{
struct raid_data *rd = class_get_devdata(cdev);
struct raid_component *rc, *next;
class_set_devdata(cdev, NULL);
list_for_each_entry_safe(rc, next, &rd->component_list, node) {
char buf[40];
snprintf(buf, sizeof(buf), "component-%d", rc->num);
list_del(&rc->node);
sysfs_remove_link(&cdev->kobj, buf);
kfree(rc);
}
kfree(class_get_devdata(cdev));
return 0;
}
static DECLARE_TRANSPORT_CLASS(raid_class,
"raid_devices",
raid_setup,
raid_remove,
NULL);
static struct {
enum raid_state value;
char *name;
} raid_states[] = {
{ RAID_ACTIVE, "active" },
{ RAID_DEGRADED, "degraded" },
{ RAID_RESYNCING, "resyncing" },
{ RAID_OFFLINE, "offline" },
};
static const char *raid_state_name(enum raid_state state)
{
int i;
char *name = NULL;
for (i = 0; i < sizeof(raid_states)/sizeof(raid_states[0]); i++) {
if (raid_states[i].value == state) {
name = raid_states[i].name;
break;
}
}
return name;
}
#define raid_attr_show_internal(attr, fmt, var, code) \
static ssize_t raid_show_##attr(struct class_device *cdev, char *buf) \
{ \
struct raid_data *rd = class_get_devdata(cdev); \
code \
return snprintf(buf, 20, #fmt "\n", var); \
}
#define raid_attr_ro_states(attr, states, code) \
raid_attr_show_internal(attr, %s, name, \
const char *name; \
code \
name = raid_##states##_name(rd->attr); \
) \
static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define raid_attr_ro_internal(attr, code) \
raid_attr_show_internal(attr, %d, rd->attr, code) \
static CLASS_DEVICE_ATTR(attr, S_IRUGO, raid_show_##attr, NULL)
#define ATTR_CODE(attr) \
struct raid_internal *i = class_device_to_raid_internal(cdev); \
if (i->f->get_##attr) \
i->f->get_##attr(cdev->dev);
#define raid_attr_ro(attr) raid_attr_ro_internal(attr, )
#define raid_attr_ro_fn(attr) raid_attr_ro_internal(attr, ATTR_CODE(attr))
#define raid_attr_ro_state(attr) raid_attr_ro_states(attr, attr, ATTR_CODE(attr))
raid_attr_ro(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state(state);
void raid_component_add(struct raid_template *r,struct device *raid_dev,
struct device *component_dev)
{
struct class_device *cdev =
attribute_container_find_class_device(&r->raid_attrs.ac,
raid_dev);
struct raid_component *rc;
struct raid_data *rd = class_get_devdata(cdev);
char buf[40];
rc = kmalloc(sizeof(*rc), GFP_KERNEL);
if (!rc)
return;
INIT_LIST_HEAD(&rc->node);
rc->dev = component_dev;
rc->num = rd->component_count++;
snprintf(buf, sizeof(buf), "component-%d", rc->num);
list_add_tail(&rc->node, &rd->component_list);
sysfs_create_link(&cdev->kobj, &component_dev->kobj, buf);
}
EXPORT_SYMBOL(raid_component_add);
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{
struct raid_internal *i = kmalloc(sizeof(struct raid_internal),
GFP_KERNEL);
int count = 0;
if (unlikely(!i))
return NULL;
memset(i, 0, sizeof(*i));
i->f = ft;
i->r.raid_attrs.ac.class = &raid_class.class;
i->r.raid_attrs.ac.match = raid_match;
i->r.raid_attrs.ac.attrs = &i->attrs[0];
attribute_container_register(&i->r.raid_attrs.ac);
i->attrs[count++] = &class_device_attr_level;
i->attrs[count++] = &class_device_attr_resync;
i->attrs[count++] = &class_device_attr_state;
i->attrs[count] = NULL;
BUG_ON(count > RAID_NUM_ATTRS);
return &i->r;
}
EXPORT_SYMBOL(raid_class_attach);
void
raid_class_release(struct raid_template *r)
{
struct raid_internal *i = to_raid_internal(r);
attribute_container_unregister(&i->r.raid_attrs.ac);
kfree(i);
}
EXPORT_SYMBOL(raid_class_release);
static __init int raid_init(void)
{
return transport_class_register(&raid_class);
}
static __exit void raid_exit(void)
{
transport_class_unregister(&raid_class);
}
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("RAID device class");
MODULE_LICENSE("GPL");
module_init(raid_init);
module_exit(raid_exit);


@@ -268,6 +268,7 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
} else
put_device(&dev->sdev_gendev);
cmd->jiffies_at_alloc = jiffies;
return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
@@ -627,7 +628,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
spin_lock_irqsave(host->host_lock, flags);
scsi_cmd_get_serial(host, cmd);
if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) {
if (unlikely(host->shost_state == SHOST_DEL)) {
cmd->result = (DID_NO_CONNECT << 16);
scsi_done(cmd);
} else {
@@ -798,9 +799,23 @@ static void scsi_softirq(struct softirq_action *h)
while (!list_empty(&local_q)) {
struct scsi_cmnd *cmd = list_entry(local_q.next,
struct scsi_cmnd, eh_entry);
/* The longest time any command should be outstanding is the
* per command timeout multiplied by the number of retries.
*
* For a typical command, this is 2.5 minutes */
unsigned long wait_for
= cmd->allowed * cmd->timeout_per_command;
list_del_init(&cmd->eh_entry);
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
dev_printk(KERN_ERR, &cmd->device->sdev_gendev,
"timing out command, waited %lus\n",
wait_for/HZ);
disposition = SUCCESS;
}
scsi_log_completion(cmd, disposition);
switch (disposition) {
case SUCCESS:


@@ -114,6 +114,7 @@ static struct {
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */ {"YAMAHA", "CRW6416S", "1.0c", BLIST_NOLUN}, /* locks up */
{"", "Scanner", "1.80", BLIST_NOLUN}, /* responds to all lun */
/*
* Other types of devices that have special flags.
@@ -135,7 +136,7 @@ static struct {
{"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"COMPAQ", "MSA1000 VOLUME", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD},
{"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"COMPAQ", "HSV110", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN}, {"DDN", "SAN DataDirector", "*", BLIST_SPARSELUN},
{"DEC", "HSG80", NULL, BLIST_SPARSELUN | BLIST_NOSTARTONADD}, {"DEC", "HSG80", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"DELL", "PV660F", NULL, BLIST_SPARSELUN}, {"DELL", "PV660F", NULL, BLIST_SPARSELUN},
{"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN}, {"DELL", "PV660F PSEUDO", NULL, BLIST_SPARSELUN},
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */ {"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
@@ -191,6 +192,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */


@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -75,7 +76,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
scmd->eh_eflags |= eh_flag;
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
set_bit(SHOST_RECOVERY, &shost->shost_state);
scsi_host_set_state(shost, SHOST_RECOVERY);
shost->host_failed++;
scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
@@ -115,7 +116,6 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
add_timer(&scmd->eh_timeout);
}
EXPORT_SYMBOL(scsi_add_timer);
/**
* scsi_delete_timer - Delete/cancel timer for a given function.
@@ -143,7 +143,6 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
return rtn;
}
EXPORT_SYMBOL(scsi_delete_timer);
/**
* scsi_times_out - Timeout function for normal scsi commands.
@@ -197,7 +196,8 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
int online;
wait_event(sdev->host->host_wait, (!test_bit(SHOST_RECOVERY, &sdev->host->shost_state)));
wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
SHOST_RECOVERY));
online = scsi_device_online(sdev);
@@ -775,9 +775,11 @@ retry_tur:
__FUNCTION__, scmd, rtn));
if (rtn == SUCCESS)
return 0;
else if (rtn == NEEDS_RETRY)
else if (rtn == NEEDS_RETRY) {
if (retry_cnt--)
goto retry_tur;
return 0;
}
return 1;
}
@@ -1458,7 +1460,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
__FUNCTION__)); __FUNCTION__));
clear_bit(SHOST_RECOVERY, &shost->shost_state); scsi_host_set_state(shost, SHOST_RUNNING);
wake_up(&shost->host_wait); wake_up(&shost->host_wait);
@@ -1582,16 +1584,8 @@ int scsi_error_handler(void *data)
int rtn;
DECLARE_MUTEX_LOCKED(sem);
/*
* Flush resources
*/
daemonize("scsi_eh_%d", shost->host_no);
current->flags |= PF_NOFREEZE;
shost->eh_wait = &sem;
shost->ehandler = current;
/*
* Wake up the thread that created us.
@@ -1599,8 +1593,6 @@ int scsi_error_handler(void *data)
SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of" SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
" scsi_eh_%d\n",shost->host_no)); " scsi_eh_%d\n",shost->host_no));
complete(shost->eh_notify);
while (1) {
/*
* If we get a signal, it means we are supposed to go
@@ -1621,7 +1613,7 @@ int scsi_error_handler(void *data)
* semaphores isn't unreasonable.
*/
down_interruptible(&sem);
if (shost->eh_kill)
if (kthread_should_stop())
break;
SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
@@ -1660,22 +1652,6 @@ int scsi_error_handler(void *data)
* Make sure that nobody tries to wake us up again.
*/
shost->eh_wait = NULL;
/*
* Knock this down too. From this point on, the host is flying
* without a pilot. If this is because the module is being unloaded,
* that's fine. If the user sent a signal to this thing, we are
* potentially in real danger.
*/
shost->eh_active = 0;
shost->ehandler = NULL;
/*
* If anyone is waiting for us to exit (i.e. someone trying to unload
* a driver), then wake up that process to let them know we are on
* the way out the door.
*/
complete_and_exit(shost->eh_notify, 0);
return 0;
}
@@ -1846,12 +1822,16 @@ EXPORT_SYMBOL(scsi_reset_provider);
int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr)
{
if (!sense_buffer || !sb_len || (sense_buffer[0] & 0x70) != 0x70)
if (!sense_buffer || !sb_len)
return 0;
memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
sshdr->response_code = (sense_buffer[0] & 0x7f);
if (!scsi_sense_valid(sshdr))
return 0;
if (sshdr->response_code >= 0x72) {
/*
* descriptor format


@@ -30,20 +30,20 @@
#define MAX_BUF PAGE_SIZE
/*
* If we are told to probe a host, we will return 0 if the host is not
* present, 1 if the host is present, and will return an identifying
* string at *arg, if arg is non null, filling to the length stored at
* (int *) arg
/**
* ioctl_probe -- return host identification
* @host: host to identify
* @buffer: userspace buffer for identification
*
* Return an identifying string at @buffer, if @buffer is non-NULL, filling
* to the length stored at * (int *) @buffer.
*/
static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
{
unsigned int len, slen;
const char *string;
int temp = host->hostt->present;
if (temp && buffer) {
if (buffer) {
if (get_user(len, (unsigned int __user *) buffer))
return -EFAULT;
@@ -59,7 +59,7 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
return -EFAULT;
}
}
return temp;
return 1;
}
/*
@@ -88,25 +88,18 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer)
static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
int timeout, int retries)
{
struct scsi_request *sreq;
int result;
struct scsi_sense_hdr sshdr;
SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
sreq = scsi_allocate_request(sdev, GFP_KERNEL);
if (!sreq) {
printk(KERN_WARNING "SCSI internal ioctl failed, no memory\n");
return -ENOMEM;
}
sreq->sr_data_direction = DMA_NONE;
scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", sreq->sr_result));
if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
(scsi_request_normalize_sense(sreq, &sshdr))) {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
&sshdr, timeout, retries);
SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
if ((driver_byte(result) & DRIVER_SENSE) &&
(scsi_sense_valid(&sshdr))) {
switch (sshdr.sense_key) {
case ILLEGAL_REQUEST:
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
@@ -125,7 +118,7 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
case UNIT_ATTENTION: case UNIT_ATTENTION:
if (sdev->removable) { if (sdev->removable) {
sdev->changed = 1; sdev->changed = 1;
sreq->sr_result = 0; /* This is no longer considered an error */ result = 0; /* This is no longer considered an error */
break; break;
} }
default: /* Fall through for non-removable media */ default: /* Fall through for non-removable media */
@@ -135,15 +128,13 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
sdev->channel, sdev->channel,
sdev->id, sdev->id,
sdev->lun, sdev->lun,
sreq->sr_result); result);
scsi_print_req_sense(" ", sreq); scsi_print_sense_hdr(" ", &sshdr);
break; break;
} }
} }
result = sreq->sr_result;
SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n")); SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n"));
scsi_release_request(sreq);
return result; return result;
} }
@@ -208,8 +199,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
{ {
char *buf; char *buf;
unsigned char cmd[MAX_COMMAND_SIZE]; unsigned char cmd[MAX_COMMAND_SIZE];
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
char __user *cmd_in; char __user *cmd_in;
struct scsi_request *sreq;
unsigned char opcode; unsigned char opcode;
unsigned int inlen, outlen, cmdlen; unsigned int inlen, outlen, cmdlen;
unsigned int needed, buf_needed; unsigned int needed, buf_needed;
@@ -321,31 +312,23 @@ int scsi_ioctl_send_command(struct scsi_device *sdev,
break; break;
} }
sreq = scsi_allocate_request(sdev, GFP_KERNEL); result = scsi_execute(sdev, cmd, data_direction, buf, needed,
if (!sreq) { sense, timeout, retries, 0);
result = -EINTR;
goto error;
}
sreq->sr_data_direction = data_direction;
scsi_wait_req(sreq, cmd, buf, needed, timeout, retries);
/* /*
* If there was an error condition, pass the info back to the user. * If there was an error condition, pass the info back to the user.
*/ */
result = sreq->sr_result;
if (result) { if (result) {
int sb_len = sizeof(sreq->sr_sense_buffer); int sb_len = sizeof(*sense);
sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len; sb_len = (sb_len > OMAX_SB_LEN) ? OMAX_SB_LEN : sb_len;
if (copy_to_user(cmd_in, sreq->sr_sense_buffer, sb_len)) if (copy_to_user(cmd_in, sense, sb_len))
result = -EFAULT; result = -EFAULT;
} else { } else {
if (copy_to_user(cmd_in, buf, outlen)) if (copy_to_user(cmd_in, buf, outlen))
result = -EFAULT; result = -EFAULT;
} }
scsi_release_request(sreq);
error: error:
kfree(buf); kfree(buf);
return result; return result;
@@ -475,8 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
* error processing, as long as the device was opened * error processing, as long as the device was opened
* non-blocking */ * non-blocking */
if (filp && filp->f_flags & O_NONBLOCK) { if (filp && filp->f_flags & O_NONBLOCK) {
if (test_bit(SHOST_RECOVERY, if (sdev->host->shost_state == SHOST_RECOVERY)
&sdev->host->shost_state))
return -ENODEV; return -ENODEV;
} else if (!scsi_block_when_processing_errors(sdev)) } else if (!scsi_block_when_processing_errors(sdev))
return -ENODEV; return -ENODEV;

View File

@@ -232,23 +232,6 @@ void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
} }
EXPORT_SYMBOL(scsi_do_req); EXPORT_SYMBOL(scsi_do_req);
static void scsi_wait_done(struct scsi_cmnd *cmd)
{
struct request *req = cmd->request;
struct request_queue *q = cmd->device->request_queue;
unsigned long flags;
req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
if (req->waiting)
complete(req->waiting);
}
/* This is the end routine we get to if a command was never attached /* This is the end routine we get to if a command was never attached
* to the request. Simply complete the request without changing * to the request. Simply complete the request without changing
* rq_status; this will cause a DRIVER_ERROR. */ * rq_status; this will cause a DRIVER_ERROR. */
@@ -263,21 +246,114 @@ void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
unsigned bufflen, int timeout, int retries) unsigned bufflen, int timeout, int retries)
{ {
DECLARE_COMPLETION(wait); DECLARE_COMPLETION(wait);
int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
sreq->sr_request->waiting = &wait; struct request *req;
sreq->sr_request->rq_status = RQ_SCSI_BUSY;
sreq->sr_request->end_io = scsi_wait_req_end_io; req = blk_get_request(sreq->sr_device->request_queue, write,
scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done, __GFP_WAIT);
timeout, retries); if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
buffer, bufflen, __GFP_WAIT)) {
sreq->sr_result = DRIVER_ERROR << 24;
blk_put_request(req);
return;
}
req->flags |= REQ_NOMERGE;
req->waiting = &wait;
req->end_io = scsi_wait_req_end_io;
req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
req->sense = sreq->sr_sense_buffer;
req->sense_len = 0;
memcpy(req->cmd, cmnd, req->cmd_len);
req->timeout = timeout;
req->flags |= REQ_BLOCK_PC;
req->rq_disk = NULL;
blk_insert_request(sreq->sr_device->request_queue, req,
sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
wait_for_completion(&wait); wait_for_completion(&wait);
sreq->sr_request->waiting = NULL; sreq->sr_request->waiting = NULL;
if (sreq->sr_request->rq_status != RQ_SCSI_DONE) sreq->sr_result = req->errors;
if (req->errors)
sreq->sr_result |= (DRIVER_ERROR << 24); sreq->sr_result |= (DRIVER_ERROR << 24);
__scsi_release_request(sreq); blk_put_request(req);
} }
EXPORT_SYMBOL(scsi_wait_req); EXPORT_SYMBOL(scsi_wait_req);
/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
* @sense: optional sense buffer
* @timeout: request timeout in jiffies
* @retries: number of times to retry request
* @flags: flags to OR into the request flags
*
* returns the req->errors value which is the scsi_cmnd result
* field.
**/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, int timeout, int retries, int flags)
{
struct request *req;
int write = (data_direction == DMA_TO_DEVICE);
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_WAIT))
goto out;
req->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(req->cmd, cmd, req->cmd_len);
req->sense = sense;
req->sense_len = 0;
req->timeout = timeout;
req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
*/
blk_execute_rq(req->q, NULL, req, 1);
ret = req->errors;
out:
blk_put_request(req);
return ret;
}
EXPORT_SYMBOL(scsi_execute);
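
scsi_execute() is the low-level entry point: the caller supplies a raw sense buffer and may OR extra flags into the request. A minimal sketch of a caller follows; it is not from this patch, the helper name, buffer size, timeout and retry count are illustrative, and the usual SCSI headers are assumed:

        #include <scsi/scsi.h>
        #include <scsi/scsi_cmnd.h>
        #include <scsi/scsi_device.h>

        /* Hypothetical helper: read the 8-byte READ CAPACITY response. */
        static int example_read_capacity(struct scsi_device *sdev, unsigned char *buf)
        {
                unsigned char cmd[10] = { READ_CAPACITY, };
                unsigned char sense[SCSI_SENSE_BUFFERSIZE];

                /* last argument ORs extra flags (e.g. REQ_FAILFAST) into the request */
                return scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, 8,
                                    sense, 30 * HZ, 3, 0);
        }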
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
char *sense = NULL;
int result;
if (sshdr) {
sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!sense)
return DRIVER_ERROR << 24;
memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
}
result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
sense, timeout, retries, 0);
if (sshdr)
scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
kfree(sense);
return result;
}
EXPORT_SYMBOL(scsi_execute_req);
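
scsi_execute_req() is the variant most of the converted callers in this patch use: it allocates the sense buffer itself and hands back a normalized scsi_sense_hdr. A hedged sketch of the resulting calling pattern (the command choice, timeout and retries are examples only, not taken from this patch):

        #include <scsi/scsi.h>
        #include <scsi/scsi_device.h>
        #include <scsi/scsi_eh.h>

        /* Hypothetical helper: flush the device cache and report sense data. */
        static int example_sync_cache(struct scsi_device *sdev)
        {
                unsigned char cmd[10] = { SYNCHRONIZE_CACHE, };
                struct scsi_sense_hdr sshdr;
                int result;

                result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
                                          &sshdr, 30 * HZ, 3);

                if (result && (driver_byte(result) & DRIVER_SENSE) &&
                    scsi_sense_valid(&sshdr))
                        printk(KERN_WARNING "flush failed: key 0x%x asc 0x%x\n",
                               sshdr.sense_key, sshdr.asc);

                return result;
        }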
/* /*
* Function: scsi_init_cmd_errh() * Function: scsi_init_cmd_errh()
* *
@@ -348,7 +424,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
spin_lock_irqsave(shost->host_lock, flags); spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--; shost->host_busy--;
if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) && if (unlikely((shost->shost_state == SHOST_RECOVERY) &&
shost->host_failed)) shost->host_failed))
scsi_eh_wakeup(shost); scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock); spin_unlock(shost->host_lock);
@@ -851,17 +927,20 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
scsi_requeue_command(q, cmd); scsi_requeue_command(q, cmd);
return; return;
} }
printk(KERN_INFO "Device %s not ready.\n", if (!(req->flags & REQ_QUIET))
req->rq_disk ? req->rq_disk->disk_name : ""); dev_printk(KERN_INFO,
&cmd->device->sdev_gendev,
"Device not ready.\n");
cmd = scsi_end_request(cmd, 0, this_count, 1); cmd = scsi_end_request(cmd, 0, this_count, 1);
return; return;
case VOLUME_OVERFLOW: case VOLUME_OVERFLOW:
printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ", if (!(req->flags & REQ_QUIET)) {
cmd->device->host->host_no, dev_printk(KERN_INFO,
(int)cmd->device->channel, &cmd->device->sdev_gendev,
(int)cmd->device->id, (int)cmd->device->lun); "Volume overflow, CDB: ");
__scsi_print_command(cmd->data_cmnd); __scsi_print_command(cmd->data_cmnd);
scsi_print_sense("", cmd); scsi_print_sense("", cmd);
}
cmd = scsi_end_request(cmd, 0, block_bytes, 1); cmd = scsi_end_request(cmd, 0, block_bytes, 1);
return; return;
default: default:
@@ -878,14 +957,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
return; return;
} }
if (result) { if (result) {
printk(KERN_INFO "SCSI error : <%d %d %d %d> return code " if (!(req->flags & REQ_QUIET)) {
"= 0x%x\n", cmd->device->host->host_no, dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
cmd->device->channel, "SCSI error: return code = 0x%x\n", result);
cmd->device->id,
cmd->device->lun, result);
if (driver_byte(result) & DRIVER_SENSE) if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd); scsi_print_sense("", cmd);
}
/* /*
* Mark a single buffer as not uptodate. Queue the remainder. * Mark a single buffer as not uptodate. Queue the remainder.
* We sometimes get this cruft in the event that a medium error * We sometimes get this cruft in the event that a medium error
@@ -1020,6 +1098,12 @@ static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static void scsi_generic_done(struct scsi_cmnd *cmd)
{
BUG_ON(!blk_pc_request(cmd->request));
scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}
static int scsi_prep_fn(struct request_queue *q, struct request *req) static int scsi_prep_fn(struct request_queue *q, struct request *req)
{ {
struct scsi_device *sdev = q->queuedata; struct scsi_device *sdev = q->queuedata;
@@ -1061,7 +1145,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
* these two cases differently. We differentiate by looking * these two cases differently. We differentiate by looking
* at request->cmd, as this tells us the real story. * at request->cmd, as this tells us the real story.
*/ */
if (req->flags & REQ_SPECIAL) { if (req->flags & REQ_SPECIAL && req->special) {
struct scsi_request *sreq = req->special; struct scsi_request *sreq = req->special;
if (sreq->sr_magic == SCSI_REQ_MAGIC) { if (sreq->sr_magic == SCSI_REQ_MAGIC) {
@@ -1073,7 +1157,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
cmd = req->special; cmd = req->special;
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
if(unlikely(specials_only)) { if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
if(specials_only == SDEV_QUIESCE || if(specials_only == SDEV_QUIESCE ||
specials_only == SDEV_BLOCK) specials_only == SDEV_BLOCK)
return BLKPREP_DEFER; return BLKPREP_DEFER;
@@ -1142,11 +1226,26 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
/* /*
* Initialize the actual SCSI command for this request. * Initialize the actual SCSI command for this request.
*/ */
drv = *(struct scsi_driver **)req->rq_disk->private_data; if (req->rq_disk) {
if (unlikely(!drv->init_command(cmd))) { drv = *(struct scsi_driver **)req->rq_disk->private_data;
scsi_release_buffers(cmd); if (unlikely(!drv->init_command(cmd))) {
scsi_put_command(cmd); scsi_release_buffers(cmd);
return BLKPREP_KILL; scsi_put_command(cmd);
return BLKPREP_KILL;
}
} else {
memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
if (rq_data_dir(req) == WRITE)
cmd->sc_data_direction = DMA_TO_DEVICE;
else if (req->data_len)
cmd->sc_data_direction = DMA_FROM_DEVICE;
else
cmd->sc_data_direction = DMA_NONE;
cmd->transfersize = req->data_len;
cmd->allowed = 3;
cmd->timeout_per_command = req->timeout;
cmd->done = scsi_generic_done;
} }
} }
@@ -1207,7 +1306,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost, struct Scsi_Host *shost,
struct scsi_device *sdev) struct scsi_device *sdev)
{ {
if (test_bit(SHOST_RECOVERY, &shost->shost_state)) if (shost->shost_state == SHOST_RECOVERY)
return 0; return 0;
if (shost->host_busy == 0 && shost->host_blocked) { if (shost->host_busy == 0 && shost->host_blocked) {
/* /*
@@ -1539,9 +1638,9 @@ void scsi_exit_queue(void)
} }
} }
/** /**
* __scsi_mode_sense - issue a mode sense, falling back from 10 to * scsi_mode_sense - issue a mode sense, falling back from 10 to
* six bytes if necessary. * six bytes if necessary.
* @sreq: SCSI request to fill in with the MODE_SENSE * @sdev: SCSI device to be queried
* @dbd: set if mode sense will allow block descriptors to be returned * @dbd: set if mode sense will allow block descriptors to be returned
* @modepage: mode page being requested * @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes) * @buffer: request buffer (may not be smaller than eight bytes)
@@ -1549,26 +1648,34 @@ void scsi_exit_queue(void)
* @timeout: command timeout * @timeout: command timeout
* @retries: number of retries before failing * @retries: number of retries before failing
* @data: returns a structure abstracting the mode header data * @data: returns a structure abstracting the mode header data
* @sense: place to put sense data (or NULL if no sense to be collected).
* must be SCSI_SENSE_BUFFERSIZE big.
* *
* Returns zero if unsuccessful, or the header offset (either 4 * Returns zero if unsuccessful, or the header offset (either 4
* or 8 depending on whether a six or ten byte command was * or 8 depending on whether a six or ten byte command was
* issued) if successful. * issued) if successful.
**/ **/
int int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage, scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout, int retries, unsigned char *buffer, int len, int timeout, int retries,
struct scsi_mode_data *data) { struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
unsigned char cmd[12]; unsigned char cmd[12];
int use_10_for_ms; int use_10_for_ms;
int header_length; int header_length;
int result;
struct scsi_sense_hdr my_sshdr;
memset(data, 0, sizeof(*data)); memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12); memset(&cmd[0], 0, 12);
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage; cmd[2] = modepage;
/* caller might not be interested in sense, but we need it */
if (!sshdr)
sshdr = &my_sshdr;
retry: retry:
use_10_for_ms = sreq->sr_device->use_10_for_ms; use_10_for_ms = sdev->use_10_for_ms;
if (use_10_for_ms) { if (use_10_for_ms) {
if (len < 8) if (len < 8)
@@ -1586,36 +1693,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
header_length = 4; header_length = 4;
} }
sreq->sr_cmd_len = 0;
memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
sreq->sr_data_direction = DMA_FROM_DEVICE;
memset(buffer, 0, len); memset(buffer, 0, len);
scsi_wait_req(sreq, cmd, buffer, len, timeout, retries); result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
sshdr, timeout, retries);
/* This code looks awful: what it's doing is making sure an /* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command * ILLEGAL REQUEST sense return identifies the actual command
* byte as the problem. MODE_SENSE commands can return * byte as the problem. MODE_SENSE commands can return
* ILLEGAL REQUEST if the code page isn't supported */ * ILLEGAL REQUEST if the code page isn't supported */
if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) && if (use_10_for_ms && !scsi_status_is_good(result) &&
(driver_byte(sreq->sr_result) & DRIVER_SENSE)) { (driver_byte(result) & DRIVER_SENSE)) {
struct scsi_sense_hdr sshdr; if (scsi_sense_valid(sshdr)) {
if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
if (scsi_request_normalize_sense(sreq, &sshdr)) { (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
(sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
/* /*
* Invalid command operation code * Invalid command operation code
*/ */
sreq->sr_device->use_10_for_ms = 0; sdev->use_10_for_ms = 0;
goto retry; goto retry;
} }
} }
} }
if(scsi_status_is_good(sreq->sr_result)) { if(scsi_status_is_good(result)) {
data->header_length = header_length; data->header_length = header_length;
if(use_10_for_ms) { if(use_10_for_ms) {
data->length = buffer[0]*256 + buffer[1] + 2; data->length = buffer[0]*256 + buffer[1] + 2;
@@ -1632,73 +1734,31 @@ __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
} }
} }
return sreq->sr_result; return result;
}
EXPORT_SYMBOL(__scsi_mode_sense);
/**
* scsi_mode_sense - issue a mode sense, falling back from 10 to
* six bytes if necessary.
* @sdev: scsi device to send command to.
* @dbd: set if mode sense will disable block descriptors in the return
* @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
* @retries: number of retries before failing
*
* Returns zero if unsuccessful, or the header offset (either 4
* or 8 depending on whether a six or ten byte command was
* issued) if successful.
**/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout, int retries,
struct scsi_mode_data *data)
{
struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
int ret;
if (!sreq)
return -1;
ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
timeout, retries, data);
scsi_release_request(sreq);
return ret;
} }
EXPORT_SYMBOL(scsi_mode_sense); EXPORT_SYMBOL(scsi_mode_sense);
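
With the wrapper gone, scsi_mode_sense() takes the scsi_device directly plus an optional sense header. A hedged usage sketch (the buffer size, page code, timeout and the write-protect interpretation are illustrative, not mandated by this patch):

        #include <scsi/scsi.h>
        #include <scsi/scsi_device.h>
        #include <scsi/scsi_eh.h>

        /* Hypothetical helper: request all mode pages and test the WP bit
         * in the device-specific parameter byte of the mode header. */
        static int example_is_write_protected(struct scsi_device *sdev)
        {
                unsigned char buffer[64];
                struct scsi_mode_data data;
                struct scsi_sense_hdr sshdr;
                int res;

                res = scsi_mode_sense(sdev, 0 /* dbd */, 0x3F /* all pages */,
                                      buffer, sizeof(buffer), 30 * HZ, 3,
                                      &data, &sshdr);
                if (!scsi_status_is_good(res))
                        return -EIO;

                return (data.device_specific & 0x80) ? 1 : 0;
        }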
int int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{ {
struct scsi_request *sreq;
char cmd[] = { char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0, TEST_UNIT_READY, 0, 0, 0, 0, 0,
}; };
struct scsi_sense_hdr sshdr;
int result; int result;
sreq = scsi_allocate_request(sdev, GFP_KERNEL); result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
if (!sreq) timeout, retries);
return -ENOMEM;
sreq->sr_data_direction = DMA_NONE; if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);
if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) { if ((scsi_sense_valid(&sshdr)) &&
struct scsi_sense_hdr sshdr;
if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
((sshdr.sense_key == UNIT_ATTENTION) || ((sshdr.sense_key == UNIT_ATTENTION) ||
(sshdr.sense_key == NOT_READY))) { (sshdr.sense_key == NOT_READY))) {
sdev->changed = 1; sdev->changed = 1;
sreq->sr_result = 0; result = 0;
} }
} }
result = sreq->sr_result;
scsi_release_request(sreq);
return result; return result;
} }
EXPORT_SYMBOL(scsi_test_unit_ready); EXPORT_SYMBOL(scsi_test_unit_ready);
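
scsi_test_unit_ready() keeps its signature but now returns the scsi_execute_req() result directly, absorbing removable-media unit attentions into sdev->changed. A one-line sketch of a caller (kernel context assumed; the timeout and retry values are examples):

        /* Hypothetical helper: non-zero return means the unit answered TUR. */
        static int example_media_ready(struct scsi_device *sdev)
        {
                return scsi_test_unit_ready(sdev, 30 * HZ, 3) == 0;
        }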

View File

@@ -63,6 +63,9 @@ extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void); extern void scsi_exit_devinfo(void);
/* scsi_error.c */ /* scsi_error.c */
extern void scsi_add_timer(struct scsi_cmnd *, int,
void (*)(struct scsi_cmnd *));
extern int scsi_delete_timer(struct scsi_cmnd *);
extern void scsi_times_out(struct scsi_cmnd *cmd); extern void scsi_times_out(struct scsi_cmnd *cmd);
extern int scsi_error_handler(void *host); extern int scsi_error_handler(void *host);
extern int scsi_decide_disposition(struct scsi_cmnd *cmd); extern int scsi_decide_disposition(struct scsi_cmnd *cmd);

View File

@@ -111,15 +111,14 @@ MODULE_PARM_DESC(inq_timeout,
/** /**
* scsi_unlock_floptical - unlock device via a special MODE SENSE command * scsi_unlock_floptical - unlock device via a special MODE SENSE command
* @sreq: used to send the command * @sdev: scsi device to send command to
* @result: area to store the result of the MODE SENSE * @result: area to store the result of the MODE SENSE
* *
* Description: * Description:
* Send a vendor specific MODE SENSE (not a MODE SELECT) command using * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
* @sreq to unlock a device, storing the (unused) results into result.
* Called for BLIST_KEY devices. * Called for BLIST_KEY devices.
**/ **/
static void scsi_unlock_floptical(struct scsi_request *sreq, static void scsi_unlock_floptical(struct scsi_device *sdev,
unsigned char *result) unsigned char *result)
{ {
unsigned char scsi_cmd[MAX_COMMAND_SIZE]; unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -129,11 +128,10 @@ static void scsi_unlock_floptical(struct scsi_request *sreq,
scsi_cmd[1] = 0; scsi_cmd[1] = 0;
scsi_cmd[2] = 0x2e; scsi_cmd[2] = 0x2e;
scsi_cmd[3] = 0; scsi_cmd[3] = 0;
scsi_cmd[4] = 0x2a; /* size */ scsi_cmd[4] = 0x2a; /* size */
scsi_cmd[5] = 0; scsi_cmd[5] = 0;
sreq->sr_cmd_len = 0; scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
sreq->sr_data_direction = DMA_FROM_DEVICE; SCSI_TIMEOUT, 3);
scsi_wait_req(sreq, scsi_cmd, result, 0x2a /* size */, SCSI_TIMEOUT, 3);
} }
/** /**
@@ -433,26 +431,25 @@ void scsi_target_reap(struct scsi_target *starget)
/** /**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
* @sreq: used to send the INQUIRY * @sdev: scsi_device to probe
* @inq_result: area to store the INQUIRY result * @inq_result: area to store the INQUIRY result
* @result_len: len of inq_result
* @bflags: store any bflags found here * @bflags: store any bflags found here
* *
* Description: * Description:
* Probe the lun associated with @sreq using a standard SCSI INQUIRY; * Probe the lun associated with @sdev using a standard SCSI INQUIRY;
* *
* If the INQUIRY is successful, sreq->sr_result is zero and: the * If the INQUIRY is successful, zero is returned and the
* INQUIRY data is in @inq_result; the scsi_level and INQUIRY length * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
* are copied to the Scsi_Device at @sreq->sr_device (sdev); * are copied to the Scsi_Device; any flags value is stored in *@bflags.
* any flags value is stored in *@bflags.
**/ **/
static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result, static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
int *bflags) int result_len, int *bflags)
{ {
struct scsi_device *sdev = sreq->sr_device; /* a bit ugly */
unsigned char scsi_cmd[MAX_COMMAND_SIZE]; unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len; int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0; int response_len = 0;
int pass, count; int pass, count, result;
struct scsi_sense_hdr sshdr; struct scsi_sense_hdr sshdr;
*bflags = 0; *bflags = 0;
@@ -475,28 +472,26 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
memset(scsi_cmd, 0, 6); memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY; scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len; scsi_cmd[4] = (unsigned char) try_inquiry_len;
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_FROM_DEVICE;
memset(inq_result, 0, try_inquiry_len); memset(inq_result, 0, try_inquiry_len);
scsi_wait_req(sreq, (void *) scsi_cmd, (void *) inq_result,
try_inquiry_len, result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
HZ/2 + HZ*scsi_inq_timeout, 3); inq_result, try_inquiry_len, &sshdr,
HZ / 2 + HZ * scsi_inq_timeout, 3);
SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
"with code 0x%x\n", "with code 0x%x\n",
sreq->sr_result ? "failed" : "successful", result ? "failed" : "successful", result));
sreq->sr_result));
if (sreq->sr_result) { if (result) {
/* /*
* not-ready to ready transition [asc/ascq=0x28/0x0] * not-ready to ready transition [asc/ascq=0x28/0x0]
* or power-on, reset [asc/ascq=0x29/0x0], continue. * or power-on, reset [asc/ascq=0x29/0x0], continue.
* INQUIRY should not yield UNIT_ATTENTION * INQUIRY should not yield UNIT_ATTENTION
* but many buggy devices do so anyway. * but many buggy devices do so anyway.
*/ */
if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && if ((driver_byte(result) & DRIVER_SENSE) &&
scsi_request_normalize_sense(sreq, &sshdr)) { scsi_sense_valid(&sshdr)) {
if ((sshdr.sense_key == UNIT_ATTENTION) && if ((sshdr.sense_key == UNIT_ATTENTION) &&
((sshdr.asc == 0x28) || ((sshdr.asc == 0x28) ||
(sshdr.asc == 0x29)) && (sshdr.asc == 0x29)) &&
@@ -507,7 +502,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
break; break;
} }
if (sreq->sr_result == 0) { if (result == 0) {
response_len = (unsigned char) inq_result[4] + 5; response_len = (unsigned char) inq_result[4] + 5;
if (response_len > 255) if (response_len > 255)
response_len = first_inquiry_len; /* sanity */ response_len = first_inquiry_len; /* sanity */
@@ -556,8 +551,8 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
/* If the last transfer attempt got an error, assume the /* If the last transfer attempt got an error, assume the
* peripheral doesn't exist or is dead. */ * peripheral doesn't exist or is dead. */
if (sreq->sr_result) if (result)
return; return -EIO;
/* Don't report any more data than the device says is valid */ /* Don't report any more data than the device says is valid */
sdev->inquiry_len = min(try_inquiry_len, response_len); sdev->inquiry_len = min(try_inquiry_len, response_len);
@@ -593,7 +588,7 @@ static void scsi_probe_lun(struct scsi_request *sreq, char *inq_result,
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++; sdev->scsi_level++;
return; return 0;
} }
/** /**
@@ -800,9 +795,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
void *hostdata) void *hostdata)
{ {
struct scsi_device *sdev; struct scsi_device *sdev;
struct scsi_request *sreq;
unsigned char *result; unsigned char *result;
int bflags, res = SCSI_SCAN_NO_RESPONSE; int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
/* /*
@@ -831,16 +825,13 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
sdev = scsi_alloc_sdev(starget, lun, hostdata); sdev = scsi_alloc_sdev(starget, lun, hostdata);
if (!sdev) if (!sdev)
goto out; goto out;
sreq = scsi_allocate_request(sdev, GFP_ATOMIC);
if (!sreq) result = kmalloc(result_len, GFP_ATOMIC |
goto out_free_sdev;
result = kmalloc(256, GFP_ATOMIC |
((shost->unchecked_isa_dma) ? __GFP_DMA : 0)); ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
if (!result) if (!result)
goto out_free_sreq; goto out_free_sdev;
scsi_probe_lun(sreq, result, &bflags); if (scsi_probe_lun(sdev, result, result_len, &bflags))
if (sreq->sr_result)
goto out_free_result; goto out_free_result;
/* /*
@@ -868,7 +859,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (res == SCSI_SCAN_LUN_PRESENT) { if (res == SCSI_SCAN_LUN_PRESENT) {
if (bflags & BLIST_KEY) { if (bflags & BLIST_KEY) {
sdev->lockable = 0; sdev->lockable = 0;
scsi_unlock_floptical(sreq, result); scsi_unlock_floptical(sdev, result);
} }
if (bflagsp) if (bflagsp)
*bflagsp = bflags; *bflagsp = bflags;
@@ -876,8 +867,6 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
out_free_result: out_free_result:
kfree(result); kfree(result);
out_free_sreq:
scsi_release_request(sreq);
out_free_sdev: out_free_sdev:
if (res == SCSI_SCAN_LUN_PRESENT) { if (res == SCSI_SCAN_LUN_PRESENT) {
if (sdevp) { if (sdevp) {
@@ -1070,8 +1059,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
unsigned int lun; unsigned int lun;
unsigned int num_luns; unsigned int num_luns;
unsigned int retries; unsigned int retries;
int result;
struct scsi_lun *lunp, *lun_data; struct scsi_lun *lunp, *lun_data;
struct scsi_request *sreq;
u8 *data; u8 *data;
struct scsi_sense_hdr sshdr; struct scsi_sense_hdr sshdr;
struct scsi_target *starget = scsi_target(sdev); struct scsi_target *starget = scsi_target(sdev);
@@ -1089,10 +1078,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
if (bflags & BLIST_NOLUN) if (bflags & BLIST_NOLUN)
return 0; return 0;
sreq = scsi_allocate_request(sdev, GFP_ATOMIC);
if (!sreq)
goto out;
sprintf(devname, "host %d channel %d id %d", sprintf(devname, "host %d channel %d id %d",
sdev->host->host_no, sdev->channel, sdev->id); sdev->host->host_no, sdev->channel, sdev->id);
@@ -1110,7 +1095,7 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
lun_data = kmalloc(length, GFP_ATOMIC | lun_data = kmalloc(length, GFP_ATOMIC |
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
if (!lun_data) if (!lun_data)
goto out_release_request; goto out;
scsi_cmd[0] = REPORT_LUNS; scsi_cmd[0] = REPORT_LUNS;
@@ -1129,8 +1114,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
scsi_cmd[10] = 0; /* reserved */ scsi_cmd[10] = 0; /* reserved */
scsi_cmd[11] = 0; /* control */ scsi_cmd[11] = 0; /* control */
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_FROM_DEVICE;
/* /*
* We can get a UNIT ATTENTION, for example a power on/reset, so * We can get a UNIT ATTENTION, for example a power on/reset, so
@@ -1146,29 +1129,29 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending" SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending"
" REPORT LUNS to %s (try %d)\n", devname, " REPORT LUNS to %s (try %d)\n", devname,
retries)); retries));
scsi_wait_req(sreq, scsi_cmd, lun_data, length,
SCSI_TIMEOUT + 4*HZ, 3); result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
lun_data, length, &sshdr,
SCSI_TIMEOUT + 4 * HZ, 3);
SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
" %s (try %d) result 0x%x\n", sreq->sr_result " %s (try %d) result 0x%x\n", result
? "failed" : "successful", retries, ? "failed" : "successful", retries, result));
sreq->sr_result)); if (result == 0)
if (sreq->sr_result == 0)
break; break;
else if (scsi_request_normalize_sense(sreq, &sshdr)) { else if (scsi_sense_valid(&sshdr)) {
if (sshdr.sense_key != UNIT_ATTENTION) if (sshdr.sense_key != UNIT_ATTENTION)
break; break;
} }
} }
if (sreq->sr_result) { if (result) {
/* /*
* The device probably does not support a REPORT LUN command * The device probably does not support a REPORT LUN command
*/ */
kfree(lun_data); kfree(lun_data);
scsi_release_request(sreq);
return 1; return 1;
} }
scsi_release_request(sreq);
/* /*
* Get the length from the first four bytes of lun_data. * Get the length from the first four bytes of lun_data.
@@ -1242,8 +1225,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
kfree(lun_data); kfree(lun_data);
return 0; return 0;
out_release_request:
scsi_release_request(sreq);
out: out:
/* /*
* We are out of memory, don't try scanning any further. * We are out of memory, don't try scanning any further.
@@ -1265,9 +1246,12 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
get_device(&starget->dev); get_device(&starget->dev);
down(&shost->scan_mutex); down(&shost->scan_mutex);
res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); if (scsi_host_scan_allowed(shost)) {
if (res != SCSI_SCAN_LUN_PRESENT) res = scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1,
sdev = ERR_PTR(-ENODEV); hostdata);
if (res != SCSI_SCAN_LUN_PRESENT)
sdev = ERR_PTR(-ENODEV);
}
up(&shost->scan_mutex); up(&shost->scan_mutex);
scsi_target_reap(starget); scsi_target_reap(starget);
put_device(&starget->dev); put_device(&starget->dev);
@@ -1417,11 +1401,15 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
return -EINVAL; return -EINVAL;
down(&shost->scan_mutex); down(&shost->scan_mutex);
if (channel == SCAN_WILD_CARD) if (scsi_host_scan_allowed(shost)) {
for (channel = 0; channel <= shost->max_channel; channel++) if (channel == SCAN_WILD_CARD)
for (channel = 0; channel <= shost->max_channel;
channel++)
scsi_scan_channel(shost, channel, id, lun,
rescan);
else
scsi_scan_channel(shost, channel, id, lun, rescan); scsi_scan_channel(shost, channel, id, lun, rescan);
else }
scsi_scan_channel(shost, channel, id, lun, rescan);
up(&shost->scan_mutex); up(&shost->scan_mutex);
return 0; return 0;

View File

@@ -48,6 +48,30 @@ const char *scsi_device_state_name(enum scsi_device_state state)
return name; return name;
} }
static struct {
enum scsi_host_state value;
char *name;
} shost_states[] = {
{ SHOST_CREATED, "created" },
{ SHOST_RUNNING, "running" },
{ SHOST_CANCEL, "cancel" },
{ SHOST_DEL, "deleted" },
{ SHOST_RECOVERY, "recovery" },
};
const char *scsi_host_state_name(enum scsi_host_state state)
{
int i;
char *name = NULL;
for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
if (shost_states[i].value == state) {
name = shost_states[i].name;
break;
}
}
return name;
}
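
The new shost_states[] table mirrors the existing device-state table, so the host state can be rendered by name. A small illustrative fragment only (scsi_host_state_name() is local to scsi_sysfs.c, so a real caller would live there; the helper below is invented):

        /* Hypothetical debug helper inside scsi_sysfs.c. */
        static void example_log_host_state(struct Scsi_Host *shost)
        {
                const char *name = scsi_host_state_name(shost->shost_state);

                printk(KERN_DEBUG "host%d state: %s\n", shost->host_no,
                       name ? name : "unknown");
        }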
static int check_set(unsigned int *val, char *src) static int check_set(unsigned int *val, char *src)
{ {
char *last; char *last;
@@ -124,6 +148,43 @@ static ssize_t store_scan(struct class_device *class_dev, const char *buf,
}; };
static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); static CLASS_DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
static ssize_t
store_shost_state(struct class_device *class_dev, const char *buf, size_t count)
{
int i;
struct Scsi_Host *shost = class_to_shost(class_dev);
enum scsi_host_state state = 0;
for (i = 0; i < sizeof(shost_states)/sizeof(shost_states[0]); i++) {
const int len = strlen(shost_states[i].name);
if (strncmp(shost_states[i].name, buf, len) == 0 &&
buf[len] == '\n') {
state = shost_states[i].value;
break;
}
}
if (!state)
return -EINVAL;
if (scsi_host_set_state(shost, state))
return -EINVAL;
return count;
}
static ssize_t
show_shost_state(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *shost = class_to_shost(class_dev);
const char *name = scsi_host_state_name(shost->shost_state);
if (!name)
return -EINVAL;
return snprintf(buf, 20, "%s\n", name);
}
static CLASS_DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -139,6 +200,7 @@ static struct class_device_attribute *scsi_sysfs_shost_attrs[] = {
&class_device_attr_unchecked_isa_dma, &class_device_attr_unchecked_isa_dma,
&class_device_attr_proc_name, &class_device_attr_proc_name,
&class_device_attr_scan, &class_device_attr_scan,
&class_device_attr_state,
NULL NULL
}; };

View File

@@ -252,7 +252,8 @@ struct fc_internal {
#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
static int fc_target_setup(struct device *dev) static int fc_target_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{ {
struct scsi_target *starget = to_scsi_target(dev); struct scsi_target *starget = to_scsi_target(dev);
struct fc_rport *rport = starget_to_rport(starget); struct fc_rport *rport = starget_to_rport(starget);
@@ -281,7 +282,8 @@ static DECLARE_TRANSPORT_CLASS(fc_transport_class,
NULL, NULL,
NULL); NULL);
static int fc_host_setup(struct device *dev) static int fc_host_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{ {
struct Scsi_Host *shost = dev_to_shost(dev); struct Scsi_Host *shost = dev_to_shost(dev);

View File

@@ -28,14 +28,14 @@
#include "scsi_priv.h" #include "scsi_priv.h"
#include <scsi/scsi_device.h> #include <scsi/scsi_device.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_request.h> #include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h> #include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_spi.h> #include <scsi/scsi_transport_spi.h>
#define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a) #define SPI_PRINTK(x, l, f, a...) dev_printk(l, &(x)->dev, f , ##a)
#define SPI_NUM_ATTRS 13 /* increase this if you add attributes */ #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */
#define SPI_OTHER_ATTRS 1 /* Increase this if you add "always #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always
* on" attributes */ * on" attributes */
#define SPI_HOST_ATTRS 1 #define SPI_HOST_ATTRS 1
@@ -106,27 +106,31 @@ static int sprint_frac(char *dest, int value, int denom)
return result; return result;
} }
/* Modification of scsi_wait_req that will clear UNIT ATTENTION conditions static int spi_execute(struct scsi_device *sdev, const void *cmd,
* resulting from (likely) bus and device resets */ enum dma_data_direction dir,
static void spi_wait_req(struct scsi_request *sreq, const void *cmd, void *buffer, unsigned bufflen,
void *buffer, unsigned bufflen) struct scsi_sense_hdr *sshdr)
{ {
int i; int i, result;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
for(i = 0; i < DV_RETRIES; i++) { for(i = 0; i < DV_RETRIES; i++) {
sreq->sr_request->flags |= REQ_FAILFAST; result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
sense, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST);
if (result & DRIVER_SENSE) {
struct scsi_sense_hdr sshdr_tmp;
if (!sshdr)
sshdr = &sshdr_tmp;
scsi_wait_req(sreq, cmd, buffer, bufflen, if (scsi_normalize_sense(sense, sizeof(*sense),
DV_TIMEOUT, /* retries */ 1); sshdr)
if (sreq->sr_result & DRIVER_SENSE) { && sshdr->sense_key == UNIT_ATTENTION)
struct scsi_sense_hdr sshdr;
if (scsi_request_normalize_sense(sreq, &sshdr)
&& sshdr.sense_key == UNIT_ATTENTION)
continue; continue;
} }
break; break;
} }
return result;
} }
static struct { static struct {
@@ -162,7 +166,8 @@ static inline enum spi_signal_type spi_signal_to_value(const char *name)
return SPI_SIGNAL_UNKNOWN; return SPI_SIGNAL_UNKNOWN;
} }
static int spi_host_setup(struct device *dev) static int spi_host_setup(struct transport_container *tc, struct device *dev,
struct class_device *cdev)
{ {
struct Scsi_Host *shost = dev_to_shost(dev); struct Scsi_Host *shost = dev_to_shost(dev);
@@ -196,7 +201,9 @@ static int spi_host_match(struct attribute_container *cont,
return &i->t.host_attrs.ac == cont; return &i->t.host_attrs.ac == cont;
} }
static int spi_device_configure(struct device *dev) static int spi_device_configure(struct transport_container *tc,
struct device *dev,
struct class_device *cdev)
{ {
struct scsi_device *sdev = to_scsi_device(dev); struct scsi_device *sdev = to_scsi_device(dev);
struct scsi_target *starget = sdev->sdev_target; struct scsi_target *starget = sdev->sdev_target;
@@ -214,7 +221,9 @@ static int spi_device_configure(struct device *dev)
return 0; return 0;
} }
static int spi_setup_transport_attrs(struct device *dev) static int spi_setup_transport_attrs(struct transport_container *tc,
struct device *dev,
struct class_device *cdev)
{ {
struct scsi_target *starget = to_scsi_target(dev); struct scsi_target *starget = to_scsi_target(dev);
@@ -231,6 +240,7 @@ static int spi_setup_transport_attrs(struct device *dev)
spi_rd_strm(starget) = 0; spi_rd_strm(starget) = 0;
spi_rti(starget) = 0; spi_rti(starget) = 0;
spi_pcomp_en(starget) = 0; spi_pcomp_en(starget) = 0;
spi_hold_mcs(starget) = 0;
spi_dv_pending(starget) = 0; spi_dv_pending(starget) = 0;
spi_initial_dv(starget) = 0; spi_initial_dv(starget) = 0;
init_MUTEX(&spi_dv_sem(starget)); init_MUTEX(&spi_dv_sem(starget));
@@ -347,6 +357,7 @@ spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n"); spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n"); spi_transport_rd_attr(rti, "%d\n");
spi_transport_rd_attr(pcomp_en, "%d\n"); spi_transport_rd_attr(pcomp_en, "%d\n");
spi_transport_rd_attr(hold_mcs, "%d\n");
/* we only care about the first child device so we return 1 */ /* we only care about the first child device so we return 1 */
static int child_iter(struct device *dev, void *data) static int child_iter(struct device *dev, void *data)
@@ -539,13 +550,13 @@ enum spi_compare_returns {
/* This is for read/write Domain Validation: If the device supports /* This is for read/write Domain Validation: If the device supports
* an echo buffer, we do read/write tests to it */ * an echo buffer, we do read/write tests to it */
static enum spi_compare_returns static enum spi_compare_returns
spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer, spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
u8 *ptr, const int retries) u8 *ptr, const int retries)
{ {
struct scsi_device *sdev = sreq->sr_device;
int len = ptr - buffer; int len = ptr - buffer;
int j, k, r; int j, k, r, result;
unsigned int pattern = 0x0000ffff; unsigned int pattern = 0x0000ffff;
struct scsi_sense_hdr sshdr;
const char spi_write_buffer[] = { const char spi_write_buffer[] = {
WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
@@ -590,14 +601,12 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
} }
for (r = 0; r < retries; r++) { for (r = 0; r < retries; r++) {
sreq->sr_cmd_len = 0; /* wait_req to fill in */ result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
sreq->sr_data_direction = DMA_TO_DEVICE; buffer, len, &sshdr);
spi_wait_req(sreq, spi_write_buffer, buffer, len); if(result || !scsi_device_online(sdev)) {
if(sreq->sr_result || !scsi_device_online(sdev)) {
struct scsi_sense_hdr sshdr;
scsi_device_set_state(sdev, SDEV_QUIESCE); scsi_device_set_state(sdev, SDEV_QUIESCE);
if (scsi_request_normalize_sense(sreq, &sshdr) if (scsi_sense_valid(&sshdr)
&& sshdr.sense_key == ILLEGAL_REQUEST && sshdr.sense_key == ILLEGAL_REQUEST
/* INVALID FIELD IN CDB */ /* INVALID FIELD IN CDB */
&& sshdr.asc == 0x24 && sshdr.ascq == 0x00) && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
@@ -609,14 +618,13 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
return SPI_COMPARE_SKIP_TEST; return SPI_COMPARE_SKIP_TEST;
SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result); SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", result);
return SPI_COMPARE_FAILURE; return SPI_COMPARE_FAILURE;
} }
memset(ptr, 0, len); memset(ptr, 0, len);
sreq->sr_cmd_len = 0; /* wait_req to fill in */ spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
sreq->sr_data_direction = DMA_FROM_DEVICE; ptr, len, NULL);
spi_wait_req(sreq, spi_read_buffer, ptr, len);
scsi_device_set_state(sdev, SDEV_QUIESCE); scsi_device_set_state(sdev, SDEV_QUIESCE);
if (memcmp(buffer, ptr, len) != 0) if (memcmp(buffer, ptr, len) != 0)
@@ -628,25 +636,22 @@ spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
/* This is for the simplest form of Domain Validation: a read test /* This is for the simplest form of Domain Validation: a read test
* on the inquiry data from the device */ * on the inquiry data from the device */
static enum spi_compare_returns static enum spi_compare_returns
spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer, spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
u8 *ptr, const int retries) u8 *ptr, const int retries)
{ {
int r; int r, result;
const int len = sreq->sr_device->inquiry_len; const int len = sdev->inquiry_len;
struct scsi_device *sdev = sreq->sr_device;
const char spi_inquiry[] = { const char spi_inquiry[] = {
INQUIRY, 0, 0, 0, len, 0 INQUIRY, 0, 0, 0, len, 0
}; };
for (r = 0; r < retries; r++) { for (r = 0; r < retries; r++) {
sreq->sr_cmd_len = 0; /* wait_req to fill in */
sreq->sr_data_direction = DMA_FROM_DEVICE;
memset(ptr, 0, len); memset(ptr, 0, len);
spi_wait_req(sreq, spi_inquiry, ptr, len); result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
ptr, len, NULL);
if(sreq->sr_result || !scsi_device_online(sdev)) { if(result || !scsi_device_online(sdev)) {
scsi_device_set_state(sdev, SDEV_QUIESCE); scsi_device_set_state(sdev, SDEV_QUIESCE);
return SPI_COMPARE_FAILURE; return SPI_COMPARE_FAILURE;
} }
@@ -667,12 +672,11 @@ spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer,
} }
static enum spi_compare_returns static enum spi_compare_returns
spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr, spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr,
enum spi_compare_returns enum spi_compare_returns
(*compare_fn)(struct scsi_request *, u8 *, u8 *, int)) (*compare_fn)(struct scsi_device *, u8 *, u8 *, int))
{ {
struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); struct spi_internal *i = to_spi_internal(sdev->host->transportt);
struct scsi_device *sdev = sreq->sr_device;
struct scsi_target *starget = sdev->sdev_target; struct scsi_target *starget = sdev->sdev_target;
int period = 0, prevperiod = 0; int period = 0, prevperiod = 0;
enum spi_compare_returns retval; enum spi_compare_returns retval;
@@ -680,7 +684,7 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
for (;;) { for (;;) {
int newperiod; int newperiod;
retval = compare_fn(sreq, buffer, ptr, DV_LOOPS); retval = compare_fn(sdev, buffer, ptr, DV_LOOPS);
if (retval == SPI_COMPARE_SUCCESS if (retval == SPI_COMPARE_SUCCESS
|| retval == SPI_COMPARE_SKIP_TEST) || retval == SPI_COMPARE_SKIP_TEST)
@@ -726,9 +730,9 @@ spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
} }
static int static int
spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer) spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
{ {
int l; int l, result;
/* first off do a test unit ready. This can error out /* first off do a test unit ready. This can error out
* because of reservations or some other reason. If it * because of reservations or some other reason. If it
@@ -744,18 +748,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
}; };
sreq->sr_cmd_len = 0;
sreq->sr_data_direction = DMA_NONE;
/* We send a set of three TURs to clear any outstanding /* We send a set of three TURs to clear any outstanding
* unit attention conditions if they exist (Otherwise the * unit attention conditions if they exist (Otherwise the
* buffer tests won't be happy). If the TUR still fails * buffer tests won't be happy). If the TUR still fails
* (reservation conflict, device not ready, etc) just * (reservation conflict, device not ready, etc) just
* skip the write tests */ * skip the write tests */
for (l = 0; ; l++) { for (l = 0; ; l++) {
spi_wait_req(sreq, spi_test_unit_ready, NULL, 0); result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
NULL, 0, NULL);
if(sreq->sr_result) { if(result) {
if(l >= 3) if(l >= 3)
return 0; return 0;
} else { } else {
@@ -764,12 +766,10 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
} }
} }
sreq->sr_cmd_len = 0; result = spi_execute(sdev, spi_read_buffer_descriptor,
sreq->sr_data_direction = DMA_FROM_DEVICE; DMA_FROM_DEVICE, buffer, 4, NULL);
spi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4); if (result)
if (sreq->sr_result)
/* Device has no echo buffer */ /* Device has no echo buffer */
return 0; return 0;
@@ -777,17 +777,16 @@ spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
} }
static void static void
spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer) spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
{ {
struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt); struct spi_internal *i = to_spi_internal(sdev->host->transportt);
struct scsi_device *sdev = sreq->sr_device;
struct scsi_target *starget = sdev->sdev_target; struct scsi_target *starget = sdev->sdev_target;
int len = sdev->inquiry_len; int len = sdev->inquiry_len;
/* first set us up for narrow async */ /* first set us up for narrow async */
DV_SET(offset, 0); DV_SET(offset, 0);
DV_SET(width, 0); DV_SET(width, 0);
if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS) if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
!= SPI_COMPARE_SUCCESS) { != SPI_COMPARE_SUCCESS) {
SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n"); SPI_PRINTK(starget, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
/* FIXME: should probably offline the device here? */ /* FIXME: should probably offline the device here? */
@@ -799,7 +798,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
scsi_device_wide(sdev)) { scsi_device_wide(sdev)) {
i->f->set_width(starget, 1); i->f->set_width(starget, 1);
if (spi_dv_device_compare_inquiry(sreq, buffer, if (spi_dv_device_compare_inquiry(sdev, buffer,
buffer + len, buffer + len,
DV_LOOPS) DV_LOOPS)
!= SPI_COMPARE_SUCCESS) { != SPI_COMPARE_SUCCESS) {
@@ -820,7 +819,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
len = 0; len = 0;
if (scsi_device_dt(sdev)) if (scsi_device_dt(sdev))
len = spi_dv_device_get_echo_buffer(sreq, buffer); len = spi_dv_device_get_echo_buffer(sdev, buffer);
retry: retry:
@@ -846,7 +845,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
if (len == 0) { if (len == 0) {
SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n"); SPI_PRINTK(starget, KERN_INFO, "Domain Validation skipping write tests\n");
spi_dv_retrain(sreq, buffer, buffer + len, spi_dv_retrain(sdev, buffer, buffer + len,
spi_dv_device_compare_inquiry); spi_dv_device_compare_inquiry);
return; return;
} }
@@ -856,7 +855,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
len = SPI_MAX_ECHO_BUFFER_SIZE; len = SPI_MAX_ECHO_BUFFER_SIZE;
} }
if (spi_dv_retrain(sreq, buffer, buffer + len, if (spi_dv_retrain(sdev, buffer, buffer + len,
spi_dv_device_echo_buffer) spi_dv_device_echo_buffer)
== SPI_COMPARE_SKIP_TEST) { == SPI_COMPARE_SKIP_TEST) {
/* OK, the stupid drive can't do a write echo buffer /* OK, the stupid drive can't do a write echo buffer
@@ -879,16 +878,12 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
void void
spi_dv_device(struct scsi_device *sdev) spi_dv_device(struct scsi_device *sdev)
{ {
struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
struct scsi_target *starget = sdev->sdev_target; struct scsi_target *starget = sdev->sdev_target;
u8 *buffer; u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
if (unlikely(!sreq))
return;
if (unlikely(scsi_device_get(sdev))) if (unlikely(scsi_device_get(sdev)))
goto out_free_req; return;
buffer = kmalloc(len, GFP_KERNEL); buffer = kmalloc(len, GFP_KERNEL);
@@ -909,7 +904,7 @@ spi_dv_device(struct scsi_device *sdev)
SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n"); SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n");
spi_dv_device_internal(sreq, buffer); spi_dv_device_internal(sdev, buffer);
SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n"); SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n");
@@ -924,8 +919,6 @@ spi_dv_device(struct scsi_device *sdev)
kfree(buffer); kfree(buffer);
out_put: out_put:
scsi_device_put(sdev); scsi_device_put(sdev);
out_free_req:
scsi_release_request(sreq);
} }
EXPORT_SYMBOL(spi_dv_device); EXPORT_SYMBOL(spi_dv_device);
@@ -1028,10 +1021,17 @@ void spi_display_xfer_agreement(struct scsi_target *starget)
sprint_frac(tmp, picosec, 1000); sprint_frac(tmp, picosec, 1000);
dev_info(&starget->dev, dev_info(&starget->dev,
"%s %sSCSI %d.%d MB/s %s%s%s (%s ns, offset %d)\n", "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n",
scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10,
tp->dt ? "DT" : "ST", tp->iu ? " IU" : "", tp->dt ? "DT" : "ST",
tp->qas ? " QAS" : "", tmp, tp->offset); tp->iu ? " IU" : "",
tp->qas ? " QAS" : "",
tp->rd_strm ? " RDSTRM" : "",
tp->rti ? " RTI" : "",
tp->wr_flow ? " WRFLOW" : "",
tp->pcomp_en ? " PCOMP" : "",
tp->hold_mcs ? " HMCS" : "",
tmp, tp->offset);
} else { } else {
dev_info(&starget->dev, "%sasynchronous.\n", dev_info(&starget->dev, "%sasynchronous.\n",
tp->width ? "wide " : ""); tp->width ? "wide " : "");
@@ -1073,6 +1073,7 @@ static int spi_device_match(struct attribute_container *cont,
{ {
struct scsi_device *sdev; struct scsi_device *sdev;
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct spi_internal *i;
if (!scsi_is_sdev_device(dev)) if (!scsi_is_sdev_device(dev))
return 0; return 0;
@@ -1085,6 +1086,9 @@ static int spi_device_match(struct attribute_container *cont,
/* Note: this class has no device attributes, so it has /* Note: this class has no device attributes, so it has
* no per-HBA allocation and thus we don't need to distinguish * no per-HBA allocation and thus we don't need to distinguish
* the attribute containers for the device */ * the attribute containers for the device */
i = to_spi_internal(shost->transportt);
if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target))
return 0;
return 1; return 1;
} }
@@ -1092,6 +1096,7 @@ static int spi_target_match(struct attribute_container *cont,
struct device *dev) struct device *dev)
{ {
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct scsi_target *starget;
struct spi_internal *i; struct spi_internal *i;
if (!scsi_is_target_device(dev)) if (!scsi_is_target_device(dev))
@@ -1103,7 +1108,11 @@ static int spi_target_match(struct attribute_container *cont,
return 0; return 0;
i = to_spi_internal(shost->transportt); i = to_spi_internal(shost->transportt);
starget = to_scsi_target(dev);
if (i->f->deny_binding && i->f->deny_binding(starget))
return 0;
return &i->t.target_attrs.ac == cont; return &i->t.target_attrs.ac == cont;
} }
@@ -1154,6 +1163,7 @@ spi_attach_transport(struct spi_function_template *ft)
SETUP_ATTRIBUTE(rd_strm); SETUP_ATTRIBUTE(rd_strm);
SETUP_ATTRIBUTE(rti); SETUP_ATTRIBUTE(rti);
SETUP_ATTRIBUTE(pcomp_en); SETUP_ATTRIBUTE(pcomp_en);
SETUP_ATTRIBUTE(hold_mcs);
/* if you add an attribute but forget to increase SPI_NUM_ATTRS /* if you add an attribute but forget to increase SPI_NUM_ATTRS
* this bug will trigger */ * this bug will trigger */

View File

@@ -59,7 +59,6 @@
#include <scsi/scsi_eh.h> #include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h> #include <scsi/scsi_ioctl.h>
#include <scsi/scsi_request.h>
#include <scsi/scsicam.h> #include <scsi/scsicam.h>
#include "scsi_logging.h" #include "scsi_logging.h"
@@ -125,7 +124,7 @@ static int sd_issue_flush(struct device *, sector_t *);
static void sd_end_flush(request_queue_t *, struct request *); static void sd_end_flush(request_queue_t *, struct request *);
static int sd_prepare_flush(request_queue_t *, struct request *); static int sd_prepare_flush(request_queue_t *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
struct scsi_request *SRpnt, unsigned char *buffer); unsigned char *buffer);
static struct scsi_driver sd_template = { static struct scsi_driver sd_template = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
@@ -682,19 +681,13 @@ not_present:
static int sd_sync_cache(struct scsi_device *sdp) static int sd_sync_cache(struct scsi_device *sdp)
{ {
struct scsi_request *sreq;
int retries, res; int retries, res;
struct scsi_sense_hdr sshdr;
if (!scsi_device_online(sdp)) if (!scsi_device_online(sdp))
return -ENODEV; return -ENODEV;
sreq = scsi_allocate_request(sdp, GFP_KERNEL);
if (!sreq) {
printk("FAILED\n No memory for request\n");
return -ENOMEM;
}
sreq->sr_data_direction = DMA_NONE;
for (retries = 3; retries > 0; --retries) { for (retries = 3; retries > 0; --retries) {
unsigned char cmd[10] = { 0 }; unsigned char cmd[10] = { 0 };
@@ -703,22 +696,20 @@ static int sd_sync_cache(struct scsi_device *sdp)
 		 * Leave the rest of the command zero to indicate
 		 * flush everything.
 		 */
-		scsi_wait_req(sreq, cmd, NULL, 0, SD_TIMEOUT, SD_MAX_RETRIES);
-		if (sreq->sr_result == 0)
+		res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+				       SD_TIMEOUT, SD_MAX_RETRIES);
+		if (res == 0)
 			break;
 	}
-	res = sreq->sr_result;
 	if (res) {
 		printk(KERN_WARNING "FAILED\n  status = %x, message = %02x, "
 				    "host = %d, driver = %02x\n  ",
 				    status_byte(res), msg_byte(res),
 				    host_byte(res), driver_byte(res));
 		if (driver_byte(res) & DRIVER_SENSE)
-			scsi_print_req_sense("sd", sreq);
+			scsi_print_sense_hdr("sd", &sshdr);
 	}
-	scsi_release_request(sreq);
 	return res;
 }
@@ -957,22 +948,19 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
 	scsi_io_completion(SCpnt, good_bytes, block_sectors << 9);
 }
-static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp)
+static int media_not_present(struct scsi_disk *sdkp,
+			     struct scsi_sense_hdr *sshdr)
 {
-	struct scsi_sense_hdr sshdr;
-	if (!srp->sr_result)
-		return 0;
-	if (!(driver_byte(srp->sr_result) & DRIVER_SENSE))
+	if (!scsi_sense_valid(sshdr))
 		return 0;
 	/* not invoked for commands that could return deferred errors */
-	if (scsi_request_normalize_sense(srp, &sshdr)) {
-		if (sshdr.sense_key != NOT_READY &&
-		    sshdr.sense_key != UNIT_ATTENTION)
-			return 0;
-		if (sshdr.asc != 0x3A) /* medium not present */
-			return 0;
-	}
+	if (sshdr->sense_key != NOT_READY &&
+	    sshdr->sense_key != UNIT_ATTENTION)
+		return 0;
+	if (sshdr->asc != 0x3A) /* medium not present */
+		return 0;
 	set_media_not_present(sdkp);
 	return 1;
 }
@@ -981,10 +969,10 @@ static int media_not_present(struct scsi_disk *sdkp, struct scsi_request *srp)
  * spinup disk - called only in sd_revalidate_disk()
  */
 static void
-sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
-	       struct scsi_request *SRpnt, unsigned char *buffer) {
+sd_spinup_disk(struct scsi_disk *sdkp, char *diskname)
+{
 	unsigned char cmd[10];
-	unsigned long spintime_value = 0;
+	unsigned long spintime_expire = 0;
 	int retries, spintime;
 	unsigned int the_result;
 	struct scsi_sense_hdr sshdr;
@@ -1001,18 +989,13 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
 			cmd[0] = TEST_UNIT_READY;
 			memset((void *) &cmd[1], 0, 9);
-			SRpnt->sr_cmd_len = 0;
-			memset(SRpnt->sr_sense_buffer, 0,
-			       SCSI_SENSE_BUFFERSIZE);
-			SRpnt->sr_data_direction = DMA_NONE;
-			scsi_wait_req (SRpnt, (void *) cmd, (void *) buffer,
-				       0/*512*/, SD_TIMEOUT, SD_MAX_RETRIES);
-			the_result = SRpnt->sr_result;
+			the_result = scsi_execute_req(sdkp->device, cmd,
+						      DMA_NONE, NULL, 0,
+						      &sshdr, SD_TIMEOUT,
+						      SD_MAX_RETRIES);
 			if (the_result)
-				sense_valid = scsi_request_normalize_sense(
-						SRpnt, &sshdr);
+				sense_valid = scsi_sense_valid(&sshdr);
 			retries++;
 		} while (retries < 3 &&
 			 (!scsi_status_is_good(the_result) ||
@@ -1024,7 +1007,7 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
 		 * any media in it, don't bother with any of the rest of
 		 * this crap.
 		 */
-		if (media_not_present(sdkp, SRpnt))
+		if (media_not_present(sdkp, &sshdr))
 			return;
 		if ((driver_byte(the_result) & DRIVER_SENSE) == 0) {
@@ -1063,33 +1046,42 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
 				cmd[1] = 1;	/* Return immediately */
 				memset((void *) &cmd[2], 0, 8);
 				cmd[4] = 1;	/* Start spin cycle */
-				SRpnt->sr_cmd_len = 0;
-				memset(SRpnt->sr_sense_buffer, 0,
-					SCSI_SENSE_BUFFERSIZE);
-				SRpnt->sr_data_direction = DMA_NONE;
-				scsi_wait_req(SRpnt, (void *)cmd,
-					      (void *) buffer, 0/*512*/,
-					      SD_TIMEOUT, SD_MAX_RETRIES);
-				spintime_value = jiffies;
+				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
+						 NULL, 0, &sshdr,
+						 SD_TIMEOUT, SD_MAX_RETRIES);
+				spintime_expire = jiffies + 100 * HZ;
+				spintime = 1;
 			}
-			spintime = 1;
 			/* Wait 1 second for next try */
 			msleep(1000);
 			printk(".");
+		/*
+		 * Wait for USB flash devices with slow firmware.
+		 * Yes, this sense key/ASC combination shouldn't
+		 * occur here.  It's characteristic of these devices.
+		 */
+		} else if (sense_valid &&
+				sshdr.sense_key == UNIT_ATTENTION &&
+				sshdr.asc == 0x28) {
+			if (!spintime) {
+				spintime_expire = jiffies + 5 * HZ;
+				spintime = 1;
+			}
+			/* Wait 1 second for next try */
+			msleep(1000);
 		} else {
 			/* we don't understand the sense code, so it's
 			 * probably pointless to loop */
 			if(!spintime) {
 				printk(KERN_NOTICE "%s: Unit Not Ready, "
 				       "sense:\n", diskname);
-				scsi_print_req_sense("", SRpnt);
+				scsi_print_sense_hdr("", &sshdr);
 			}
 			break;
 		}
-	} while (spintime &&
-		 time_after(spintime_value + 100 * HZ, jiffies));
+	} while (spintime && time_before_eq(jiffies, spintime_expire));
 	if (spintime) {
 		if (scsi_status_is_good(the_result))
@@ -1104,14 +1096,15 @@ sd_spinup_disk(struct scsi_disk *sdkp, char *diskname,
  */
 static void
 sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
-		 struct scsi_request *SRpnt, unsigned char *buffer) {
+		 unsigned char *buffer)
+{
 	unsigned char cmd[16];
-	struct scsi_device *sdp = sdkp->device;
 	int the_result, retries;
 	int sector_size = 0;
 	int longrc = 0;
 	struct scsi_sense_hdr sshdr;
 	int sense_valid = 0;
+	struct scsi_device *sdp = sdkp->device;
 repeat:
 	retries = 3;
@@ -1128,20 +1121,15 @@ repeat:
 			memset((void *) buffer, 0, 8);
 		}
-		SRpnt->sr_cmd_len = 0;
-		memset(SRpnt->sr_sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-		SRpnt->sr_data_direction = DMA_FROM_DEVICE;
-		scsi_wait_req(SRpnt, (void *) cmd, (void *) buffer,
-			      longrc ? 12 : 8, SD_TIMEOUT, SD_MAX_RETRIES);
-		if (media_not_present(sdkp, SRpnt))
+		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+					      buffer, longrc ? 12 : 8, &sshdr,
+					      SD_TIMEOUT, SD_MAX_RETRIES);
+		if (media_not_present(sdkp, &sshdr))
 			return;
-		the_result = SRpnt->sr_result;
 		if (the_result)
-			sense_valid = scsi_request_normalize_sense(SRpnt,
-								   &sshdr);
+			sense_valid = scsi_sense_valid(&sshdr);
 		retries--;
 	} while (the_result && retries);
@@ -1156,7 +1144,7 @@ repeat:
 			driver_byte(the_result));
 		if (driver_byte(the_result) & DRIVER_SENSE)
-			scsi_print_req_sense("sd", SRpnt);
+			scsi_print_sense_hdr("sd", &sshdr);
 		else
 			printk("%s : sense not available. \n", diskname);
@@ -1296,11 +1284,13 @@ got_data:
 /* called with buffer of length 512 */
 static inline int
-sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage,
-		 unsigned char *buffer, int len, struct scsi_mode_data *data)
+sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
+		 unsigned char *buffer, int len, struct scsi_mode_data *data,
+		 struct scsi_sense_hdr *sshdr)
 {
-	return __scsi_mode_sense(SRpnt, dbd, modepage, buffer, len,
-				 SD_TIMEOUT, SD_MAX_RETRIES, data);
+	return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
+			       SD_TIMEOUT, SD_MAX_RETRIES, data,
+			       sshdr);
 }
 /*
@@ -1309,25 +1299,27 @@ sd_do_mode_sense(struct scsi_request *SRpnt, int dbd, int modepage,
  */
 static void
 sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
-			   struct scsi_request *SRpnt, unsigned char *buffer) {
+			   unsigned char *buffer)
+{
 	int res;
+	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
 	set_disk_ro(sdkp->disk, 0);
-	if (sdkp->device->skip_ms_page_3f) {
+	if (sdp->skip_ms_page_3f) {
 		printk(KERN_NOTICE "%s: assuming Write Enabled\n", diskname);
 		return;
 	}
-	if (sdkp->device->use_192_bytes_for_3f) {
-		res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 192, &data);
+	if (sdp->use_192_bytes_for_3f) {
+		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
 	} else {
 		/*
 		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
 		 * We have to start carefully: some devices hang if we ask
 		 * for more than is available.
 		 */
-		res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 4, &data);
+		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
 		/*
 		 * Second attempt: ask for page 0 When only page 0 is
@@ -1336,14 +1328,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
 		 * CDB.
 		 */
 		if (!scsi_status_is_good(res))
-			res = sd_do_mode_sense(SRpnt, 0, 0, buffer, 4, &data);
+			res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
 		/*
 		 * Third attempt: ask 255 bytes, as we did earlier.
 		 */
 		if (!scsi_status_is_good(res))
-			res = sd_do_mode_sense(SRpnt, 0, 0x3F, buffer, 255,
-					       &data);
+			res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
+					       &data, NULL);
 	}
 	if (!scsi_status_is_good(res)) {
@@ -1365,19 +1357,20 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, char *diskname,
  */
 static void
 sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
-		   struct scsi_request *SRpnt, unsigned char *buffer)
+		   unsigned char *buffer)
 {
 	int len = 0, res;
+	struct scsi_device *sdp = sdkp->device;
 	int dbd;
 	int modepage;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
-	if (sdkp->device->skip_ms_page_8)
+	if (sdp->skip_ms_page_8)
 		goto defaults;
-	if (sdkp->device->type == TYPE_RBC) {
+	if (sdp->type == TYPE_RBC) {
 		modepage = 6;
 		dbd = 8;
 	} else {
@@ -1386,7 +1379,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
 	}
 	/* cautiously ask */
-	res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, 4, &data);
+	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, 4, &data, &sshdr);
 	if (!scsi_status_is_good(res))
 		goto bad_sense;
@@ -1407,7 +1400,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
 	len += data.header_length + data.block_descriptor_length;
 	/* Get the data */
-	res = sd_do_mode_sense(SRpnt, dbd, modepage, buffer, len, &data);
+	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr);
 	if (scsi_status_is_good(res)) {
 		const char *types[] = {
@@ -1439,7 +1432,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, char *diskname,
 	}
 bad_sense:
-	if (scsi_request_normalize_sense(SRpnt, &sshdr) &&
+	if (scsi_sense_valid(&sshdr) &&
 	    sshdr.sense_key == ILLEGAL_REQUEST &&
 	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
 		printk(KERN_NOTICE "%s: cache data unavailable\n",
@@ -1464,7 +1457,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
-	struct scsi_request *sreq;
 	unsigned char *buffer;
 	SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name));
@@ -1476,18 +1468,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	if (!scsi_device_online(sdp))
 		goto out;
-	sreq = scsi_allocate_request(sdp, GFP_KERNEL);
-	if (!sreq) {
-		printk(KERN_WARNING "(sd_revalidate_disk:) Request allocation "
-		       "failure.\n");
-		goto out;
-	}
 	buffer = kmalloc(512, GFP_KERNEL | __GFP_DMA);
 	if (!buffer) {
 		printk(KERN_WARNING "(sd_revalidate_disk:) Memory allocation "
 		       "failure.\n");
-		goto out_release_request;
+		goto out;
 	}
 	/* defaults, until the device tells us otherwise */
@@ -1498,25 +1483,23 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	sdkp->WCE = 0;
 	sdkp->RCD = 0;
-	sd_spinup_disk(sdkp, disk->disk_name, sreq, buffer);
+	sd_spinup_disk(sdkp, disk->disk_name);
 	/*
 	 * Without media there is no reason to ask; moreover, some devices
 	 * react badly if we do.
 	 */
 	if (sdkp->media_present) {
-		sd_read_capacity(sdkp, disk->disk_name, sreq, buffer);
+		sd_read_capacity(sdkp, disk->disk_name, buffer);
 		if (sdp->removable)
 			sd_read_write_protect_flag(sdkp, disk->disk_name,
-						   sreq, buffer);
-		sd_read_cache_type(sdkp, disk->disk_name, sreq, buffer);
+						   buffer);
+		sd_read_cache_type(sdkp, disk->disk_name, buffer);
 	}
 	set_capacity(disk, sdkp->capacity);
 	kfree(buffer);
- out_release_request:
-	scsi_release_request(sreq);
 out:
 	return 0;
 }
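
Taken together, the sd.c hunks above replace the old scsi_request/scsi_wait_req plumbing with scsi_execute_req() plus a decoded struct scsi_sense_hdr. Below is a minimal sketch of the resulting calling pattern, assuming the interfaces used in the diff; the my_sync_cache() wrapper, the header list, and the timeout/retry values are illustrative only, not part of the commit.

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

static int my_sync_cache(struct scsi_device *sdp)
{
	unsigned char cmd[10] = { SYNCHRONIZE_CACHE, 0 };
	struct scsi_sense_hdr sshdr;
	int res;

	/* No data phase; any sense data comes back already decoded in sshdr. */
	res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
			       30 * HZ, 3);

	/* scsi_sense_valid()/scsi_print_sense_hdr() take over from the old
	 * scsi_request_normalize_sense()/scsi_print_req_sense() pair. */
	if (res && (driver_byte(res) & DRIVER_SENSE) && scsi_sense_valid(&sshdr))
		scsi_print_sense_hdr("sdX", &sshdr);

	return res;
}
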

Some files were not shown because too many files have changed in this diff.