Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (204 commits)
  [SCSI] qla4xxx: export address/port of connection (fix udev disk names)
  [SCSI] ipr: Fix BUG on adapter dump timeout
  [SCSI] megaraid_sas: Fix instance access in megasas_reset_timer
  [SCSI] hpsa: change confusing message to be more clear
  [SCSI] iscsi class: fix vlan configuration
  [SCSI] qla4xxx: fix data alignment and use nl helpers
  [SCSI] iscsi class: fix link local mispelling
  [SCSI] iscsi class: Replace iscsi_get_next_target_id with IDA
  [SCSI] aacraid: use lower snprintf() limit
  [SCSI] lpfc 8.3.27: Change driver version to 8.3.27
  [SCSI] lpfc 8.3.27: T10 additions for SLI4
  [SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
  [SCSI] lpfc 8.3.27: Change algorithm for getting physical port name
  [SCSI] lpfc 8.3.27: Changed worst case mailbox timeout
  [SCSI] lpfc 8.3.27: Miscellanous logic and interface fixes
  [SCSI] megaraid_sas: Changelog and version update
  [SCSI] megaraid_sas: Add driver workaround for PERC5/1068 kdump kernel panic
  [SCSI] megaraid_sas: Add multiple MSI-X vector/multiple reply queue support
  [SCSI] megaraid_sas: Add support for MegaRAID 9360/9380 12GB/s controllers
  [SCSI] megaraid_sas: Clear FUSION_IN_RESET before enabling interrupts
  ...
@@ -1263,6 +1263,10 @@ void isci_host_deinit(struct isci_host *ihost)
{
int i;

/* disable output data selects */
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);

isci_host_change_state(ihost, isci_stopping);
for (i = 0; i < SCI_MAX_PORTS; i++) {
struct isci_port *iport = &ihost->ports[i];
@@ -1281,6 +1285,12 @@ void isci_host_deinit(struct isci_host *ihost)
spin_unlock_irq(&ihost->scic_lock);

wait_for_stop(ihost);

/* disable sgpio: where the above wait should give time for the
* enclosure to sample the gpios going inactive
*/
writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);

sci_controller_reset(ihost);

/* Cancel any/all outstanding port timers */
@@ -2365,6 +2375,12 @@ int isci_host_init(struct isci_host *ihost)
for (i = 0; i < SCI_MAX_PHYS; i++)
isci_phy_init(&ihost->phys[i], ihost, i);

/* enable sgpio */
writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);

for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
struct isci_remote_device *idev = &ihost->devices[i];
@@ -2760,3 +2776,56 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,

return status;
}

static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
{
int d;

/* no support for TX_GP_CFG */
if (reg_index == 0)
return -EINVAL;

for (d = 0; d < isci_gpio_count(ihost); d++) {
u32 val = 0x444; /* all ODx.n clear */
int i;

for (i = 0; i < 3; i++) {
int bit = (i << 2) + 2;

bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
write_data, reg_index,
reg_count);
if (bit < 0)
break;

/* if od is set, clear the 'invert' bit */
val &= ~(bit << ((i << 2) + 2));
}

if (i < 3)
break;
writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
}

/* unless reg_index is > 1, we should always be able to write at
* least one register
*/
return d > 0;
}

int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
u8 reg_count, u8 *write_data)
{
struct isci_host *ihost = sas_ha->lldd_ha;
int written;

switch (reg_type) {
case SAS_GPIO_REG_TX_GP:
written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
break;
default:
written = -EINVAL;
}

return written;
}
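
For context, a minimal sketch of how the new GPIO hook above might be driven by a caller. This is not part of the commit: only isci_gpio_write() and SAS_GPIO_REG_TX_GP come from the patch itself, while the helper name, register index, and bit pattern below are illustrative assumptions.

#include <scsi/libsas.h>	/* struct sas_ha_struct; SAS_GPIO_REG_TX_GP assumed from libsas sgpio support */
/* isci_gpio_write() is declared in the driver's host.h (see the header hunk below) */

/* Hypothetical example: push one 32-bit TX_GP register image through the
 * driver's lldd_write_gpio path.  Register index 0 (TX_GP_CFG) is rejected
 * by sci_write_gpio_tx_gp(), so the first writable index is 1.
 */
static int example_sgpio_tx_gp_write(struct sas_ha_struct *sas_ha)
{
	u8 write_data[4] = { 0x07, 0x00, 0x00, 0x00 };	/* assumed TX_GP bit layout */

	/* per the patch above, a positive return means at least one
	 * output_data_select register was updated
	 */
	return isci_gpio_write(sas_ha, SAS_GPIO_REG_TX_GP,
			       1 /* reg_index */, 1 /* reg_count */,
			       write_data);
}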

@@ -440,6 +440,18 @@ static inline bool is_c0(struct pci_dev *pdev)
return false;
}

/* set hw control for 'activity', even though active enclosures seem to drive
* the activity led on their own. Skip setting FSENG control on 'status' due
* to unexpected operation and 'error' due to not being a supported automatic
* FSENG output
*/
#define SGPIO_HW_CONTROL 0x00000443

static inline int isci_gpio_count(struct isci_host *ihost)
{
return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
}

void sci_controller_post_request(struct isci_host *ihost,
u32 request);
void sci_controller_release_frame(struct isci_host *ihost,
@@ -542,4 +554,7 @@ void sci_port_configuration_agent_construct(
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent);

int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
u8 reg_count, u8 *write_data);
#endif
@@ -192,6 +192,9 @@ static struct sas_domain_function_template isci_transport_ops = {

/* Phy management */
.lldd_control_phy = isci_phy_control,

/* GPIO support */
.lldd_write_gpio = isci_gpio_write,
};

@@ -97,7 +97,7 @@
#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))

#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024U)
#define SCU_INVALID_FRAME_INDEX (0xFFFF)

#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
@@ -1313,6 +1313,17 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
ret = isci_port_perform_hard_reset(ihost, iport, iphy);

break;
case PHY_FUNC_GET_EVENTS: {
struct scu_link_layer_registers __iomem *r;
struct sas_phy *phy = sas_phy->phy;

r = iphy->link_layer_registers;
phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
phy->invalid_dword_count = readl(&r->invalid_dword_counter);
break;
}

default:
dev_dbg(&ihost->pdev->dev,

@@ -294,8 +294,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
__func__, isci_device);
set_bit(IDEV_GONE, &isci_device->flags);
}
isci_port_change_state(isci_port, isci_stopping);
}
isci_port_change_state(isci_port, isci_stopping);
}

/* Notify libsas of the borken link, this will trigger calls to our

@@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;

if (!configure_phy_mask)
return;
goto done;

for (index = 0; index < SCI_MAX_PHYS; index++) {
if ((configure_phy_mask & (1 << index)) == 0)
@@ -875,122 +875,6 @@ struct scu_iit_entry {
|
||||
#define SCU_PTSxSR_GEN_BIT(name) \
|
||||
SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
|
||||
|
||||
|
||||
/*
|
||||
* *****************************************************************************
|
||||
* * SGPIO Register shift and mask values
|
||||
* ***************************************************************************** */
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
|
||||
#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
|
||||
|
||||
#define SCU_SGICRx_GEN_BIT(name) \
|
||||
SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
|
||||
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
|
||||
#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
|
||||
|
||||
#define SCU_SGPBRx_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
|
||||
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
|
||||
#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
|
||||
|
||||
#define SCU_SGSDLRx_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
|
||||
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
|
||||
#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
|
||||
|
||||
#define SCU_SGSDURx_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
|
||||
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
|
||||
|
||||
#define SCU_SGSIDLRx_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
|
||||
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
|
||||
#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
|
||||
|
||||
#define SCU_SGSIDURx_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
|
||||
|
||||
#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
|
||||
#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
|
||||
#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
|
||||
|
||||
#define SCU_SGVSCR_GEN_VAL(value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE ## name, value)
|
||||
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
|
||||
#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
|
||||
|
||||
#define SCU_SGODSR_GEN_VAL(name, value) \
|
||||
SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
|
||||
|
||||
#define SCU_SGODSR_GEN_BIT(name) \
|
||||
SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
|
||||
|
||||
/*
|
||||
* *****************************************************************************
|
||||
* * SMU Registers
|
||||
@@ -1529,10 +1413,12 @@ struct scu_sgpio_registers {
|
||||
u32 serial_input_upper;
|
||||
/* 0x0018 SGPIO_SGVSCR */
|
||||
u32 vendor_specific_code;
|
||||
/* 0x001C Reserved */
|
||||
u32 reserved_001c;
|
||||
/* 0x0020 SGPIO_SGODSR */
|
||||
u32 ouput_data_select[8];
|
||||
u32 output_data_select[8];
|
||||
/* Remainder of memory space 256 bytes */
|
||||
u32 reserved_1444_14ff[0x31];
|
||||
u32 reserved_1444_14ff[0x30];
|
||||
|
||||
};
|
||||
|
||||
|
@@ -386,6 +386,18 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* called once the remote node context has transisitioned to a ready
|
||||
* state (after suspending RX and/or TX due to early D2H fis)
|
||||
*/
|
||||
static void atapi_remote_device_resume_done(void *_dev)
|
||||
{
|
||||
struct isci_remote_device *idev = _dev;
|
||||
struct isci_request *ireq = idev->working_request;
|
||||
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
}
|
||||
|
||||
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
|
||||
u32 event_code)
|
||||
{
|
||||
@@ -432,6 +444,16 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
|
||||
if (status != SCI_SUCCESS)
|
||||
return status;
|
||||
|
||||
if (state == SCI_STP_DEV_ATAPI_ERROR) {
|
||||
/* For ATAPI error state resume the RNC right away. */
|
||||
if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
|
||||
scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
|
||||
return sci_remote_node_context_resume(&idev->rnc,
|
||||
atapi_remote_device_resume_done,
|
||||
idev);
|
||||
}
|
||||
}
|
||||
|
||||
if (state == SCI_STP_DEV_IDLE) {
|
||||
|
||||
/* We pick up suspension events to handle specifically to this
|
||||
@@ -625,6 +647,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
|
||||
case SCI_STP_DEV_CMD:
|
||||
case SCI_STP_DEV_NCQ:
|
||||
case SCI_STP_DEV_NCQ_ERROR:
|
||||
case SCI_STP_DEV_ATAPI_ERROR:
|
||||
status = common_complete_io(iport, idev, ireq);
|
||||
if (status != SCI_SUCCESS)
|
||||
break;
|
||||
@@ -1020,6 +1043,7 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
|
||||
[SCI_STP_DEV_NCQ_ERROR] = {
|
||||
.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
|
||||
},
|
||||
[SCI_STP_DEV_ATAPI_ERROR] = { },
|
||||
[SCI_STP_DEV_AWAIT_RESET] = { },
|
||||
[SCI_SMP_DEV_IDLE] = {
|
||||
.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
|
||||
|
@@ -243,6 +243,15 @@ enum sci_remote_device_states {
|
||||
*/
|
||||
SCI_STP_DEV_NCQ_ERROR,
|
||||
|
||||
/**
|
||||
* This is the ATAPI error state for the STP ATAPI remote device.
|
||||
* This state is entered when ATAPI device sends error status FIS
|
||||
* without data while the device object is in CMD state.
|
||||
* A suspension event is expected in this state.
|
||||
* The device object will resume right away.
|
||||
*/
|
||||
SCI_STP_DEV_ATAPI_ERROR,
|
||||
|
||||
/**
|
||||
* This is the READY substate indicates the device is waiting for the RESET task
|
||||
* coming to be recovered from certain hardware specific error.
|
||||
|
@@ -481,7 +481,29 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
|
||||
}
|
||||
}
|
||||
|
||||
static void sci_atapi_construct(struct isci_request *ireq)
|
||||
{
|
||||
struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
|
||||
struct sas_task *task;
|
||||
|
||||
/* To simplify the implementation we take advantage of the
|
||||
* silicon's partial acceleration of atapi protocol (dma data
|
||||
* transfers), so we promote all commands to dma protocol. This
|
||||
* breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
|
||||
*/
|
||||
h2d_fis->features |= ATAPI_PKT_DMA;
|
||||
|
||||
scu_stp_raw_request_construct_task_context(ireq);
|
||||
|
||||
task = isci_request_access_task(ireq);
|
||||
if (task->data_dir == DMA_NONE)
|
||||
task->total_xfer_len = 0;
|
||||
|
||||
/* clear the response so we can detect arrivial of an
|
||||
* unsolicited h2d fis
|
||||
*/
|
||||
ireq->stp.rsp.fis_type = 0;
|
||||
}
|
||||
|
||||
static enum sci_status
|
||||
sci_io_request_construct_sata(struct isci_request *ireq,
|
||||
@@ -491,6 +513,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
|
||||
{
|
||||
enum sci_status status = SCI_SUCCESS;
|
||||
struct sas_task *task = isci_request_access_task(ireq);
|
||||
struct domain_device *dev = ireq->target_device->domain_dev;
|
||||
|
||||
/* check for management protocols */
|
||||
if (ireq->ttype == tmf_task) {
|
||||
@@ -519,6 +542,13 @@ sci_io_request_construct_sata(struct isci_request *ireq,
|
||||
|
||||
}
|
||||
|
||||
/* ATAPI */
|
||||
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
|
||||
task->ata_task.fis.command == ATA_CMD_PACKET) {
|
||||
sci_atapi_construct(ireq);
|
||||
return SCI_SUCCESS;
|
||||
}
|
||||
|
||||
/* non data */
|
||||
if (task->data_dir == DMA_NONE) {
|
||||
scu_stp_raw_request_construct_task_context(ireq);
|
||||
@@ -627,7 +657,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
|
||||
|
||||
/**
|
||||
* sci_req_tx_bytes - bytes transferred when reply underruns request
|
||||
* @sci_req: request that was terminated early
|
||||
* @ireq: request that was terminated early
|
||||
*/
|
||||
#define SCU_TASK_CONTEXT_SRAM 0x200000
|
||||
static u32 sci_req_tx_bytes(struct isci_request *ireq)
|
||||
@@ -729,6 +759,10 @@ sci_io_request_terminate(struct isci_request *ireq)
|
||||
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
|
||||
case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
|
||||
case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
|
||||
case SCI_REQ_ATAPI_WAIT_H2D:
|
||||
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
|
||||
case SCI_REQ_ATAPI_WAIT_D2H:
|
||||
case SCI_REQ_ATAPI_WAIT_TC_COMP:
|
||||
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
|
||||
return SCI_SUCCESS;
|
||||
case SCI_REQ_TASK_WAIT_TC_RESP:
|
||||
@@ -1194,8 +1228,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
|
||||
{
|
||||
struct isci_stp_request *stp_req = &ireq->stp.req;
|
||||
struct scu_sgl_element_pair *sgl_pair;
|
||||
enum sci_status status = SCI_SUCCESS;
|
||||
struct scu_sgl_element *sgl;
|
||||
enum sci_status status;
|
||||
u32 offset;
|
||||
u32 len = 0;
|
||||
|
||||
@@ -1249,7 +1283,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
|
||||
*/
|
||||
static enum sci_status
|
||||
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
|
||||
u8 *data_buf, u32 len)
|
||||
u8 *data_buf, u32 len)
|
||||
{
|
||||
struct isci_request *ireq;
|
||||
u8 *src_addr;
|
||||
@@ -1423,6 +1457,128 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re
|
||||
return status;
|
||||
}
|
||||
|
||||
static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
|
||||
u32 frame_index)
|
||||
{
|
||||
struct isci_host *ihost = ireq->owning_controller;
|
||||
enum sci_status status;
|
||||
struct dev_to_host_fis *frame_header;
|
||||
u32 *frame_buffer;
|
||||
|
||||
status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
|
||||
frame_index,
|
||||
(void **)&frame_header);
|
||||
|
||||
if (status != SCI_SUCCESS)
|
||||
return status;
|
||||
|
||||
if (frame_header->fis_type != FIS_REGD2H) {
|
||||
dev_err(&ireq->isci_host->pdev->dev,
|
||||
"%s ERROR: invalid fis type 0x%X\n",
|
||||
__func__, frame_header->fis_type);
|
||||
return SCI_FAILURE;
|
||||
}
|
||||
|
||||
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
|
||||
frame_index,
|
||||
(void **)&frame_buffer);
|
||||
|
||||
sci_controller_copy_sata_response(&ireq->stp.rsp,
|
||||
(u32 *)frame_header,
|
||||
frame_buffer);
|
||||
|
||||
/* Frame has been decoded return it to the controller */
|
||||
sci_controller_release_frame(ihost, frame_index);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
|
||||
u32 frame_index)
|
||||
{
|
||||
struct sas_task *task = isci_request_access_task(ireq);
|
||||
enum sci_status status;
|
||||
|
||||
status = process_unsolicited_fis(ireq, frame_index);
|
||||
|
||||
if (status == SCI_SUCCESS) {
|
||||
if (ireq->stp.rsp.status & ATA_ERR)
|
||||
status = SCI_IO_FAILURE_RESPONSE_VALID;
|
||||
} else {
|
||||
status = SCI_IO_FAILURE_RESPONSE_VALID;
|
||||
}
|
||||
|
||||
if (status != SCI_SUCCESS) {
|
||||
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
|
||||
ireq->sci_status = status;
|
||||
} else {
|
||||
ireq->scu_status = SCU_TASK_DONE_GOOD;
|
||||
ireq->sci_status = SCI_SUCCESS;
|
||||
}
|
||||
|
||||
/* the d2h ufi is the end of non-data commands */
|
||||
if (task->data_dir == DMA_NONE)
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
|
||||
{
|
||||
struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
|
||||
void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
|
||||
struct scu_task_context *task_context = ireq->tc;
|
||||
|
||||
/* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame
|
||||
* type. The TC for previous Packet fis was already there, we only need to
|
||||
* change the H2D fis content.
|
||||
*/
|
||||
memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
|
||||
memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
|
||||
memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
|
||||
task_context->type.stp.fis_type = FIS_DATA;
|
||||
task_context->transfer_length_bytes = dev->cdb_len;
|
||||
}
|
||||
|
||||
static void scu_atapi_construct_task_context(struct isci_request *ireq)
|
||||
{
|
||||
struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
|
||||
struct sas_task *task = isci_request_access_task(ireq);
|
||||
struct scu_task_context *task_context = ireq->tc;
|
||||
int cdb_len = dev->cdb_len;
|
||||
|
||||
/* reference: SSTL 1.13.4.2
|
||||
* task_type, sata_direction
|
||||
*/
|
||||
if (task->data_dir == DMA_TO_DEVICE) {
|
||||
task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
|
||||
task_context->sata_direction = 0;
|
||||
} else {
|
||||
/* todo: for NO_DATA command, we need to send out raw frame. */
|
||||
task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
|
||||
task_context->sata_direction = 1;
|
||||
}
|
||||
|
||||
memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
|
||||
task_context->type.stp.fis_type = FIS_DATA;
|
||||
|
||||
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
|
||||
memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
|
||||
task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
|
||||
|
||||
/* task phase is set to TX_CMD */
|
||||
task_context->task_phase = 0x1;
|
||||
|
||||
/* retry counter */
|
||||
task_context->stp_retry_count = 0;
|
||||
|
||||
/* data transfer size. */
|
||||
task_context->transfer_length_bytes = task->total_xfer_len;
|
||||
|
||||
/* setup sgl */
|
||||
sci_request_build_sgl(ireq);
|
||||
}
|
||||
|
||||
enum sci_status
|
||||
sci_io_request_frame_handler(struct isci_request *ireq,
|
||||
u32 frame_index)
|
||||
@@ -1490,29 +1646,30 @@ sci_io_request_frame_handler(struct isci_request *ireq,
|
||||
return SCI_SUCCESS;
|
||||
|
||||
case SCI_REQ_SMP_WAIT_RESP: {
|
||||
struct smp_resp *rsp_hdr = &ireq->smp.rsp;
|
||||
void *frame_header;
|
||||
struct sas_task *task = isci_request_access_task(ireq);
|
||||
struct scatterlist *sg = &task->smp_task.smp_resp;
|
||||
void *frame_header, *kaddr;
|
||||
u8 *rsp;
|
||||
|
||||
sci_unsolicited_frame_control_get_header(&ihost->uf_control,
|
||||
frame_index,
|
||||
&frame_header);
|
||||
frame_index,
|
||||
&frame_header);
|
||||
kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
|
||||
rsp = kaddr + sg->offset;
|
||||
sci_swab32_cpy(rsp, frame_header, 1);
|
||||
|
||||
/* byte swap the header. */
|
||||
word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
|
||||
sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
|
||||
|
||||
if (rsp_hdr->frame_type == SMP_RESPONSE) {
|
||||
if (rsp[0] == SMP_RESPONSE) {
|
||||
void *smp_resp;
|
||||
|
||||
sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
|
||||
frame_index,
|
||||
&smp_resp);
|
||||
frame_index,
|
||||
&smp_resp);
|
||||
|
||||
word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
|
||||
sizeof(u32);
|
||||
|
||||
sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
|
||||
smp_resp, word_cnt);
|
||||
word_cnt = (sg->length/4)-1;
|
||||
if (word_cnt > 0)
|
||||
word_cnt = min_t(unsigned int, word_cnt,
|
||||
SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
|
||||
sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
|
||||
|
||||
ireq->scu_status = SCU_TASK_DONE_GOOD;
|
||||
ireq->sci_status = SCI_SUCCESS;
|
||||
@@ -1528,12 +1685,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
|
||||
__func__,
|
||||
ireq,
|
||||
frame_index,
|
||||
rsp_hdr->frame_type);
|
||||
rsp[0]);
|
||||
|
||||
ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
|
||||
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
}
|
||||
kunmap_atomic(kaddr, KM_IRQ0);
|
||||
|
||||
sci_controller_release_frame(ihost, frame_index);
|
||||
|
||||
@@ -1833,6 +1991,24 @@ sci_io_request_frame_handler(struct isci_request *ireq,
|
||||
|
||||
return status;
|
||||
}
|
||||
case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
|
||||
struct sas_task *task = isci_request_access_task(ireq);
|
||||
|
||||
sci_controller_release_frame(ihost, frame_index);
|
||||
ireq->target_device->working_request = ireq;
|
||||
if (task->data_dir == DMA_NONE) {
|
||||
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
|
||||
scu_atapi_reconstruct_raw_frame_task_context(ireq);
|
||||
} else {
|
||||
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
|
||||
scu_atapi_construct_task_context(ireq);
|
||||
}
|
||||
|
||||
sci_controller_continue_io(ireq);
|
||||
return SCI_SUCCESS;
|
||||
}
|
||||
case SCI_REQ_ATAPI_WAIT_D2H:
|
||||
return atapi_d2h_reg_frame_handler(ireq, frame_index);
|
||||
case SCI_REQ_ABORTING:
|
||||
/*
|
||||
* TODO: Is it even possible to get an unsolicited frame in the
|
||||
@@ -1898,10 +2074,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
|
||||
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
|
||||
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
|
||||
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
|
||||
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
|
||||
sci_remote_device_suspend(ireq->target_device,
|
||||
SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
|
||||
/* Fall through to the default case */
|
||||
/* Fall through to the default case */
|
||||
default:
|
||||
/* All other completion status cause the IO to be complete. */
|
||||
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
|
||||
@@ -1964,6 +2139,112 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
|
||||
return SCI_SUCCESS;
|
||||
}
|
||||
|
||||
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
|
||||
enum sci_base_request_states next)
|
||||
{
|
||||
enum sci_status status = SCI_SUCCESS;
|
||||
|
||||
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
|
||||
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
|
||||
ireq->scu_status = SCU_TASK_DONE_GOOD;
|
||||
ireq->sci_status = SCI_SUCCESS;
|
||||
sci_change_state(&ireq->sm, next);
|
||||
break;
|
||||
default:
|
||||
/* All other completion status cause the IO to be complete.
|
||||
* If a NAK was received, then it is up to the user to retry
|
||||
* the request.
|
||||
*/
|
||||
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
|
||||
ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
|
||||
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
|
||||
u32 completion_code)
|
||||
{
|
||||
struct isci_remote_device *idev = ireq->target_device;
|
||||
struct dev_to_host_fis *d2h = &ireq->stp.rsp;
|
||||
enum sci_status status = SCI_SUCCESS;
|
||||
|
||||
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
|
||||
case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
break;
|
||||
|
||||
case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
|
||||
u16 len = sci_req_tx_bytes(ireq);
|
||||
|
||||
/* likely non-error data underrrun, workaround missing
|
||||
* d2h frame from the controller
|
||||
*/
|
||||
if (d2h->fis_type != FIS_REGD2H) {
|
||||
d2h->fis_type = FIS_REGD2H;
|
||||
d2h->flags = (1 << 6);
|
||||
d2h->status = 0x50;
|
||||
d2h->error = 0;
|
||||
d2h->lbal = 0;
|
||||
d2h->byte_count_low = len & 0xff;
|
||||
d2h->byte_count_high = len >> 8;
|
||||
d2h->device = 0xa0;
|
||||
d2h->lbal_exp = 0;
|
||||
d2h->lbam_exp = 0;
|
||||
d2h->lbah_exp = 0;
|
||||
d2h->_r_a = 0;
|
||||
d2h->sector_count = 0x3;
|
||||
d2h->sector_count_exp = 0;
|
||||
d2h->_r_b = 0;
|
||||
d2h->_r_c = 0;
|
||||
d2h->_r_d = 0;
|
||||
}
|
||||
|
||||
ireq->scu_status = SCU_TASK_DONE_GOOD;
|
||||
ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
|
||||
status = ireq->sci_status;
|
||||
|
||||
/* the hw will have suspended the rnc, so complete the
|
||||
* request upon pending resume
|
||||
*/
|
||||
sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
|
||||
break;
|
||||
}
|
||||
case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
|
||||
/* In this case, there is no UF coming after.
|
||||
* compelte the IO now.
|
||||
*/
|
||||
ireq->scu_status = SCU_TASK_DONE_GOOD;
|
||||
ireq->sci_status = SCI_SUCCESS;
|
||||
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
|
||||
break;
|
||||
|
||||
default:
|
||||
if (d2h->fis_type == FIS_REGD2H) {
|
||||
/* UF received change the device state to ATAPI_ERROR */
|
||||
status = ireq->sci_status;
|
||||
sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
|
||||
} else {
|
||||
/* If receiving any non-sucess TC status, no UF
|
||||
* received yet, then an UF for the status fis
|
||||
* is coming after (XXX: suspect this is
|
||||
* actually a protocol error or a bug like the
|
||||
* DONE_UNEXP_FIS case)
|
||||
*/
|
||||
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
|
||||
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
|
||||
|
||||
sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
enum sci_status
|
||||
sci_io_request_tc_completion(struct isci_request *ireq,
|
||||
u32 completion_code)
|
||||
@@ -2015,6 +2296,17 @@ sci_io_request_tc_completion(struct isci_request *ireq,
|
||||
return request_aborting_state_tc_event(ireq,
|
||||
completion_code);
|
||||
|
||||
case SCI_REQ_ATAPI_WAIT_H2D:
|
||||
return atapi_raw_completion(ireq, completion_code,
|
||||
SCI_REQ_ATAPI_WAIT_PIO_SETUP);
|
||||
|
||||
case SCI_REQ_ATAPI_WAIT_TC_COMP:
|
||||
return atapi_raw_completion(ireq, completion_code,
|
||||
SCI_REQ_ATAPI_WAIT_D2H);
|
||||
|
||||
case SCI_REQ_ATAPI_WAIT_D2H:
|
||||
return atapi_data_tc_completion_handler(ireq, completion_code);
|
||||
|
||||
default:
|
||||
dev_warn(&ihost->pdev->dev,
|
||||
"%s: SCIC IO Request given task completion "
|
||||
@@ -2421,6 +2713,8 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
|
||||
*/
|
||||
if (fis->status & ATA_DF)
|
||||
ts->stat = SAS_PROTO_RESPONSE;
|
||||
else if (fis->status & ATA_ERR)
|
||||
ts->stat = SAM_STAT_CHECK_CONDITION;
|
||||
else
|
||||
ts->stat = SAM_STAT_GOOD;
|
||||
|
||||
@@ -2603,18 +2897,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
|
||||
status = SAM_STAT_GOOD;
|
||||
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
|
||||
|
||||
if (task->task_proto == SAS_PROTOCOL_SMP) {
|
||||
void *rsp = &request->smp.rsp;
|
||||
|
||||
dev_dbg(&ihost->pdev->dev,
|
||||
"%s: SMP protocol completion\n",
|
||||
__func__);
|
||||
|
||||
sg_copy_from_buffer(
|
||||
&task->smp_task.smp_resp, 1,
|
||||
rsp, sizeof(struct smp_resp));
|
||||
} else if (completion_status
|
||||
== SCI_IO_SUCCESS_IO_DONE_EARLY) {
|
||||
if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
|
||||
|
||||
/* This was an SSP / STP / SATA transfer.
|
||||
* There is a possibility that less data than
|
||||
@@ -2791,6 +3074,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
|
||||
{
|
||||
struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
|
||||
struct domain_device *dev = ireq->target_device->domain_dev;
|
||||
enum sci_base_request_states state;
|
||||
struct sas_task *task;
|
||||
|
||||
/* XXX as hch said always creating an internal sas_task for tmf
|
||||
@@ -2802,26 +3086,30 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
|
||||
* substates
|
||||
*/
|
||||
if (!task && dev->dev_type == SAS_END_DEV) {
|
||||
sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
|
||||
state = SCI_REQ_TASK_WAIT_TC_COMP;
|
||||
} else if (!task &&
|
||||
(isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
|
||||
isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
|
||||
sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
|
||||
state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
|
||||
} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
|
||||
sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
|
||||
state = SCI_REQ_SMP_WAIT_RESP;
|
||||
} else if (task && sas_protocol_ata(task->task_proto) &&
|
||||
!task->ata_task.use_ncq) {
|
||||
u32 state;
|
||||
|
||||
if (task->data_dir == DMA_NONE)
|
||||
if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
|
||||
task->ata_task.fis.command == ATA_CMD_PACKET) {
|
||||
state = SCI_REQ_ATAPI_WAIT_H2D;
|
||||
} else if (task->data_dir == DMA_NONE) {
|
||||
state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
|
||||
else if (task->ata_task.dma_xfer)
|
||||
} else if (task->ata_task.dma_xfer) {
|
||||
state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
|
||||
else /* PIO */
|
||||
} else /* PIO */ {
|
||||
state = SCI_REQ_STP_PIO_WAIT_H2D;
|
||||
|
||||
sci_change_state(sm, state);
|
||||
}
|
||||
} else {
|
||||
/* SSP or NCQ are fully accelerated, no substates */
|
||||
return;
|
||||
}
|
||||
sci_change_state(sm, state);
|
||||
}
|
||||
|
||||
static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
|
||||
@@ -2913,6 +3201,10 @@ static const struct sci_base_state sci_request_state_table[] = {
|
||||
[SCI_REQ_TASK_WAIT_TC_RESP] = { },
|
||||
[SCI_REQ_SMP_WAIT_RESP] = { },
|
||||
[SCI_REQ_SMP_WAIT_TC_COMP] = { },
|
||||
[SCI_REQ_ATAPI_WAIT_H2D] = { },
|
||||
[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
|
||||
[SCI_REQ_ATAPI_WAIT_D2H] = { },
|
||||
[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
|
||||
[SCI_REQ_COMPLETED] = {
|
||||
.enter_state = sci_request_completed_state_enter,
|
||||
},
|
||||
|
@@ -96,7 +96,6 @@ enum sci_request_protocol {
|
||||
* to wait for another fis or if the transfer is complete. Upon
|
||||
* receipt of a d2h fis this will be the status field of that fis.
|
||||
* @sgl - track pio transfer progress as we iterate through the sgl
|
||||
* @device_cdb_len - atapi device advertises it's transfer constraints at setup
|
||||
*/
|
||||
struct isci_stp_request {
|
||||
u32 pio_len;
|
||||
@@ -107,7 +106,6 @@ struct isci_stp_request {
|
||||
u8 set;
|
||||
u32 offset;
|
||||
} sgl;
|
||||
u32 device_cdb_len;
|
||||
};
|
||||
|
||||
struct isci_request {
|
||||
@@ -173,9 +171,6 @@ struct isci_request {
|
||||
u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
|
||||
};
|
||||
} ssp;
|
||||
struct {
|
||||
struct smp_resp rsp;
|
||||
} smp;
|
||||
struct {
|
||||
struct isci_stp_request req;
|
||||
struct host_to_dev_fis cmd;
|
||||
@@ -251,6 +246,32 @@ enum sci_base_request_states {
|
||||
*/
|
||||
SCI_REQ_STP_PIO_DATA_OUT,
|
||||
|
||||
/*
|
||||
* While in this state the IO request object is waiting for the TC
|
||||
* completion notification for the H2D Register FIS
|
||||
*/
|
||||
SCI_REQ_ATAPI_WAIT_H2D,
|
||||
|
||||
/*
|
||||
* While in this state the IO request object is waiting for either a
|
||||
* PIO Setup.
|
||||
*/
|
||||
SCI_REQ_ATAPI_WAIT_PIO_SETUP,
|
||||
|
||||
/*
|
||||
* The non-data IO transit to this state in this state after receiving
|
||||
* TC completion. While in this state IO request object is waiting for
|
||||
* D2H status frame as UF.
|
||||
*/
|
||||
SCI_REQ_ATAPI_WAIT_D2H,
|
||||
|
||||
/*
|
||||
* When transmitting raw frames hardware reports task context completion
|
||||
* after every frame submission, so in the non-accelerated case we need
|
||||
* to expect the completion for the "cdb" frame.
|
||||
*/
|
||||
SCI_REQ_ATAPI_WAIT_TC_COMP,
|
||||
|
||||
/*
|
||||
* The AWAIT_TC_COMPLETION sub-state indicates that the started raw
|
||||
* task management request is waiting for the transmission of the
|
||||
|
@@ -204,8 +204,6 @@ struct smp_req {
|
||||
u8 req_data[0];
|
||||
} __packed;
|
||||
|
||||
#define SMP_RESP_HDR_SZ 4
|
||||
|
||||
/*
|
||||
* struct sci_sas_address - This structure depicts how a SAS address is
|
||||
* represented by SCI.
|
||||
|
@@ -1345,29 +1345,6 @@ static void isci_smp_task_done(struct sas_task *task)
|
||||
complete(&task->completion);
|
||||
}
|
||||
|
||||
static struct sas_task *isci_alloc_task(void)
|
||||
{
|
||||
struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
|
||||
|
||||
if (task) {
|
||||
INIT_LIST_HEAD(&task->list);
|
||||
spin_lock_init(&task->task_state_lock);
|
||||
task->task_state_flags = SAS_TASK_STATE_PENDING;
|
||||
init_timer(&task->timer);
|
||||
init_completion(&task->completion);
|
||||
}
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
|
||||
{
|
||||
if (task) {
|
||||
BUG_ON(!list_empty(&task->list));
|
||||
kfree(task);
|
||||
}
|
||||
}
|
||||
|
||||
static int isci_smp_execute_task(struct isci_host *ihost,
|
||||
struct domain_device *dev, void *req,
|
||||
int req_size, void *resp, int resp_size)
|
||||
@@ -1376,7 +1353,7 @@ static int isci_smp_execute_task(struct isci_host *ihost,
|
||||
struct sas_task *task = NULL;
|
||||
|
||||
for (retry = 0; retry < 3; retry++) {
|
||||
task = isci_alloc_task();
|
||||
task = sas_alloc_task(GFP_KERNEL);
|
||||
if (!task)
|
||||
return -ENOMEM;
|
||||
|
||||
@@ -1439,13 +1416,13 @@ static int isci_smp_execute_task(struct isci_host *ihost,
|
||||
SAS_ADDR(dev->sas_addr),
|
||||
task->task_status.resp,
|
||||
task->task_status.stat);
|
||||
isci_free_task(ihost, task);
|
||||
sas_free_task(task);
|
||||
task = NULL;
|
||||
}
|
||||
}
|
||||
ex_err:
|
||||
BUG_ON(retry == 3 && task != NULL);
|
||||
isci_free_task(ihost, task);
|
||||
sas_free_task(task);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
@@ -286,6 +286,25 @@ isci_task_set_completion_status(
task->task_status.resp = response;
task->task_status.stat = status;

switch (task->task_proto) {

case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:

if (task_notification_selection
== isci_perform_error_io_completion) {
/* SATA/STP I/O has it's own means of scheduling device
* error handling on the normal path.
*/
task_notification_selection
= isci_perform_normal_io_completion;
}
break;
default:
break;
}

switch (task_notification_selection) {

case isci_perform_error_io_completion: