isci: workaround port task scheduler starvation issue
There is a condition whereby TCs (task contexts) can jump to the head of the round robin queue, causing indefinite starvation of pending tasks. The workaround is to post the TC to a suspended RNC (remote node context): the hardware still selects that task first, but because the RNC is suspended the scheduler moves on to the next task in the expected round robin fashion, restoring TC arbitration fairness.

Signed-off-by: Tomasz Chudy <tomasz.chudy@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
commit a8d4b9fe91
parent 7c40a80358
committed by Dan Williams
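
To make the scheduling behaviour described in the commit message easier to follow, here is a small, self-contained C model. It is not isci driver code, and none of its names (tc_entry, queue_pick, NUM_TCS) exist in the driver; it only sketches the idea that a head-of-queue TC would otherwise be selected indefinitely, while posting it against a suspended RNC makes the hardware skip it and continue in round robin order.

/*
 * Conceptual model only -- not isci driver code.  All names below are
 * hypothetical and exist purely to illustrate the behaviour described
 * in the commit message: a TC that keeps jumping to the head of the
 * queue would otherwise be selected forever, but if its RNC is
 * suspended the scheduler skips it and resumes round robin, so the
 * remaining TCs make progress.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_TCS 4

struct tc_entry {
	int id;
	bool rnc_suspended;	/* remote node context suspended? */
};

/*
 * Pick the next runnable TC.  The head entry is always examined first
 * (modelling the starvation-prone behaviour); an entry whose RNC is
 * suspended is skipped, which is what restores round robin fairness.
 */
static int queue_pick(struct tc_entry *q, int cursor, int head)
{
	int i, idx;

	/* The head TC is considered first ... */
	if (!q[head].rnc_suspended)
		return head;

	/* ... but a suspended RNC makes the scheduler fall back to
	 * plain round robin from the saved cursor. */
	for (i = 0; i < NUM_TCS; i++) {
		idx = (cursor + i) % NUM_TCS;
		if (!q[idx].rnc_suspended)
			return idx;
	}
	return -1;	/* nothing runnable */
}

int main(void)
{
	struct tc_entry q[NUM_TCS] = {
		{ .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
	};
	int cursor = 1, round;

	/* Without the workaround: TC 0 keeps landing at the head and is
	 * selected every time, so TCs 1..3 starve. */
	for (round = 0; round < 4; round++)
		printf("starved: picked TC %d\n", q[queue_pick(q, cursor, 0)].id);

	/* Workaround: post the head TC against a suspended RNC, so the
	 * scheduler skips it and the other TCs are served in turn. */
	q[0].rnc_suspended = true;
	for (round = 0; round < 4; round++) {
		int idx = queue_pick(q, cursor, 0);

		printf("fair:    picked TC %d\n", q[idx].id);
		cursor = (idx + 1) % NUM_TCS;
	}
	return 0;
}
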
@@ -381,7 +381,6 @@ int isci_host_init(struct isci_host *isci_host)
 	int index = 0;
 	enum sci_status status;
 	struct scic_sds_controller *controller;
-	struct scic_sds_port *scic_port;
 	union scic_oem_parameters scic_oem_params;
 	union scic_user_parameters scic_user_params;
 
@@ -517,11 +516,5 @@ int isci_host_init(struct isci_host *isci_host)
 	for (index = 0; index < SCI_MAX_PHYS; index++)
 		isci_phy_init(&isci_host->phys[index], isci_host, index);
 
-	/* Start the ports */
-	for (index = 0; index < SCI_MAX_PORTS; index++) {
-		scic_controller_get_port_handle(controller, index, &scic_port);
-		scic_port_start(scic_port);
-	}
-
 	return 0;
 }