Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: arcmsr, qla2xxx, lpfc,
  hisi_sas, target/iscsi and target/core. Additionally Christoph
  refactored gdth as part of the dma changes.

  The major mid-layer change this time is the removal of bidi commands
  and with them the whole of the osd/exofs driver and filesystem. This
  is a major simplification for block and mq in particular"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (240 commits)
  scsi: cxgb4i: validate tcp sequence number only if chip version <= T5
  scsi: cxgb4i: get pf number from lldi->pf
  scsi: core: replace GFP_ATOMIC with GFP_KERNEL in scsi_scan.c
  scsi: mpt3sas: Add missing breaks in switch statements
  scsi: aacraid: Fix missing break in switch statement
  scsi: kill command serial number
  scsi: csiostor: drop serial_number usage
  scsi: mvumi: use request tag instead of serial_number
  scsi: dpt_i2o: remove serial number usage
  scsi: st: osst: Remove negative constant left-shifts
  scsi: ufs-bsg: Allow reading descriptors
  scsi: ufs: Allow reading descriptor via raw upiu
  scsi: ufs-bsg: Change the calling convention for write descriptor
  scsi: ufs: Remove unused device quirks
  Revert "scsi: ufs: disable vccq if it's not needed by UFS device"
  scsi: megaraid_sas: Remove a bunch of set but not used variables
  scsi: clean obsolete return values of eh_timed_out
  scsi: sd: Optimal I/O size should be a multiple of physical block size
  scsi: MAINTAINERS: SCSI initiator and target tweaks
  scsi: fcoe: make use of fip_mode enum complete
  ...
@@ -345,7 +345,7 @@ struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
 int cxgbit_ddp_init(struct cxgbit_device *);
 int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
 int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
-void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
 
 static inline
 struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
@@ -263,7 +263,7 @@ out:
         r2t->targ_xfer_tag = ttinfo->tag;
 }
 
-void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
         struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
 
@@ -678,7 +678,7 @@ static struct iscsit_transport cxgbit_transport = {
         .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
         .iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
         .iscsit_validate_params = cxgbit_validate_params,
-        .iscsit_release_cmd = cxgbit_release_cmd,
+        .iscsit_unmap_cmd = cxgbit_unmap_cmd,
         .iscsit_aborted_task = iscsit_aborted_task,
         .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
 };
@@ -960,7 +960,7 @@ after_immediate_data:
                         target_put_sess_cmd(&cmd->se_cmd);
                         return 0;
                 } else if (cmd->unsolicited_data) {
-                        iscsit_set_unsoliticed_dataout(cmd);
+                        iscsit_set_unsolicited_dataout(cmd);
                 }
 
         } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
@@ -308,9 +308,6 @@ bool iscsit_check_np_match(
         return false;
 }
 
-/*
- * Called with mutex np_lock held
- */
 static struct iscsi_np *iscsit_get_np(
         struct sockaddr_storage *sockaddr,
         int network_transport)
@@ -318,6 +315,8 @@ static struct iscsi_np *iscsit_get_np(
         struct iscsi_np *np;
         bool match;
 
+        lockdep_assert_held(&np_lock);
+
         list_for_each_entry(np, &g_np_list, np_list) {
                 spin_lock_bh(&np->np_thread_lock);
                 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
@@ -1195,7 +1194,7 @@ attach_cmd:
 }
 EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
 
-void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+void iscsit_set_unsolicited_dataout(struct iscsi_cmd *cmd)
 {
         iscsit_set_dataout_sequence_values(cmd);
 
@@ -1203,7 +1202,7 @@ void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
         iscsit_start_dataout_timer(cmd, cmd->conn);
         spin_unlock_bh(&cmd->dataout_timeout_lock);
 }
-EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
 
 int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                             struct iscsi_scsi_req *hdr)
@@ -1237,7 +1236,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
          */
         if (!cmd->immediate_data) {
                 if (!cmd->sense_reason && cmd->unsolicited_data)
-                        iscsit_set_unsoliticed_dataout(cmd);
+                        iscsit_set_unsolicited_dataout(cmd);
                 if (!cmd->sense_reason)
                         return 0;
 
@@ -1309,7 +1308,7 @@ after_immediate_data:
                         target_put_sess_cmd(&cmd->se_cmd);
                         return rc;
                 } else if (cmd->unsolicited_data)
-                        iscsit_set_unsoliticed_dataout(cmd);
+                        iscsit_set_unsolicited_dataout(cmd);
 
         } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
                 /*
@@ -2241,28 +2240,25 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         rx_size = payload_length;
         if (payload_length) {
                 u32 checksum = 0, data_crc = 0;
-                u32 padding = 0, pad_bytes = 0;
+                u32 padding = 0;
                 int niov = 0, rx_got;
-                struct kvec iov[3];
+                struct kvec iov[2];
 
-                text_in = kzalloc(payload_length, GFP_KERNEL);
+                rx_size = ALIGN(payload_length, 4);
+                text_in = kzalloc(rx_size, GFP_KERNEL);
                 if (!text_in)
                         goto reject;
 
                 cmd->text_in_ptr = text_in;
 
-                memset(iov, 0, 3 * sizeof(struct kvec));
+                memset(iov, 0, sizeof(iov));
                 iov[niov].iov_base = text_in;
-                iov[niov++].iov_len = payload_length;
+                iov[niov++].iov_len = rx_size;
 
-                padding = ((-payload_length) & 3);
-                if (padding != 0) {
-                        iov[niov].iov_base = &pad_bytes;
-                        iov[niov++].iov_len = padding;
-                        rx_size += padding;
+                padding = rx_size - payload_length;
+                if (padding)
                         pr_debug("Receiving %u additional bytes"
                                         " for padding.\n", padding);
-                }
                 if (conn->conn_ops->DataDigest) {
                         iov[niov].iov_base = &checksum;
                         iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -2274,9 +2270,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                         goto reject;
 
                 if (conn->conn_ops->DataDigest) {
-                        iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
-                                                  payload_length, padding,
-                                                  &pad_bytes, &data_crc);
+                        iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
+                                                  text_in, rx_size, 0, NULL,
+                                                  &data_crc);
 
                         if (checksum != data_crc) {
                                 pr_err("Text data CRC32C DataDigest"
@@ -2655,9 +2651,6 @@ static int iscsit_handle_immediate_data(
         return IMMEDIATE_DATA_NORMAL_OPERATION;
 }
 
-/*
- * Called with sess->conn_lock held.
- */
 /* #warning iscsi_build_conn_drop_async_message() only sends out on connections
         with active network interface */
 static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
@@ -2666,6 +2659,8 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
         struct iscsi_conn *conn_p;
         bool found = false;
 
+        lockdep_assert_held(&conn->sess->conn_lock);
+
         /*
          * Only send a Asynchronous Message on connections whos network
          * interface is still functional.
@@ -4040,9 +4035,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
                 struct se_cmd *se_cmd = &cmd->se_cmd;
 
                 if (se_cmd->se_tfo != NULL) {
-                        spin_lock(&se_cmd->t_state_lock);
+                        spin_lock_irq(&se_cmd->t_state_lock);
                         se_cmd->transport_state |= CMD_T_FABRIC_STOP;
-                        spin_unlock(&se_cmd->t_state_lock);
+                        spin_unlock_irq(&se_cmd->t_state_lock);
                 }
         }
         spin_unlock_bh(&conn->cmd_lock);
@@ -31,7 +31,7 @@ extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
                         struct iscsi_portal_group *, bool);
 extern int iscsit_del_np(struct iscsi_np *);
 extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
-extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
+extern void iscsit_set_unsolicited_dataout(struct iscsi_cmd *);
 extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
@@ -1389,18 +1389,6 @@ static int lio_write_pending(struct se_cmd *se_cmd)
         return 0;
 }
 
-static int lio_write_pending_status(struct se_cmd *se_cmd)
-{
-        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-        int ret;
-
-        spin_lock_bh(&cmd->istate_lock);
-        ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
-        spin_unlock_bh(&cmd->istate_lock);
-
-        return ret;
-}
-
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
         struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
@@ -1564,7 +1552,6 @@ const struct target_core_fabric_ops iscsi_ops = {
         .sess_get_index = lio_sess_get_index,
         .sess_get_initiator_sid = lio_sess_get_initiator_sid,
         .write_pending = lio_write_pending,
-        .write_pending_status = lio_write_pending_status,
         .set_default_node_attributes = lio_set_default_node_attributes,
         .get_cmd_state = iscsi_get_cmd_state,
         .queue_data_in = lio_queue_data_in,
@@ -802,14 +802,13 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
                 jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
 }
 
-/*
- * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
- */
 int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
 {
         struct iscsi_portal_group *tpg = sess->tpg;
         struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 
+        lockdep_assert_held(&se_tpg->session_lock);
+
         if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
                 return -1;
 
@@ -48,14 +48,20 @@ int iscsit_dump_data_payload(
         u32 buf_len,
         int dump_padding_digest)
 {
-        char *buf, pad_bytes[4];
+        char *buf;
         int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
-        u32 length, padding, offset = 0, size;
+        u32 length, offset = 0, size;
         struct kvec iov;
 
         if (conn->sess->sess_ops->RDMAExtensions)
                 return 0;
 
+        if (dump_padding_digest) {
+                buf_len = ALIGN(buf_len, 4);
+                if (conn->conn_ops->DataDigest)
+                        buf_len += ISCSI_CRC_LEN;
+        }
+
         length = min(buf_len, OFFLOAD_BUF_SIZE);
 
         buf = kzalloc(length, GFP_ATOMIC);
@@ -75,41 +81,12 @@ int iscsit_dump_data_payload(
                 rx_got = rx_data(conn, &iov, 1, size);
                 if (rx_got != size) {
                         ret = DATAOUT_CANNOT_RECOVER;
-                        goto out;
+                        break;
                 }
 
                 offset += size;
         }
 
-        if (!dump_padding_digest)
-                goto out;
-
-        padding = ((-buf_len) & 3);
-        if (padding != 0) {
-                iov.iov_len = padding;
-                iov.iov_base = pad_bytes;
-
-                rx_got = rx_data(conn, &iov, 1, padding);
-                if (rx_got != padding) {
-                        ret = DATAOUT_CANNOT_RECOVER;
-                        goto out;
-                }
-        }
-
-        if (conn->conn_ops->DataDigest) {
-                u32 data_crc;
-
-                iov.iov_len = ISCSI_CRC_LEN;
-                iov.iov_base = &data_crc;
-
-                rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
-                if (rx_got != ISCSI_CRC_LEN) {
-                        ret = DATAOUT_CANNOT_RECOVER;
-                        goto out;
-                }
-        }
-
-out:
         kfree(buf);
         return ret;
 }
@@ -797,14 +774,14 @@ static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
         return ooo_cmdsn;
 }
 
-/*
- * Called with sess->cmdsn_mutex held.
- */
 static int iscsit_attach_ooo_cmdsn(
         struct iscsi_session *sess,
         struct iscsi_ooo_cmdsn *ooo_cmdsn)
 {
         struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+
+        lockdep_assert_held(&sess->cmdsn_mutex);
+
         /*
          * We attach the struct iscsi_ooo_cmdsn entry to the out of order
          * list in increasing CmdSN order.
@@ -871,15 +848,14 @@ void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
         mutex_unlock(&sess->cmdsn_mutex);
 }
 
-/*
- * Called with sess->cmdsn_mutex held.
- */
 int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
 {
         int ooo_count = 0;
         struct iscsi_cmd *cmd = NULL;
         struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
 
+        lockdep_assert_held(&sess->cmdsn_mutex);
+
         list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
                         &sess->sess_ooo_cmdsn_list, ooo_list) {
                 if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
@@ -980,7 +956,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
                         return 0;
 
-                iscsit_set_unsoliticed_dataout(cmd);
+                iscsit_set_unsolicited_dataout(cmd);
         }
         return transport_handle_cdb_direct(&cmd->se_cmd);
 
@@ -1232,9 +1208,6 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
         spin_unlock_bh(&cmd->dataout_timeout_lock);
 }
 
-/*
- * Called with cmd->dataout_timeout_lock held.
- */
 void iscsit_start_dataout_timer(
         struct iscsi_cmd *cmd,
         struct iscsi_conn *conn)
@@ -1242,6 +1215,8 @@ void iscsit_start_dataout_timer(
         struct iscsi_session *sess = conn->sess;
         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
+        lockdep_assert_held(&cmd->dataout_timeout_lock);
+
         if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
                 return;
 
@@ -56,9 +56,6 @@
 extern struct list_head g_tiqn_list;
 extern spinlock_t tiqn_lock;
 
-/*
- * Called with cmd->r2t_lock held.
- */
 int iscsit_add_r2t_to_list(
         struct iscsi_cmd *cmd,
         u32 offset,
@@ -68,6 +65,8 @@ int iscsit_add_r2t_to_list(
 {
         struct iscsi_r2t *r2t;
 
+        lockdep_assert_held(&cmd->r2t_lock);
+
         r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
         if (!r2t) {
                 pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
@@ -128,11 +127,10 @@ struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
         return NULL;
 }
 
-/*
- * Called with cmd->r2t_lock held.
- */
 void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
 {
+        lockdep_assert_held(&cmd->r2t_lock);
+
         list_del(&r2t->r2t_list);
         kmem_cache_free(lio_r2t_cache, r2t);
 }
@@ -762,8 +760,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
                 iscsit_remove_cmd_from_response_queue(cmd, conn);
         }
 
-        if (conn && conn->conn_transport->iscsit_release_cmd)
-                conn->conn_transport->iscsit_release_cmd(conn, cmd);
+        if (conn && conn->conn_transport->iscsit_unmap_cmd)
+                conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
 }
 
 void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -956,9 +954,6 @@ void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
         spin_unlock_bh(&conn->nopin_timer_lock);
 }
 
-/*
- * Called with conn->nopin_timer_lock held.
- */
 void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
 {
         struct iscsi_session *sess = conn->sess;
@@ -1016,13 +1011,13 @@ void iscsit_handle_nopin_timeout(struct timer_list *t)
         iscsit_dec_conn_usage_count(conn);
 }
 
-/*
- * Called with conn->nopin_timer_lock held.
- */
 void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
 {
         struct iscsi_session *sess = conn->sess;
         struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+        lockdep_assert_held(&conn->nopin_timer_lock);
+
         /*
          * NOPIN timeout is disabled.
          */
@@ -128,14 +128,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
                 set_host_byte(sc, DID_ERROR);
                 goto out_done;
         }
-        if (scsi_bidi_cmnd(sc)) {
-                struct scsi_data_buffer *sdb = scsi_in(sc);
-
-                sgl_bidi = sdb->table.sgl;
-                sgl_bidi_count = sdb->table.nents;
-                se_cmd->se_cmd_flags |= SCF_BIDI;
-
-        }
 
         transfer_length = scsi_transfer_length(sc);
         if (!scsi_prot_sg_count(sc) &&
@@ -304,12 +296,6 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
         return FAILED;
 }
 
-static int tcm_loop_slave_alloc(struct scsi_device *sd)
-{
-        blk_queue_flag_set(QUEUE_FLAG_BIDI, sd->request_queue);
-        return 0;
-}
-
 static struct scsi_host_template tcm_loop_driver_template = {
         .show_info = tcm_loop_show_info,
         .proc_name = "tcm_loopback",
@@ -325,7 +311,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
         .cmd_per_lun = 1024,
         .max_sectors = 0xFFFF,
         .dma_boundary = PAGE_SIZE - 1,
-        .slave_alloc = tcm_loop_slave_alloc,
         .module = THIS_MODULE,
         .track_queue_depth = 1,
 };
@@ -560,11 +545,6 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
         return 0;
 }
 
-static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
-{
-        return 0;
-}
-
 static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
 {
         struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
@@ -1159,7 +1139,6 @@ static const struct target_core_fabric_ops loop_ops = {
         .release_cmd = tcm_loop_release_cmd,
         .sess_get_index = tcm_loop_sess_get_index,
         .write_pending = tcm_loop_write_pending,
-        .write_pending_status = tcm_loop_write_pending_status,
         .set_default_node_attributes = tcm_loop_set_default_node_attributes,
         .get_cmd_state = tcm_loop_get_cmd_state,
         .queue_data_in = tcm_loop_queue_data_in,
@@ -1749,11 +1749,6 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
         return 0;
 }
 
-static int sbp_write_pending_status(struct se_cmd *se_cmd)
-{
-        return 0;
-}
-
 static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
 {
         return;
@@ -2329,7 +2324,6 @@ static const struct target_core_fabric_ops sbp_ops = {
         .release_cmd = sbp_release_cmd,
         .sess_get_index = sbp_sess_get_index,
         .write_pending = sbp_write_pending,
-        .write_pending_status = sbp_write_pending_status,
         .set_default_node_attributes = sbp_set_default_node_attrs,
         .get_cmd_state = sbp_get_cmd_state,
         .queue_data_in = sbp_queue_data_in,
@@ -910,9 +910,6 @@ static int core_alua_write_tpg_metadata(
         return (ret < 0) ? -EIO : 0;
 }
 
-/*
- * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
- */
 static int core_alua_update_tpg_primary_metadata(
         struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
@@ -921,6 +918,8 @@ static int core_alua_update_tpg_primary_metadata(
         char *path;
         int len, rc;
 
+        lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
+
         md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
         if (!md_buf) {
                 pr_err("Unable to allocate buf for ALUA metadata\n");
@@ -401,10 +401,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
                 pr_err("Missing tfo->write_pending()\n");
                 return -EINVAL;
         }
-        if (!tfo->write_pending_status) {
-                pr_err("Missing tfo->write_pending_status()\n");
-                return -EINVAL;
-        }
         if (!tfo->set_default_node_attributes) {
                 pr_err("Missing tfo->set_default_node_attributes()\n");
                 return -EINVAL;
@@ -404,9 +404,6 @@ int core_enable_device_list_for_node(
         return 0;
 }
 
-/*
- * Called with se_node_acl->lun_entry_mutex held.
- */
 void core_disable_device_list_for_node(
         struct se_lun *lun,
         struct se_dev_entry *orig,
@@ -418,6 +415,9 @@ void core_disable_device_list_for_node(
          * reference to se_device->dev_group.
          */
         struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
+        lockdep_assert_held(&nacl->lun_entry_mutex);
+
         /*
          * If the MappedLUN entry is being disabled, the entry in
          * lun->lun_deve_list must be removed now before clearing the
@@ -1290,9 +1290,6 @@ static int core_scsi3_check_implicit_release(
         return ret;
 }
 
-/*
- * Called with struct t10_reservation->registration_lock held.
- */
 static void __core_scsi3_free_registration(
         struct se_device *dev,
         struct t10_pr_registration *pr_reg,
@@ -1308,6 +1305,8 @@ static void __core_scsi3_free_registration(
         struct se_dev_entry *deve;
         char i_buf[PR_REG_ISID_ID_LEN];
 
+        lockdep_assert_held(&pr_tmpl->registration_lock);
+
         memset(i_buf, 0, PR_REG_ISID_ID_LEN);
         core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
 
@@ -2450,9 +2449,6 @@ core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
                 }
         }
 
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
 static void __core_scsi3_complete_pro_release(
         struct se_device *dev,
         struct se_node_acl *se_nacl,
@@ -2464,6 +2460,8 @@ static void __core_scsi3_complete_pro_release(
         char i_buf[PR_REG_ISID_ID_LEN];
         int pr_res_type = 0, pr_res_scope = 0;
 
+        lockdep_assert_held(&dev->dev_reservation_lock);
+
         memset(i_buf, 0, PR_REG_ISID_ID_LEN);
         core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
         /*
@@ -2760,9 +2758,6 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
         return 0;
 }
 
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
 static void __core_scsi3_complete_pro_preempt(
         struct se_device *dev,
         struct t10_pr_registration *pr_reg,
@@ -2775,6 +2770,8 @@ static void __core_scsi3_complete_pro_preempt(
         const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
         char i_buf[PR_REG_ISID_ID_LEN];
 
+        lockdep_assert_held(&dev->dev_reservation_lock);
+
         memset(i_buf, 0, PR_REG_ISID_ID_LEN);
         core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
         /*
@@ -114,21 +114,6 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
                 spin_unlock(&se_cmd->t_state_lock);
                 return false;
         }
-        if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
-                if (se_cmd->scsi_status) {
-                        pr_debug("Attempted to abort io tag: %llu early failure"
-                                 " status: 0x%02x\n", se_cmd->tag,
-                                 se_cmd->scsi_status);
-                        spin_unlock(&se_cmd->t_state_lock);
-                        return false;
-                }
-        }
-        if (sess->sess_tearing_down) {
-                pr_debug("Attempted to abort io tag: %llu already shutdown,"
-                         " skipping\n", se_cmd->tag);
-                spin_unlock(&se_cmd->t_state_lock);
-                return false;
-        }
         se_cmd->transport_state |= CMD_T_ABORTED;
 
         if ((tmr_sess != se_cmd->se_sess) && tas)
@@ -232,33 +217,13 @@ static void core_tmr_drain_tmr_list(
                         continue;
 
                 spin_lock(&sess->sess_cmd_lock);
-                spin_lock(&cmd->t_state_lock);
-                if (!(cmd->transport_state & CMD_T_ACTIVE) ||
-                    (cmd->transport_state & CMD_T_FABRIC_STOP)) {
-                        spin_unlock(&cmd->t_state_lock);
-                        spin_unlock(&sess->sess_cmd_lock);
-                        continue;
-                }
-                if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-                        spin_unlock(&cmd->t_state_lock);
-                        spin_unlock(&sess->sess_cmd_lock);
-                        continue;
-                }
-                if (sess->sess_tearing_down) {
-                        spin_unlock(&cmd->t_state_lock);
-                        spin_unlock(&sess->sess_cmd_lock);
-                        continue;
-                }
-                cmd->transport_state |= CMD_T_ABORTED;
-                spin_unlock(&cmd->t_state_lock);
+                rc = __target_check_io_state(cmd, sess, 0);
+                spin_unlock(&sess->sess_cmd_lock);
 
-                rc = kref_get_unless_zero(&cmd->cmd_kref);
                 if (!rc) {
                         printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
-                        spin_unlock(&sess->sess_cmd_lock);
                         continue;
                 }
-                spin_unlock(&sess->sess_cmd_lock);
 
                 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
         }
@@ -664,11 +664,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
 
         target_remove_from_state_list(cmd);
 
-        /*
-         * Clear struct se_cmd->se_lun before the handoff to FE.
-         */
-        cmd->se_lun = NULL;
-
         spin_lock_irqsave(&cmd->t_state_lock, flags);
         /*
          * Determine if frontend context caller is requesting the stopping of
@@ -696,17 +691,6 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
         return cmd->se_tfo->check_stop_free(cmd);
 }
 
-static void transport_lun_remove_cmd(struct se_cmd *cmd)
-{
-        struct se_lun *lun = cmd->se_lun;
-
-        if (!lun)
-                return;
-
-        if (cmpxchg(&cmd->lun_ref_active, true, false))
-                percpu_ref_put(&lun->lun_ref);
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -797,8 +781,6 @@ static void target_handle_abort(struct se_cmd *cmd)
 
         WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
 
-        transport_lun_remove_cmd(cmd);
-
         transport_cmd_check_stop_to_fabric(cmd);
 }
 
@@ -1711,7 +1693,6 @@ static void target_complete_tmr_failure(struct work_struct *work)
         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
 
-        transport_lun_remove_cmd(se_cmd);
         transport_cmd_check_stop_to_fabric(se_cmd);
 }
 
@@ -1902,7 +1883,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                 goto queue_full;
 
 check_stop:
-        transport_lun_remove_cmd(cmd);
         transport_cmd_check_stop_to_fabric(cmd);
         return;
 
@@ -2056,7 +2036,6 @@ void target_execute_cmd(struct se_cmd *cmd)
 
         spin_lock_irq(&cmd->t_state_lock);
         cmd->t_state = TRANSPORT_PROCESSING;
-        cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
         cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
         spin_unlock_irq(&cmd->t_state_lock);
 
@@ -2201,7 +2180,6 @@ queue_status:
                 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
                 return;
         }
-        transport_lun_remove_cmd(cmd);
         transport_cmd_check_stop_to_fabric(cmd);
 }
 
@@ -2296,7 +2274,6 @@ static void target_complete_ok_work(struct work_struct *work)
                 if (ret)
                         goto queue_full;
 
-                transport_lun_remove_cmd(cmd);
                 transport_cmd_check_stop_to_fabric(cmd);
                 return;
         }
@@ -2322,7 +2299,6 @@ static void target_complete_ok_work(struct work_struct *work)
                 if (ret)
                         goto queue_full;
 
-                transport_lun_remove_cmd(cmd);
                 transport_cmd_check_stop_to_fabric(cmd);
                 return;
         }
@@ -2358,7 +2334,6 @@ queue_rsp:
                 if (ret)
                         goto queue_full;
 
-                transport_lun_remove_cmd(cmd);
                 transport_cmd_check_stop_to_fabric(cmd);
                 return;
         }
@@ -2394,7 +2369,6 @@ queue_status:
                 break;
         }
 
-        transport_lun_remove_cmd(cmd);
         transport_cmd_check_stop_to_fabric(cmd);
         return;
 
@@ -2721,9 +2695,6 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
                  */
                 if (cmd->state_active)
                         target_remove_from_state_list(cmd);
-
-                if (cmd->se_lun)
-                        transport_lun_remove_cmd(cmd);
         }
         if (aborted)
                 cmd->free_compl = &compl;
@@ -2765,7 +2736,6 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
                 ret = -ESHUTDOWN;
                 goto out;
         }
-        se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
         percpu_ref_get(&se_sess->cmd_count);
 out:
@@ -2796,6 +2766,9 @@ static void target_release_cmd_kref(struct kref *kref)
         struct completion *abrt_compl = se_cmd->abrt_compl;
         unsigned long flags;
 
+        if (se_cmd->lun_ref_active)
+                percpu_ref_put(&se_cmd->se_lun->lun_ref);
+
         if (se_sess) {
                 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
                 list_del_init(&se_cmd->se_cmd_list);
@@ -3273,6 +3246,22 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
 }
 EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
+/**
+ * target_send_busy - Send SCSI BUSY status back to the initiator
+ * @cmd: SCSI command for which to send a BUSY reply.
+ *
+ * Note: Only call this function if target_submit_cmd*() failed.
+ */
+int target_send_busy(struct se_cmd *cmd)
+{
+        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
+        cmd->scsi_status = SAM_STAT_BUSY;
+        trace_target_cmd_complete(cmd);
+        return cmd->se_tfo->queue_status(cmd);
+}
+EXPORT_SYMBOL(target_send_busy);
+
 static void target_tmr_work(struct work_struct *work)
 {
         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -442,11 +442,6 @@ static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
         return 0;
 }
 
-static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
-{
-        return 0;
-}
-
 static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
 {
         return 0;
@@ -463,7 +458,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
         .release_cmd = xcopy_pt_release_cmd,
         .check_stop_free = xcopy_pt_check_stop_free,
         .write_pending = xcopy_pt_write_pending,
-        .write_pending_status = xcopy_pt_write_pending_status,
         .queue_data_in = xcopy_pt_queue_data_in,
         .queue_status = xcopy_pt_queue_status,
 };
@@ -158,7 +158,6 @@ void ft_release_cmd(struct se_cmd *);
 int ft_queue_status(struct se_cmd *);
 int ft_queue_data_in(struct se_cmd *);
 int ft_write_pending(struct se_cmd *);
-int ft_write_pending_status(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
 void ft_aborted_task(struct se_cmd *);
@@ -184,13 +184,6 @@ int ft_queue_status(struct se_cmd *se_cmd)
         return 0;
 }
 
-int ft_write_pending_status(struct se_cmd *se_cmd)
-{
-        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
-        return cmd->write_data_len != se_cmd->data_length;
-}
-
 /*
  * Send TX_RDY (transfer ready).
  */
@@ -437,7 +437,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
         .sess_get_index = ft_sess_get_index,
         .sess_get_initiator_sid = NULL,
         .write_pending = ft_write_pending,
-        .write_pending_status = ft_write_pending_status,
         .set_default_node_attributes = ft_set_default_node_attr,
         .get_cmd_state = ft_get_cmd_state,
         .queue_data_in = ft_queue_data_in,