Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual drivers: UFS, esp_scsi, NCR5380,
  qla2xxx, lpfc, libsas, hisi_sas.

  In addition there's a set of mostly small updates to the target
  subsystem, and a set of conversions to the generic DMA API, which do
  have some potential for issues in the older drivers, but we'll handle
  those as case-by-case fixes.

  There's also a new myrs driver for the DAC960/mylex raid controllers,
  replacing the block based DAC960 driver, which is also being removed
  by Jens in this merge window.

  Plus the usual slew of trivial changes"

[ "myrs" stands for "MYlex Raid Scsi". Obviously. Silly of me to even
  wonder. There's also a "myrb" driver, where the 'b' stands for
  'block'. Truly, somebody has got mad naming skillz.  - Linus ]

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (237 commits)
  scsi: myrs: Fix the processor absent message in processor_show()
  scsi: myrs: Fix a logical vs bitwise bug
  scsi: hisi_sas: Fix NULL pointer dereference
  scsi: myrs: fix build failure on 32 bit
  scsi: fnic: replace gross legacy tag hack with blk-mq hack
  scsi: mesh: switch to generic DMA API
  scsi: ips: switch to generic DMA API
  scsi: smartpqi: fully convert to the generic DMA API
  scsi: vmw_pscsi: switch to generic DMA API
  scsi: snic: switch to generic DMA API
  scsi: qla4xxx: fully convert to the generic DMA API
  scsi: qla2xxx: fully convert to the generic DMA API
  scsi: qla1280: switch to generic DMA API
  scsi: qedi: fully convert to the generic DMA API
  scsi: qedf: fully convert to the generic DMA API
  scsi: pm8001: switch to generic DMA API
  scsi: nsp32: switch to generic DMA API
  scsi: mvsas: fully convert to the generic DMA API
  scsi: mvumi: switch to generic DMA API
  scsi: mpt3sas: switch to generic DMA API
  ...
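A large share of the commits listed above are mechanical conversions from the legacy pci_* DMA wrappers to the generic DMA API. A minimal sketch of the pattern those patches apply, against a hypothetical mydrv PCI driver (the mydrv names and the 32-bit mask are illustrative, not taken from any converted driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int mydrv_init_dma(struct pci_dev *pdev, size_t len)
{
        void *vaddr;
        dma_addr_t handle;

        /* was: pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) plus
         * pci_set_consistent_dma_mask(...) as two separate calls */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return -ENODEV;

        /* was: pci_alloc_consistent(pdev, len, &handle); the generic
         * call takes an explicit gfp_t instead of implying GFP_ATOMIC */
        vaddr = dma_alloc_coherent(&pdev->dev, len, &handle, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* ... hand vaddr/handle to the device ... */

        dma_free_coherent(&pdev->dev, len, vaddr, handle);
        return 0;
}

Streaming mappings convert the same way: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) becomes dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE), which is where the older drivers can surface the case-by-case issues the pull message mentions.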
drivers/target/iscsi/cxgbit/cxgbit_cm.c

@@ -598,9 +598,12 @@ out:
         mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
         struct cxgbit_np *cnp = np->np_context;
+        struct cxgbit_sock *csk, *tmp;
 
         cnp->com.state = CSK_STATE_DEAD;
         if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
         else
                 cxgbit_free_all_np(cnp);
 
+        spin_lock_bh(&cnp->np_accept_lock);
+        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+                list_del_init(&csk->accept_node);
+                __cxgbit_free_conn(csk);
+        }
+        spin_unlock_bh(&cnp->np_accept_lock);
+
         np->np_context = NULL;
         cxgbit_put_cnp(cnp);
 }
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
                           csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-        struct cxgbit_sock *csk = conn->context;
+        struct iscsi_conn *conn = csk->conn;
         bool release = false;
 
         pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
         spin_lock_bh(&csk->lock);
         switch (csk->com.state) {
         case CSK_STATE_ESTABLISHED:
-                if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                         csk->com.state = CSK_STATE_CLOSING;
                         cxgbit_send_halfclose(csk);
                 } else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
         cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+        __cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
         csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
         spin_unlock_bh(&cdev->cskq.lock);
 
+        cxgbit_free_skb(csk);
         cxgbit_put_cnp(csk->cnp);
         cxgbit_put_cdev(cdev);
 
         kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
                 goto rel_skb;
         }
 
+        cxgbit_get_cnp(cnp);
         cxgbit_get_cdev(cdev);
 
         spin_lock(&cdev->cskq.lock);
drivers/target/iscsi/iscsi_target.c

@@ -4355,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
         transport_deregister_session(sess->se_sess);
 
         if (sess->sess_ops->ErrorRecoveryLevel == 2)
-                iscsit_free_connection_recovery_entires(sess);
+                iscsit_free_connection_recovery_entries(sess);
 
         iscsit_free_all_ooo_cmdsns(sess);
 
drivers/target/iscsi/iscsi_target_erl0.c

@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
 
         pr_err("Time2Retain timer expired for SID: %u, cleaning up"
                 " iSCSI session.\n", sess->sid);
-        {
-        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-        if (tiqn) {
-                spin_lock(&tiqn->sess_err_stats.lock);
-                strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-                        (void *)sess->sess_ops->InitiatorName);
-                tiqn->sess_err_stats.last_sess_failure_type =
-                                ISCSI_SESS_ERR_CXN_TIMEOUT;
-                tiqn->sess_err_stats.cxn_timeout_errors++;
-                atomic_long_inc(&sess->conn_timeout_errors);
-                spin_unlock(&tiqn->sess_err_stats.lock);
-        }
-        }
 
+        iscsit_fill_cxn_timeout_err_stats(sess);
         spin_unlock_bh(&se_tpg->session_lock);
         iscsit_close_session(sess);
 }
drivers/target/iscsi/iscsi_target_erl1.c

@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
         na = iscsit_tpg_get_node_attrib(sess);
 
         if (!sess->sess_ops->ErrorRecoveryLevel) {
-                pr_debug("Unable to recover from DataOut timeout while"
-                        " in ERL=0.\n");
+                pr_err("Unable to recover from DataOut timeout while"
+                       " in ERL=0, closing iSCSI connection for I_T Nexus"
+                       " %s,i,0x%6phN,%s,t,0x%02x\n",
+                       sess->sess_ops->InitiatorName, sess->isid,
+                       sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
                 goto failure;
         }
 
         if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
-                pr_debug("Command ITT: 0x%08x exceeded max retries"
-                        " for DataOUT timeout %u, closing iSCSI connection.\n",
-                        cmd->init_task_tag, na->dataout_timeout_retries);
+                pr_err("Command ITT: 0x%08x exceeded max retries"
+                       " for DataOUT timeout %u, closing iSCSI connection for"
+                       " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+                       cmd->init_task_tag, na->dataout_timeout_retries,
+                       sess->sess_ops->InitiatorName, sess->isid,
+                       sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
                 goto failure;
         }
 
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
 
 failure:
         spin_unlock_bh(&cmd->dataout_timeout_lock);
+        iscsit_fill_cxn_timeout_err_stats(sess);
         iscsit_cause_connection_reinstatement(conn, 0);
         iscsit_dec_conn_usage_count(conn);
 }
drivers/target/iscsi/iscsi_target_erl2.c

@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
         return NULL;
 }
 
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
 {
         struct iscsi_cmd *cmd, *cmd_tmp;
         struct iscsi_conn_recovery *cr, *cr_tmp;
drivers/target/iscsi/iscsi_target_erl2.h

@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
 extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
                         struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
 extern int iscsit_remove_active_connection_recovery_entry(
                         struct iscsi_conn_recovery *, struct iscsi_session *);
 extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
drivers/target/iscsi/iscsi_target_login.c

@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
         }
 
         /*
-         * Check for any connection recovery entires containing CID.
+         * Check for any connection recovery entries containing CID.
          * We use the original ExpStatSN sent in the first login request
          * to acknowledge commands for the failed connection.
         *
drivers/target/iscsi/iscsi_target_stat.c

@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
 {
         struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
         struct iscsi_login_stats *lstat = &tiqn->login_stats;
-        unsigned char buf[224];
+        unsigned char buf[ISCSI_IQN_LEN];
 
         spin_lock(&lstat->lock);
-        snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+        snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
                 lstat->last_intr_fail_name : NONE);
         spin_unlock(&lstat->lock);
 
drivers/target/iscsi/iscsi_target_util.c

@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 void iscsit_handle_nopin_response_timeout(struct timer_list *t)
 {
         struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+        struct iscsi_session *sess = conn->sess;
 
         iscsit_inc_conn_usage_count(conn);
 
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
                 return;
         }
 
-        pr_debug("Did not receive response to NOPIN on CID: %hu on"
-                " SID: %u, failing connection.\n", conn->cid,
-                        conn->sess->sid);
+        pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+               " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+               conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+               sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
         conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
         spin_unlock_bh(&conn->nopin_timer_lock);
 
-        {
-        struct iscsi_portal_group *tpg = conn->sess->tpg;
-        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-        if (tiqn) {
-                spin_lock_bh(&tiqn->sess_err_stats.lock);
-                strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-                                conn->sess->sess_ops->InitiatorName);
-                tiqn->sess_err_stats.last_sess_failure_type =
-                                ISCSI_SESS_ERR_CXN_TIMEOUT;
-                tiqn->sess_err_stats.cxn_timeout_errors++;
-                atomic_long_inc(&conn->sess->conn_timeout_errors);
-                spin_unlock_bh(&tiqn->sess_err_stats.lock);
-        }
-        }
-
+        iscsit_fill_cxn_timeout_err_stats(sess);
         iscsit_cause_connection_reinstatement(conn, 0);
         iscsit_dec_conn_usage_count(conn);
 }
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
 
         return tpg->tpg_tiqn;
 }
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+        struct iscsi_portal_group *tpg = sess->tpg;
+        struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+        if (!tiqn)
+                return;
+
+        spin_lock_bh(&tiqn->sess_err_stats.lock);
+        strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+                sess->sess_ops->InitiatorName,
+                sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+        tiqn->sess_err_stats.last_sess_failure_type =
+                        ISCSI_SESS_ERR_CXN_TIMEOUT;
+        tiqn->sess_err_stats.cxn_timeout_errors++;
+        atomic_long_inc(&sess->conn_timeout_errors);
+        spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
drivers/target/iscsi/iscsi_target_util.h

@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
 extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
 
 #endif /*** ISCSI_TARGET_UTIL_H ***/
drivers/target/target_core_iblock.c

@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                 }
 
                 /* Always in 512 byte units for Linux/Block */
-                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+                block_lba += sg->length >> SECTOR_SHIFT;
                 sectors -= 1;
         }
 
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 }
 
 static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+                 struct sg_mapping_iter *miter)
 {
         struct se_device *dev = cmd->se_dev;
         struct blk_integrity *bi;
         struct bio_integrity_payload *bip;
         struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-        struct scatterlist *sg;
-        int i, rc;
+        int rc;
+        size_t resid, len;
 
         bi = bdev_get_integrity(ib_dev->ibd_bd);
         if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
                 return -ENODEV;
         }
 
-        bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+        bip = bio_integrity_alloc(bio, GFP_NOIO,
+                        min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
         if (IS_ERR(bip)) {
                 pr_err("Unable to allocate bio_integrity_payload\n");
                 return PTR_ERR(bip);
         }
 
-        bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
-                         dev->prot_length;
-        bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+        bip_set_seed(bip, bio->bi_iter.bi_sector);
 
         pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                  (unsigned long long)bip->bip_iter.bi_sector);
 
-        for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+        resid = bip->bip_iter.bi_size;
+        while (resid > 0 && sg_miter_next(miter)) {
 
-                rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
-                                            sg->offset);
-                if (rc != sg->length) {
+                len = min_t(size_t, miter->length, resid);
+                rc = bio_integrity_add_page(bio, miter->page, len,
+                                            offset_in_page(miter->addr));
+                if (rc != len) {
                         pr_err("bio_integrity_add_page() failed; %d\n", rc);
+                        sg_miter_stop(miter);
                         return -ENOMEM;
                 }
 
-                pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
-                          sg_page(sg), sg->length, sg->offset);
+                pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+                          miter->page, len, offset_in_page(miter->addr));
+
+                resid -= len;
+                if (len < miter->length)
+                        miter->consumed -= miter->length - len;
         }
+        sg_miter_stop(miter);
 
         return 0;
 }
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
         struct se_device *dev = cmd->se_dev;
         sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
         struct iblock_req *ibr;
-        struct bio *bio, *bio_start;
+        struct bio *bio;
         struct bio_list list;
         struct scatterlist *sg;
         u32 sg_num = sgl_nents;
         unsigned bio_cnt;
-        int i, op, op_flags = 0;
+        int i, rc, op, op_flags = 0;
+        struct sg_mapping_iter prot_miter;
 
         if (data_direction == DMA_TO_DEVICE) {
                 struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
         if (!bio)
                 goto fail_free_ibr;
 
-        bio_start = bio;
         bio_list_init(&list);
         bio_list_add(&list, bio);
 
         refcount_set(&ibr->pending, 2);
         bio_cnt = 1;
 
+        if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+                sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+                               op == REQ_OP_READ ? SG_MITER_FROM_SG :
+                                                   SG_MITER_TO_SG);
+
         for_each_sg(sgl, sg, sgl_nents, i) {
                 /*
                  * XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  */
                 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                 != sg->length) {
+                        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+                                if (rc)
+                                        goto fail_put_bios;
+                        }
+
                         if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                 iblock_submit_bios(&list);
                                 bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                 }
 
                 /* Always in 512 byte units for Linux/Block */
-                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+                block_lba += sg->length >> SECTOR_SHIFT;
                 sg_num--;
         }
 
         if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-                int rc = iblock_alloc_bip(cmd, bio_start);
+                rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                 if (rc)
                         goto fail_put_bios;
         }
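The iblock_alloc_bip() rework above swaps a per-scatterlist for_each_sg() walk for a struct sg_mapping_iter, so protection information can be consumed incrementally across several bios instead of being attached to one bio_start. A minimal sketch of the sg_miter idiom in isolation (copy_from_sgl() is a hypothetical helper, not part of this patch):

#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t copy_from_sgl(struct scatterlist *sgl, unsigned int nents,
                            void *dst, size_t resid)
{
        struct sg_mapping_iter miter;
        size_t copied = 0;

        sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
        while (resid > 0 && sg_miter_next(&miter)) {
                /* miter.addr maps the current page fragment */
                size_t len = min(miter.length, resid);

                memcpy(dst + copied, miter.addr, len);
                copied += len;
                resid -= len;
                /* give back what we did not use, as the hunk above does */
                if (len < miter.length)
                        miter.consumed -= miter.length - len;
        }
        sg_miter_stop(&miter);
        return copied;
}

Because the iterator keeps its position in prot_miter across calls, iblock_alloc_bip() can now be invoked once per bio and resume where the previous bio's integrity payload ended.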
drivers/target/target_core_iblock.h

@@ -9,7 +9,6 @@
 #define IBLOCK_VERSION          "4.0"
 
 #define IBLOCK_MAX_CDBS         16
-#define IBLOCK_LBA_SHIFT        9
 
 struct iblock_req {
         refcount_t pending;
drivers/target/target_core_sbc.c

@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
         unsigned int offset;
         sense_reason_t ret = TCM_NO_SENSE;
         int i, count;
+
+        if (!success)
+                return 0;
+
         /*
          * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
          *
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
         struct se_device *dev = cmd->se_dev;
         sense_reason_t ret = TCM_NO_SENSE;
 
-        /*
-         * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
-         * within target_complete_ok_work() if the command was successfully
-         * sent to the backend driver.
-         */
         spin_lock_irq(&cmd->t_state_lock);
-        if (cmd->transport_state & CMD_T_SENT) {
-                cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+        if (success) {
                 *post_ret = 1;
 
                 if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
                 int *post_ret)
 {
         struct se_device *dev = cmd->se_dev;
-        struct scatterlist *write_sg = NULL, *sg;
+        struct sg_table write_tbl = { };
+        struct scatterlist *write_sg, *sg;
         unsigned char *buf = NULL, *addr;
         struct sg_mapping_iter m;
         unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
                 goto out;
         }
 
-        write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
-                         GFP_KERNEL);
-        if (!write_sg) {
+        if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
                 pr_err("Unable to allocate compare_and_write sg\n");
                 ret = TCM_OUT_OF_RESOURCES;
                 goto out;
         }
-        sg_init_table(write_sg, cmd->t_data_nents);
+        write_sg = write_tbl.sgl;
         /*
          * Setup verify and write data payloads from total NumberLBAs.
          */
@@ -597,7 +594,7 @@ out:
          * sbc_compare_and_write() before the original READ I/O submission.
          */
         up(&dev->caw_sem);
-        kfree(write_sg);
+        sg_free_table(&write_tbl);
         kfree(buf);
         return ret;
 }
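The compare_and_write_callback() hunks above replace an open-coded kmalloc_array() + sg_init_table() pair with struct sg_table, whose allocator also handles chained tables when the entry count is large. A minimal sketch of that allocation pattern (build_scratch_sgl() is a hypothetical caller, not part of this patch):

#include <linux/scatterlist.h>
#include <linux/gfp.h>

static int build_scratch_sgl(unsigned int nents)
{
        struct sg_table tbl = { };

        /* allocates and sg_init_table()s all entries in one call,
         * chaining chunks internally if nents is large */
        if (sg_alloc_table(&tbl, nents, GFP_KERNEL) < 0)
                return -ENOMEM;

        /* tbl.sgl is the head entry; fill it with sg_set_page() etc. */

        /* walks and frees chained chunks too, which a bare kfree()
         * of the head pointer would leak */
        sg_free_table(&tbl);
        return 0;
}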
drivers/target/target_core_transport.c

@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
 void transport_generic_request_failure(struct se_cmd *cmd,
                 sense_reason_t sense_reason)
 {
-        int ret = 0, post_ret = 0;
+        int ret = 0;
 
         pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                  sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
          */
         transport_complete_task_attr(cmd);
 
-        /*
-         * Handle special case for COMPARE_AND_WRITE failure, where the
-         * callback is expected to drop the per device ->caw_sem.
-         */
-        if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-             cmd->transport_complete_callback)
-                cmd->transport_complete_callback(cmd, false, &post_ret);
+        if (cmd->transport_complete_callback)
+                cmd->transport_complete_callback(cmd, false, NULL);
 
         if (transport_check_aborted_status(cmd, 1))
                 return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
          * Determine if frontend context caller is requesting the stopping of
          * this command for frontend exceptions.
          *
-         * If the received CDB has aleady been aborted stop processing it here.
+         * If the received CDB has already been aborted stop processing it here.
          */
         spin_lock_irq(&cmd->t_state_lock);
         if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
         }
 
         /*
-         * Determine is the TCM fabric module has already allocated physical
+         * Determine if the TCM fabric module has already allocated physical
          * memory, and is directly calling transport_generic_map_mem_to_cmd()
         * beforehand.
         */
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
         if (se_sess) {
                 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
                 list_del_init(&se_cmd->se_cmd_list);
-                if (list_empty(&se_sess->sess_cmd_list))
+                if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
                         wake_up(&se_sess->cmd_list_wq);
                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
         }
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
         spin_lock_irq(&se_sess->sess_cmd_lock);
         do {
-                ret = wait_event_interruptible_lock_irq_timeout(
+                ret = wait_event_lock_irq_timeout(
                                 se_sess->cmd_list_wq,
                                 list_empty(&se_sess->sess_cmd_list),
                                 se_sess->sess_cmd_lock, 180 * HZ);
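The last transport hunk drops the _interruptible_ variant, so a signal pending on the caller can no longer abort the session-command drain; only the condition or the 180-second timeout ends each wait. A minimal sketch of the macro's contract, with illustrative names (my_wq, my_lock, drained are not from this patch):

#include <linux/wait.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static bool drained;

static void wait_until_drained(void)
{
        long ret;

        spin_lock_irq(&my_lock);
        do {
                /* sleeps with my_lock dropped, retakes it before
                 * rechecking; returns 0 on timeout, else the remaining
                 * jiffies -- so this loops every 180s until drained */
                ret = wait_event_lock_irq_timeout(my_wq, drained,
                                                  my_lock, 180 * HZ);
        } while (!ret);
        spin_unlock_irq(&my_lock);
}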
drivers/target/target_core_xcopy.c

@@ -391,7 +391,6 @@ out:
 struct xcopy_pt_cmd {
         bool remote_port;
         struct se_cmd se_cmd;
-        struct xcopy_op *xcopy_op;
         struct completion xpt_passthrough_sem;
         unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
          * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
          */
         target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
-        xpt_cmd->xcopy_op = xop;
         target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
 
         cmd->tag = 0;