Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup. (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...
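A note on the headline helper before the hunks: target_alloc_session(), added in the target_core_transport.c hunk below, folds session init, optional percpu_ida tag pre-allocation, se_node_acl lookup, and transport_register_session() into one call. A minimal sketch of the calling convention that the fabric-driver conversions below all follow; the my_fabric_* names and struct are hypothetical, not part of this series:

struct my_fabric_nexus {                /* hypothetical fabric-private state */
        struct se_session *se_sess;
};

static int my_fabric_alloc_sess_cb(struct se_portal_group *se_tpg,
                                   struct se_session *se_sess, void *p)
{
        /* Runs after the ACL lookup but before transport_register_session();
         * returning non-zero makes target_alloc_session() free the session
         * and return ERR_PTR(rc). */
        struct my_fabric_nexus *nexus = p;

        nexus->se_sess = se_sess;
        return 0;
}

static struct se_session *my_fabric_make_session(struct se_portal_group *se_tpg,
                                                 const char *initiator_name,
                                                 struct my_fabric_nexus *nexus)
{
        /* tag_num/tag_size of 0/0 skip percpu_ida pre-allocation; a driver
         * like tcm_fc instead passes TCM_FC_DEFAULT_TAGS and
         * sizeof(struct ft_cmd). Callers check the result with IS_ERR(). */
        return target_alloc_session(se_tpg, 0, 0, TARGET_PROT_NORMAL,
                                    initiator_name, nexus,
                                    my_fabric_alloc_sess_cb);
}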
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
 
 /* Start items for tcm_loop_nexus_cit */
 
+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+                                  struct se_session *se_sess, void *p)
+{
+        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+                                        struct tcm_loop_tpg, tl_se_tpg);
+
+        tl_tpg->tl_nexus = p;
+        return 0;
+}
+
 static int tcm_loop_make_nexus(
         struct tcm_loop_tpg *tl_tpg,
         const char *name)
 {
-        struct se_portal_group *se_tpg;
         struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
         struct tcm_loop_nexus *tl_nexus;
-        int ret = -ENOMEM;
+        int ret;
 
         if (tl_tpg->tl_nexus) {
                 pr_debug("tl_tpg->tl_nexus already exists\n");
                 return -EEXIST;
         }
-        se_tpg = &tl_tpg->tl_se_tpg;
 
         tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
         if (!tl_nexus) {
                 pr_err("Unable to allocate struct tcm_loop_nexus\n");
                 return -ENOMEM;
         }
-        /*
-         * Initialize the struct se_session pointer
-         */
-        tl_nexus->se_sess = transport_init_session(
-                                TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+
+        tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+                                        name, tl_nexus, tcm_loop_alloc_sess_cb);
         if (IS_ERR(tl_nexus->se_sess)) {
                 ret = PTR_ERR(tl_nexus->se_sess);
-                goto out;
+                kfree(tl_nexus);
+                return ret;
         }
-        /*
-         * Since we are running in 'demo mode' this call with generate a
-         * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
-         * Initiator port name of the passed configfs group 'name'.
-         */
-        tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                                se_tpg, (unsigned char *)name);
-        if (!tl_nexus->se_sess->se_node_acl) {
-                transport_free_session(tl_nexus->se_sess);
-                goto out;
-        }
-        /* Now, register the I_T Nexus as active. */
-        transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
-                        tl_nexus->se_sess, tl_nexus);
-        tl_tpg->tl_nexus = tl_nexus;
+
         pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                 name);
         return 0;
-
-out:
-        kfree(tl_nexus);
-        return ret;
 }
 
 static int tcm_loop_drop_nexus(
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
         struct sbp_session *sess;
         int ret;
         char guid_str[17];
-        struct se_node_acl *se_nacl;
+
+        snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
 
         sess = kmalloc(sizeof(*sess), GFP_KERNEL);
         if (!sess) {
                 pr_err("failed to allocate session descriptor\n");
                 return ERR_PTR(-ENOMEM);
         }
+        spin_lock_init(&sess->lock);
+        INIT_LIST_HEAD(&sess->login_list);
+        INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+        sess->guid = guid;
 
-        sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+        sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+                                sizeof(struct sbp_target_request),
+                                TARGET_PROT_NORMAL, guid_str,
+                                sess, NULL);
         if (IS_ERR(sess->se_sess)) {
                 pr_err("failed to init se_session\n");
-
                 ret = PTR_ERR(sess->se_sess);
                 kfree(sess);
                 return ERR_PTR(ret);
         }
 
-        snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
-        se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
-        if (!se_nacl) {
-                pr_warn("Node ACL not found for %s\n", guid_str);
-
-                transport_free_session(sess->se_sess);
-                kfree(sess);
-
-                return ERR_PTR(-EPERM);
-        }
-
-        sess->se_sess->se_node_acl = se_nacl;
-
-        spin_lock_init(&sess->lock);
-        INIT_LIST_HEAD(&sess->login_list);
-        INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-
-        sess->guid = guid;
-
-        transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
         return sess;
 }
 
@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
                         STATUS_BLOCK_SBP_STATUS(
                                 SBP_STATUS_REQ_TYPE_NOTSUPP));
                 sbp_send_status(req);
-                sbp_free_request(req);
                 return;
         case 3: /* Dummy ORB */
                 req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
                         STATUS_BLOCK_SBP_STATUS(
                                 SBP_STATUS_DUMMY_ORB_COMPLETE));
                 sbp_send_status(req);
-                sbp_free_request(req);
                 return;
         default:
                 BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
         return active;
 }
 
+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+        struct fw_card *card, u64 next_orb)
+{
+        struct se_session *se_sess = sess->se_sess;
+        struct sbp_target_request *req;
+        int tag;
+
+        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+        if (tag < 0)
+                return ERR_PTR(-ENOMEM);
+
+        req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+        memset(req, 0, sizeof(*req));
+        req->se_cmd.map_tag = tag;
+        req->se_cmd.tag = next_orb;
+
+        return req;
+}
+
 static void tgt_agent_fetch_work(struct work_struct *work)
 {
         struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
         u64 next_orb = agent->orb_pointer;
 
         while (next_orb && tgt_agent_check_active(agent)) {
-                req = kzalloc(sizeof(*req), GFP_KERNEL);
-                if (!req) {
+                req = sbp_mgt_get_req(sess, sess->card, next_orb);
+                if (IS_ERR(req)) {
                         spin_lock_bh(&agent->lock);
                         agent->state = AGENT_STATE_DEAD;
                         spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
                         spin_unlock_bh(&agent->lock);
 
                         sbp_send_status(req);
-                        sbp_free_request(req);
                         return;
                 }
 
@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
         req->se_cmd.tag = req->orb_pointer;
         if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
                               req->sense_buf, unpacked_lun, data_length,
-                              TCM_SIMPLE_TAG, data_dir, 0))
+                              TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
                 goto err;
 
         return;
@@ -1244,7 +1245,6 @@ err:
                 STATUS_BLOCK_LEN(1) |
                 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
         sbp_send_status(req);
-        sbp_free_request(req);
 }
 
 /*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
 
 static int sbp_send_status(struct sbp_target_request *req)
 {
-        int ret, length;
+        int rc, ret = 0, length;
         struct sbp_login_descriptor *login = req->login;
 
         length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
 
-        ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+        rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
                         login->status_fifo_addr, &req->status, length);
-        if (ret != RCODE_COMPLETE) {
-                pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
-                return -EIO;
+        if (rc != RCODE_COMPLETE) {
+                pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+                ret = -EIO;
+                goto put_ref;
         }
 
         pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
                         req->orb_pointer);
-
-        return 0;
+        /*
+         * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+         * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+         * final se_cmd->cmd_kref put.
+         */
+put_ref:
+        target_put_sess_cmd(&req->se_cmd);
+        return ret;
 }
 
 static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
 
 static void sbp_free_request(struct sbp_target_request *req)
 {
+        struct se_cmd *se_cmd = &req->se_cmd;
+        struct se_session *se_sess = se_cmd->se_sess;
+
         kfree(req->pg_tbl);
         kfree(req->cmd_buf);
-        kfree(req);
+
+        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
 static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
                 rcode = RCODE_CONFLICT_ERROR;
                 goto out;
         }
-
         req = kzalloc(sizeof(*req), GFP_ATOMIC);
         if (!req) {
                 rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
         struct sbp_target_request *req = container_of(se_cmd,
                         struct sbp_target_request, se_cmd);
 
-        transport_generic_free_cmd(&req->se_cmd, 0);
-        return 1;
+        return transport_generic_free_cmd(&req->se_cmd, 0);
 }
 
 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
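The sbp hunks above are one instance of the generic percpu_ida descriptor recipe: target_alloc_session(..., 128, sizeof(struct sbp_target_request), ...) pre-allocates a 128-entry sess_cmd_map, and per-request allocate/release then reduces to tag arithmetic with no kzalloc()/kfree() on the I/O path. A condensed sketch of that recipe (the sbp_req_get/sbp_req_put names are illustrative renames, not additional driver code):

/* allocate: the tag indexes a slot in the session's pre-allocated map */
static struct sbp_target_request *sbp_req_get(struct se_session *se_sess)
{
        struct sbp_target_request *req;
        int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);

        if (tag < 0)
                return ERR_PTR(-ENOMEM);

        req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
        memset(req, 0, sizeof(*req));
        req->se_cmd.map_tag = tag;      /* remembered for the release side */
        return req;
}

/* release: no kfree() - the slot simply returns to the tag pool */
static void sbp_req_put(struct se_session *se_sess,
                        struct sbp_target_request *req)
{
        percpu_ida_free(&se_sess->sess_tag_pool, req->se_cmd.map_tag);
}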
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                 se_cmd->lun_ref_active = true;
 
                 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+                    deve->lun_access_ro) {
                         pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                 " Access for 0x%08llx\n",
                                 se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)
 
         rcu_read_lock();
         deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
-        ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+        ret = deve && deve->lun_access_ro;
         rcu_read_unlock();
 
         return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(
 
 void core_update_device_list_access(
         u64 mapped_lun,
-        u32 lun_access,
+        bool lun_access_ro,
         struct se_node_acl *nacl)
 {
         struct se_dev_entry *deve;
 
         mutex_lock(&nacl->lun_entry_mutex);
         deve = target_nacl_find_deve(nacl, mapped_lun);
-        if (deve) {
-                if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
-                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
-                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-                } else {
-                        deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
-                        deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-                }
-        }
+        if (deve)
+                deve->lun_access_ro = lun_access_ro;
         mutex_unlock(&nacl->lun_entry_mutex);
 }
 
@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
         struct se_lun *lun,
         struct se_lun_acl *lun_acl,
         u64 mapped_lun,
-        u32 lun_access,
+        bool lun_access_ro,
         struct se_node_acl *nacl,
         struct se_portal_group *tpg)
 {
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
         kref_init(&new->pr_kref);
         init_completion(&new->pr_comp);
 
-        if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
-                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
-        else
-                new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-
+        new->lun_access_ro = lun_access_ro;
         new->creation_time = get_jiffies_64();
         new->attach_count++;
 
@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(
 
         hlist_del_rcu(&orig->link);
         clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-        orig->lun_flags = 0;
+        orig->lun_access_ro = false;
         orig->creation_time = 0;
         orig->attach_count--;
         /*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
 {
         int rc;
 
-        rc = core_tpg_add_lun(tpg, lun,
-                                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+        rc = core_tpg_add_lun(tpg, lun, false, dev);
         if (rc < 0)
                 return rc;
 
@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
         struct se_portal_group *tpg,
         struct se_lun_acl *lacl,
         struct se_lun *lun,
-        u32 lun_access)
+        bool lun_access_ro)
 {
         struct se_node_acl *nacl = lacl->se_lun_nacl;
         /*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
         if (!nacl)
                 return -EINVAL;
 
-        if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
-            (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
-                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+        if (lun->lun_access_ro)
+                lun_access_ro = true;
 
         lacl->se_lun = lun;
 
         if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
-                        lun_access, nacl, tpg) < 0)
+                        lun_access_ro, nacl, tpg) < 0)
                 return -EINVAL;
 
         pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
                 " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
                 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
-                (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+                lun_access_ro ? "RO" : "RW",
                 nacl->initiatorname);
         /*
          * Check to see if there are any existing persistent reservation APTPL
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
                         struct se_lun_acl, se_lun_group);
         struct se_portal_group *se_tpg;
         struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-        int lun_access;
+        bool lun_access_ro;
 
         if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
                 pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
         }
         /*
          * If this struct se_node_acl was dynamically generated with
-         * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
-         * which be will write protected (READ-ONLY) when
+         * tpg_1/attrib/generate_node_acls=1, use the existing
+         * deve->lun_access_ro value, which will be true when
          * tpg_1/attrib/demo_mode_write_protect=1
          */
         rcu_read_lock();
         deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
         if (deve)
-                lun_access = deve->lun_flags;
+                lun_access_ro = deve->lun_access_ro;
         else
-                lun_access =
+                lun_access_ro =
                         (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
-                                se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
-                                           TRANSPORT_LUNFLAGS_READ_WRITE;
+                                se_tpg)) ? true : false;
         rcu_read_unlock();
         /*
          * Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
          * This value is what the SCSI Initiator actually sees the
          * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
          */
-        return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+        return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
 }
 
 static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
         rcu_read_lock();
         deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
         if (deve) {
-                len = sprintf(page, "%d\n",
-                        (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+                len = sprintf(page, "%d\n", deve->lun_access_ro);
         }
         rcu_read_unlock();
 
@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
         struct se_lun_acl *lacl = item_to_lun_acl(item);
         struct se_node_acl *se_nacl = lacl->se_lun_nacl;
         struct se_portal_group *se_tpg = se_nacl->se_tpg;
-        unsigned long op;
+        unsigned long wp;
         int ret;
 
-        ret = kstrtoul(page, 0, &op);
+        ret = kstrtoul(page, 0, &wp);
         if (ret)
                 return ret;
 
-        if ((op != 1) && (op != 0))
+        if ((wp != 1) && (wp != 0))
                 return -EINVAL;
 
-        core_update_device_list_access(lacl->mapped_lun, (op) ?
-                        TRANSPORT_LUNFLAGS_READ_ONLY :
-                        TRANSPORT_LUNFLAGS_READ_WRITE,
-                        lacl->se_lun_nacl);
+        /* wp=1 means lun_access_ro=true */
+        core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
 
         pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
                 " Mapped LUN: %llu Write Protect bit to %s\n",
                 se_tpg->se_tpg_tfo->get_fabric_name(),
-                se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+                se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
 
         return count;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
@@ -412,9 +412,40 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
         return 0;
 }
 
+static sense_reason_t
+iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+{
+        struct se_device *dev = cmd->se_dev;
+        struct scatterlist *sg = &cmd->t_data_sg[0];
+        struct page *page = NULL;
+        int ret;
+
+        if (sg->offset) {
+                page = alloc_page(GFP_KERNEL);
+                if (!page)
+                        return TCM_OUT_OF_RESOURCES;
+                sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
+                                  dev->dev_attrib.block_size);
+        }
+
+        ret = blkdev_issue_write_same(bdev,
+                                target_to_linux_sector(dev, cmd->t_task_lba),
+                                target_to_linux_sector(dev,
+                                        sbc_get_write_same_sectors(cmd)),
+                                GFP_KERNEL, page ? page : sg_page(sg));
+        if (page)
+                __free_page(page);
+        if (ret)
+                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+        target_complete_cmd(cmd, GOOD);
+        return 0;
+}
+
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
+        struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
         struct iblock_req *ibr;
         struct scatterlist *sg;
         struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
                 return TCM_INVALID_CDB_FIELD;
         }
 
+        if (bdev_write_same(bdev))
+                return iblock_execute_write_same_direct(bdev, cmd);
+
         ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
         if (!ibr)
                 goto fail;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
@@ -59,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void target_pr_kref_release(struct kref *);
 void core_free_device_list_for_node(struct se_node_acl *,
                 struct se_portal_group *);
-void core_update_device_list_access(u64, u32, struct se_node_acl *);
+void core_update_device_list_access(u64, bool, struct se_node_acl *);
 struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
 int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-                u64, u32, struct se_node_acl *, struct se_portal_group *);
+                u64, bool, struct se_node_acl *, struct se_portal_group *);
 void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
                 struct se_node_acl *, struct se_portal_group *);
 void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -72,7 +72,7 @@ void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
 struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
                 struct se_node_acl *, u64, int *);
 int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-                struct se_lun_acl *, struct se_lun *lun, u32);
+                struct se_lun_acl *, struct se_lun *lun, bool);
 int core_dev_del_initiator_node_lun_acl(struct se_lun *,
                 struct se_lun_acl *);
 void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -118,7 +118,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
 struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
 int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
-                u32, struct se_device *);
+                bool, struct se_device *);
 void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
 struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
                 const char *initiatorname);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
         int length = 0;
         int ret;
         int i;
-        bool read_only = target_lun_is_rdonly(cmd);;
 
         memset(buf, 0, SE_MODE_PAGE_BUF);
 
@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
         length = ten ? 3 : 2;
 
         /* DEVICE-SPECIFIC PARAMETER */
-        if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+        if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
                 spc_modesense_write_protect(&buf[length], type);
 
         /*
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
         struct se_portal_group *tpg,
         struct se_lun *lun_orig)
 {
-        u32 lun_access = 0;
+        bool lun_access_ro = true;
         struct se_lun *lun;
         struct se_device *dev;
 
@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
          * demo_mode_write_protect is ON, or READ_ONLY;
          */
         if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
-                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+                lun_access_ro = false;
         } else {
                 /*
                  * Allow only optical drives to issue R/W in default RO
                  * demo mode.
                  */
                 if (dev->transport->get_device_type(dev) == TYPE_DISK)
-                        lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+                        lun_access_ro = true;
                 else
-                        lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+                        lun_access_ro = false;
         }
 
         pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
                 " access for LUN in Demo Mode\n",
                 tpg->se_tpg_tfo->get_fabric_name(),
                 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
-                (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
-                "READ-WRITE" : "READ-ONLY");
+                lun_access_ro ? "READ-ONLY" : "READ-WRITE");
 
         core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
-                                         lun_access, acl, tpg);
+                                         lun_access_ro, acl, tpg);
         /*
          * Check to see if there are any existing persistent reservation
          * APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
                 return PTR_ERR(se_tpg->tpg_virt_lun0);
 
         ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
-                        TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+                        true, g_lun0_dev);
         if (ret < 0) {
                 kfree(se_tpg->tpg_virt_lun0);
                 return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
 int core_tpg_add_lun(
         struct se_portal_group *tpg,
         struct se_lun *lun,
-        u32 lun_access,
+        bool lun_access_ro,
         struct se_device *dev)
 {
         int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
         spin_unlock(&dev->se_port_lock);
 
         if (dev->dev_flags & DF_READ_ONLY)
-                lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+                lun->lun_access_ro = true;
         else
-                lun->lun_access = lun_access;
+                lun->lun_access_ro = lun_access_ro;
         if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
         mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
         struct se_session *se_sess;
         int rc;
 
+        if (tag_num != 0 && !tag_size) {
+                pr_err("init_session_tags called with percpu-ida tag_num:"
+                       " %u, but zero tag_size\n", tag_num);
+                return ERR_PTR(-EINVAL);
+        }
+        if (!tag_num && tag_size) {
+                pr_err("init_session_tags called with percpu-ida tag_size:"
+                       " %u, but zero tag_num\n", tag_size);
+                return ERR_PTR(-EINVAL);
+        }
+
         se_sess = transport_init_session(sup_prot_ops);
         if (IS_ERR(se_sess))
                 return se_sess;
@@ -374,6 +385,51 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
+struct se_session *
+target_alloc_session(struct se_portal_group *tpg,
+                unsigned int tag_num, unsigned int tag_size,
+                enum target_prot_op prot_op,
+                const char *initiatorname, void *private,
+                int (*callback)(struct se_portal_group *,
+                                struct se_session *, void *))
+{
+        struct se_session *sess;
+
+        /*
+         * If the fabric driver is using percpu-ida based pre allocation
+         * of I/O descriptor tags, go ahead and perform that setup now..
+         */
+        if (tag_num != 0)
+                sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+        else
+                sess = transport_init_session(prot_op);
+
+        if (IS_ERR(sess))
+                return sess;
+
+        sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+                                        (unsigned char *)initiatorname);
+        if (!sess->se_node_acl) {
+                transport_free_session(sess);
+                return ERR_PTR(-EACCES);
+        }
+        /*
+         * Go ahead and perform any remaining fabric setup that is
+         * required before transport_register_session().
+         */
+        if (callback != NULL) {
+                int rc = callback(tpg, sess, private);
+                if (rc) {
+                        transport_free_session(sess);
+                        return ERR_PTR(rc);
+                }
+        }
+
+        transport_register_session(tpg, sess->se_node_acl, sess, private);
+        return sess;
+}
+EXPORT_SYMBOL(target_alloc_session);
+
 static void target_release_session(struct kref *kref)
 {
         struct se_session *se_sess = container_of(kref,
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)
 
         switch (cmd->data_direction) {
         case DMA_FROM_DEVICE:
+                if (cmd->scsi_status)
+                        goto queue_status;
+
                 trace_target_cmd_complete(cmd);
                 ret = cmd->se_tfo->queue_data_in(cmd);
                 break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
         }
         /* Fall through for DMA_TO_DEVICE */
         case DMA_NONE:
+queue_status:
                 trace_target_cmd_complete(cmd);
                 ret = cmd->se_tfo->queue_status(cmd);
                 break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
 queue_rsp:
         switch (cmd->data_direction) {
         case DMA_FROM_DEVICE:
+                if (cmd->scsi_status)
+                        goto queue_status;
+
                 atomic_long_add(cmd->data_length,
                                 &cmd->se_lun->lun_stats.tx_data_octets);
                 /*
@@ -2111,6 +2174,7 @@ queue_rsp:
         }
         /* Fall through for DMA_TO_DEVICE */
         case DMA_NONE:
+queue_status:
                 trace_target_cmd_complete(cmd);
                 ret = cmd->se_tfo->queue_status(cmd);
                 if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
         list_for_each_entry_safe(se_cmd, tmp_cmd,
                                 &se_sess->sess_wait_list, se_cmd_list) {
-                list_del_init(&se_cmd->se_cmd_list);
-
                 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                         " %d\n", se_cmd, se_cmd->t_state,
                         se_cmd->se_tfo->get_cmd_state(se_cmd));
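The queue_status hunks above implement "Avoid DataIN transfers for non-GOOD SAM status": a READ that completes with, say, CHECK_CONDITION now skips queue_data_in() entirely and goes straight to the status callback. Reduced to its skeleton (a sketch of the control flow, not the full kernel function):

static int queue_response(struct se_cmd *cmd)
{
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
                        break;  /* non-GOOD status: suppress the DataIN phase */
                return cmd->se_tfo->queue_data_in(cmd);
        default:
                break;          /* DMA_TO_DEVICE / DMA_NONE: status only */
        }
        return cmd->se_tfo->queue_status(cmd);
}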
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
 #include <linux/stringify.h>
+#include <linux/bitops.h>
 #include <net/genetlink.h>
 #include <scsi/scsi_common.h>
 #include <scsi/scsi_proto.h>
@@ -63,8 +64,11 @@
 
 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
 
+#define DATA_BLOCK_BITS 256
+#define DATA_BLOCK_SIZE 4096
+
 #define CMDR_SIZE (16 * 4096)
-#define DATA_SIZE (257 * 4096)
+#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
 
 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
 
@@ -93,12 +97,11 @@ struct tcmu_dev {
         u32 cmdr_size;
         u32 cmdr_last_cleaned;
         /* Offset of data ring from start of mb */
         /* Must add data_off and mb_addr to get the address */
         size_t data_off;
         size_t data_size;
-        /* Ring head + tail values. */
-        /* Must add data_off and mb_addr to get the address */
-        size_t data_head;
-        size_t data_tail;
+
+        DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
+
         wait_queue_head_t wait_cmdr;
         /* TODO should this be a mutex? */
@@ -122,9 +125,9 @@ struct tcmu_cmd {
 
         uint16_t cmd_id;
 
-        /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
+        /* Can't use se_cmd when cleaning up expired cmds, because if
            cmd has been completed then accessing se_cmd is off limits */
-        size_t data_length;
+        DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
 
         unsigned long deadline;
 
@@ -168,13 +171,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
 
         tcmu_cmd->se_cmd = se_cmd;
         tcmu_cmd->tcmu_dev = udev;
-        tcmu_cmd->data_length = se_cmd->data_length;
-
-        if (se_cmd->se_cmd_flags & SCF_BIDI) {
-                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-                tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
-        }
-
         tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
 
         idr_preload(GFP_KERNEL);
@@ -231,105 +227,126 @@ static inline size_t head_to_end(size_t head, size_t size)
         return size - head;
 }
 
+static inline void new_iov(struct iovec **iov, int *iov_cnt,
+                           struct tcmu_dev *udev)
+{
+        struct iovec *iovec;
+
+        if (*iov_cnt != 0)
+                (*iov)++;
+        (*iov_cnt)++;
+
+        iovec = *iov;
+        memset(iovec, 0, sizeof(struct iovec));
+}
+
 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
 
+/* offset is relative to mb_addr */
+static inline size_t get_block_offset(struct tcmu_dev *dev,
+                int block, int remaining)
+{
+        return dev->data_off + block * DATA_BLOCK_SIZE +
+                DATA_BLOCK_SIZE - remaining;
+}
+
+static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
+{
+        return (size_t)iov->iov_base + iov->iov_len;
+}
+
 static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
         struct scatterlist *data_sg, unsigned int data_nents,
         struct iovec **iov, int *iov_cnt, bool copy_data)
 {
-        int i;
+        int i, block;
+        int block_remaining = 0;
         void *from, *to;
-        size_t copy_bytes;
+        size_t copy_bytes, to_offset;
         struct scatterlist *sg;
 
         for_each_sg(data_sg, sg, data_nents, i) {
-                copy_bytes = min_t(size_t, sg->length,
-                                 head_to_end(udev->data_head, udev->data_size));
+                int sg_remaining = sg->length;
                 from = kmap_atomic(sg_page(sg)) + sg->offset;
-                to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
-
-                if (copy_data) {
-                        memcpy(to, from, copy_bytes);
-                        tcmu_flush_dcache_range(to, copy_bytes);
-                }
-
-                /* Even iov_base is relative to mb_addr */
-                (*iov)->iov_len = copy_bytes;
-                (*iov)->iov_base = (void __user *) udev->data_off +
-                                                udev->data_head;
-                (*iov_cnt)++;
-                (*iov)++;
-
-                UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
-                /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
-                if (sg->length != copy_bytes) {
-                        void *from_skip = from + copy_bytes;
-
-                        copy_bytes = sg->length - copy_bytes;
-
-                        (*iov)->iov_len = copy_bytes;
-                        (*iov)->iov_base = (void __user *) udev->data_off +
-                                                        udev->data_head;
-
+                while (sg_remaining > 0) {
+                        if (block_remaining == 0) {
+                                block = find_first_zero_bit(udev->data_bitmap,
+                                                DATA_BLOCK_BITS);
+                                block_remaining = DATA_BLOCK_SIZE;
+                                set_bit(block, udev->data_bitmap);
+                        }
+                        copy_bytes = min_t(size_t, sg_remaining,
+                                        block_remaining);
+                        to_offset = get_block_offset(udev, block,
+                                        block_remaining);
+                        to = (void *)udev->mb_addr + to_offset;
+                        if (*iov_cnt != 0 &&
+                            to_offset == iov_tail(udev, *iov)) {
+                                (*iov)->iov_len += copy_bytes;
+                        } else {
+                                new_iov(iov, iov_cnt, udev);
+                                (*iov)->iov_base = (void __user *) to_offset;
+                                (*iov)->iov_len = copy_bytes;
+                        }
                         if (copy_data) {
-                                to = (void *) udev->mb_addr +
-                                        udev->data_off + udev->data_head;
-                                memcpy(to, from_skip, copy_bytes);
+                                memcpy(to, from + sg->length - sg_remaining,
+                                        copy_bytes);
                                 tcmu_flush_dcache_range(to, copy_bytes);
                         }
-
-                        (*iov_cnt)++;
-                        (*iov)++;
-
-                        UPDATE_HEAD(udev->data_head,
-                                copy_bytes, udev->data_size);
+                        sg_remaining -= copy_bytes;
+                        block_remaining -= copy_bytes;
                 }
-
                 kunmap_atomic(from - sg->offset);
         }
 }
 
-static void gather_and_free_data_area(struct tcmu_dev *udev,
-        struct scatterlist *data_sg, unsigned int data_nents)
+static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
+{
+        bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
+                        DATA_BLOCK_BITS);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
+                struct scatterlist *data_sg, unsigned int data_nents)
 {
-        int i;
+        int i, block;
+        int block_remaining = 0;
         void *from, *to;
-        size_t copy_bytes;
+        size_t copy_bytes, from_offset;
         struct scatterlist *sg;
 
         /* It'd be easier to look at entry's iovec again, but UAM */
         for_each_sg(data_sg, sg, data_nents, i) {
-                copy_bytes = min_t(size_t, sg->length,
-                                 head_to_end(udev->data_tail, udev->data_size));
-
+                int sg_remaining = sg->length;
                 to = kmap_atomic(sg_page(sg)) + sg->offset;
-                WARN_ON(sg->length + sg->offset > PAGE_SIZE);
-                from = (void *) udev->mb_addr +
-                        udev->data_off + udev->data_tail;
-                tcmu_flush_dcache_range(from, copy_bytes);
-                memcpy(to, from, copy_bytes);
-
-                UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
-                /* Uh oh, wrapped the data buffer for this sg's data */
-                if (sg->length != copy_bytes) {
-                        void *to_skip = to + copy_bytes;
-
-                        from = (void *) udev->mb_addr +
-                                udev->data_off + udev->data_tail;
-                        WARN_ON(udev->data_tail);
-                        copy_bytes = sg->length - copy_bytes;
-                        tcmu_flush_dcache_range(from, copy_bytes);
-                        memcpy(to_skip, from, copy_bytes);
-
-                        UPDATE_HEAD(udev->data_tail,
-                                copy_bytes, udev->data_size);
+                while (sg_remaining > 0) {
+                        if (block_remaining == 0) {
+                                block = find_first_bit(cmd_bitmap,
+                                                DATA_BLOCK_BITS);
+                                block_remaining = DATA_BLOCK_SIZE;
+                                clear_bit(block, cmd_bitmap);
+                        }
+                        copy_bytes = min_t(size_t, sg_remaining,
+                                        block_remaining);
+                        from_offset = get_block_offset(udev, block,
+                                        block_remaining);
+                        from = (void *) udev->mb_addr + from_offset;
+                        tcmu_flush_dcache_range(from, copy_bytes);
+                        memcpy(to + sg->length - sg_remaining, from,
+                                        copy_bytes);
+
+                        sg_remaining -= copy_bytes;
+                        block_remaining -= copy_bytes;
                 }
                 kunmap_atomic(to - sg->offset);
         }
 }
 
+static inline size_t spc_bitmap_free(unsigned long *bitmap)
+{
+        return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
+                        bitmap_weight(bitmap, DATA_BLOCK_BITS));
+}
+
 /*
  * We can't queue a command until we have space available on the cmd ring *and*
  * space available on the data ring.
@@ -339,9 +356,8 @@ static void gather_and_free_data_area(struct tcmu_dev *udev,
 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
 {
         struct tcmu_mailbox *mb = udev->mb_addr;
-        size_t space;
+        size_t space, cmd_needed;
         u32 cmd_head;
-        size_t cmd_needed;
 
         tcmu_flush_dcache_range(mb, sizeof(*mb));
 
@@ -363,10 +379,10 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
                 return false;
         }
 
-        space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
+        space = spc_bitmap_free(udev->data_bitmap);
         if (space < data_needed) {
-                pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
-                                udev->data_tail, udev->data_size);
+                pr_debug("no data space: only %zu available, but ask for %zu\n",
+                                space, data_needed);
                 return false;
         }
 
@@ -385,6 +401,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         uint32_t cmd_head;
         uint64_t cdb_off;
         bool copy_to_data_area;
+        size_t data_length;
+        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
         if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
                 return -EINVAL;
@@ -393,12 +411,12 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
          * Must be a certain minimum size for response sense info, but
          * also may be larger if the iov array is large.
          *
-         * iovs = sgl_nents+1, for end-of-ring case, plus another 1
-         * b/c size == offsetof one-past-element.
+         * We prepare way too many iovs for potential uses here, because it's
+         * expensive to tell how many regions are freed in the bitmap
         */
         base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                                req.iov[se_cmd->t_bidi_data_nents +
-                                        se_cmd->t_data_nents + 2]),
+                                req.iov[se_cmd->t_bidi_data_nents +
+                                        se_cmd->t_data_nents]),
                                 sizeof(struct tcmu_cmd_entry));
         command_size = base_command_size
                 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -409,13 +427,18 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
         mb = udev->mb_addr;
         cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+        data_length = se_cmd->data_length;
+        if (se_cmd->se_cmd_flags & SCF_BIDI) {
+                BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+                data_length += se_cmd->t_bidi_data_sg->length;
+        }
         if ((command_size > (udev->cmdr_size / 2))
-            || tcmu_cmd->data_length > (udev->data_size - 1))
+            || data_length > udev->data_size)
                 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
-                        "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
+                        "cmd/data ring buffers\n", command_size, data_length,
                         udev->cmdr_size, udev->data_size);
 
-        while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
+        while (!is_ring_space_avail(udev, command_size, data_length)) {
                 int ret;
                 DEFINE_WAIT(__wait);
 
@@ -462,6 +485,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         entry->hdr.kflags = 0;
         entry->hdr.uflags = 0;
 
+        bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
+
         /*
          * Fix up iovecs, and handle if allocation in data ring wrapped.
          */
@@ -480,6 +505,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
                 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
         entry->req.iov_bidi_cnt = iov_cnt;
 
+        /* cmd's data_bitmap is what changed in process */
+        bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
+                        DATA_BLOCK_BITS);
+
         /* All offsets relative to mb_addr, not start of entry! */
         cdb_off = CMDR_OFF + cmd_head + base_command_size;
         memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -530,35 +559,42 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
         struct tcmu_dev *udev = cmd->tcmu_dev;
 
         if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
-                /* cmd has been completed already from timeout, just reclaim data
-                   ring space */
-                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+                /*
+                 * cmd has been completed already from timeout, just reclaim
+                 * data ring space and free cmd
+                 */
+                free_data_area(udev, cmd);
+
+                kmem_cache_free(tcmu_cmd_cache, cmd);
                 return;
         }
 
         if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
-                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+                free_data_area(udev, cmd);
                 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                         cmd->se_cmd);
                 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
         } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
                                se_cmd->scsi_sense_length);
-
-                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+                free_data_area(udev, cmd);
         } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-                /* Discard data_out buffer */
-                UPDATE_HEAD(udev->data_tail,
-                        (size_t)se_cmd->t_data_sg->length, udev->data_size);
+                DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
 
-                /* Get Data-In buffer */
-                gather_and_free_data_area(udev,
+                /* Get Data-In buffer before clean up */
+                bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+                gather_data_area(udev, bitmap,
                         se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+                free_data_area(udev, cmd);
         } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-                gather_and_free_data_area(udev,
+                DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+                bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+                gather_data_area(udev, bitmap,
                         se_cmd->t_data_sg, se_cmd->t_data_nents);
+                free_data_area(udev, cmd);
         } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
-                UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+                free_data_area(udev, cmd);
         } else if (se_cmd->data_direction != DMA_NONE) {
                 pr_warn("TCMU: data direction was %d!\n",
                         se_cmd->data_direction);
@@ -894,11 +930,13 @@ static int tcmu_configure_device(struct se_device *dev)
 
         mb = udev->mb_addr;
         mb->version = TCMU_MAILBOX_VERSION;
+        mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
         mb->cmdr_off = CMDR_OFF;
         mb->cmdr_size = udev->cmdr_size;
 
         WARN_ON(!PAGE_ALIGNED(udev->data_off));
         WARN_ON(udev->data_size % PAGE_SIZE);
+        WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
 
         info->version = __stringify(TCMU_MAILBOX_VERSION);
 
@@ -942,12 +980,12 @@ err_vzalloc:
         return ret;
 }
 
-static int tcmu_check_pending_cmd(int id, void *p, void *data)
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 {
-        struct tcmu_cmd *cmd = p;
-
-        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+        if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+                kmem_cache_free(tcmu_cmd_cache, cmd);
                 return 0;
+        }
         return -EINVAL;
 }
 
@@ -962,6 +1000,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
 static void tcmu_free_device(struct se_device *dev)
 {
         struct tcmu_dev *udev = TCMU_DEV(dev);
+        struct tcmu_cmd *cmd;
+        bool all_expired = true;
         int i;
 
         del_timer_sync(&udev->timeout);
@@ -970,10 +1010,13 @@ static void tcmu_free_device(struct se_device *dev)
 
         /* Upper layer should drain all requests before calling this */
         spin_lock_irq(&udev->commands_lock);
-        i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
+        idr_for_each_entry(&udev->commands, cmd, i) {
+                if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+                        all_expired = false;
+        }
         idr_destroy(&udev->commands);
         spin_unlock_irq(&udev->commands_lock);
-        WARN_ON(i);
+        WARN_ON(!all_expired);
 
         /* Device was configured */
         if (udev->uio_info.uio_dev) {
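The data_bitmap conversion above replaces the head/tail ring arithmetic with per-block accounting: the device bitmap marks which of the 256 x 4096-byte blocks are in flight, each command records the bits it claimed (the bitmap_xor of the before/after snapshots), and completion returns them with one more XOR. The free-space check becomes pure popcount arithmetic. A minimal sketch of that accounting, assuming the driver's DATA_BLOCK_BITS/DATA_BLOCK_SIZE constants:

/* claim one 4 KiB block; mirrors the find_first_zero_bit/set_bit step
 * inside alloc_and_scatter_data_area() above */
static int claim_block(unsigned long *dev_bitmap)
{
        int block = find_first_zero_bit(dev_bitmap, DATA_BLOCK_BITS);

        if (block == DATA_BLOCK_BITS)
                return -ENOSPC;         /* data area full */
        set_bit(block, dev_bitmap);
        return block;
}

/* free bytes = block size times the number of zero bits */
static size_t data_area_free(const unsigned long *dev_bitmap)
{
        return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
                        bitmap_weight(dev_bitmap, DATA_BLOCK_BITS));
}

/* e.g. with 3 of the 256 blocks claimed:
 *   data_area_free() == 4096 * (256 - 3) == 1036288 bytes free
 * out of the 1 MiB (256 * 4096) data area. */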
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
 
 int ft_check_stop_free(struct se_cmd *se_cmd)
 {
-        transport_generic_free_cmd(se_cmd, 0);
-        return 1;
+        return transport_generic_free_cmd(se_cmd, 0);
 }
 
 /*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
                 return -ENOMEM;
         }
         lport->tt.exch_done(cmd->seq);
+        /*
+         * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+         * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+         * final se_cmd->cmd_kref put.
+         */
+        target_put_sess_cmd(&cmd->se_cmd);
         return 0;
 }
 
@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
         /* FIXME: Add referenced task tag for ABORT_TASK */
         rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
                 &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-                cmd, tm_func, GFP_KERNEL, 0, 0);
+                cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
         if (rc < 0)
                 ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
 }
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
         pr_debug("tmr fn %d resp %d fcp code %d\n",
                   tmr->function, tmr->response, code);
         ft_send_resp_code(cmd, code);
+        /*
+         * Drop the extra ACK_KREF reference taken by target_submit_tmr()
+         * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+         * final se_cmd->cmd_kref put.
+         */
+        target_put_sess_cmd(&cmd->se_cmd);
 }
 
 void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
          */
         if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
                               &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
-                              ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+                              ntohl(fcp->fc_dl), task_attr, data_dir,
+                              TARGET_SCF_ACK_KREF))
                 goto err;
 
         pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
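The TARGET_SCF_ACK_KREF changes above all follow one rule: submitting with the flag takes an extra se_cmd->cmd_kref that the fabric driver owns until its response (or TMR response) is on the wire, which keeps the descriptor alive across a concurrent ABORT or session shutdown. A sketch of the paired release, mirroring ft_queue_status() above; the function name is a hypothetical stand-in for whatever completion hook a fabric uses:

static void my_fabric_status_sent(struct ft_cmd *cmd)
{
        /*
         * Drop the extra reference that
         * target_submit_cmd(..., TARGET_SCF_ACK_KREF) took at submission
         * time; the final cmd_kref put still happens later via the
         * driver's ->check_stop_free() -> transport_generic_free_cmd().
         */
        target_put_sess_cmd(&cmd->se_cmd);
}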
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
@@ -186,6 +186,20 @@ out:
         return NULL;
 }
 
+static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
+                            struct se_session *se_sess, void *p)
+{
+        struct ft_sess *sess = p;
+        struct ft_tport *tport = sess->tport;
+        struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
+
+        pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+        hlist_add_head_rcu(&sess->hash, head);
+        tport->sess_count++;
+
+        return 0;
+}
+
 /*
  * Allocate session and enter it in the hash for the local port.
  * Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
         struct fc_rport_priv *rdata)
 {
         struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
-        struct se_node_acl *se_acl;
         struct ft_sess *sess;
         struct hlist_head *head;
         unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
         if (!sess)
                 return NULL;
 
-        sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-                                                    sizeof(struct ft_cmd),
-                                                    TARGET_PROT_NORMAL);
+        kref_init(&sess->kref); /* ref for table entry */
+        sess->tport = tport;
+        sess->port_id = port_id;
+
+        sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+                                             sizeof(struct ft_cmd),
+                                             TARGET_PROT_NORMAL, &initiatorname[0],
+                                             sess, ft_sess_alloc_cb);
         if (IS_ERR(sess->se_sess)) {
                 kfree(sess);
                 return NULL;
         }
-
-        se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
-        if (!se_acl) {
-                transport_free_session(sess->se_sess);
-                kfree(sess);
-                return NULL;
-        }
-        sess->se_sess->se_node_acl = se_acl;
-        sess->tport = tport;
-        sess->port_id = port_id;
-        kref_init(&sess->kref); /* ref for table entry */
-        hlist_add_head_rcu(&sess->hash, head);
-        tport->sess_count++;
-
         pr_debug("port_id %x sess %p\n", port_id, sess);
-
-        transport_register_session(&tport->tpg->se_tpg, se_acl,
-                                   sess->se_sess, sess);
         return sess;
 }