mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "Things have been quiet this round with mostly bugfixes, percpu
  conversions, and other minor iscsi-target conformance testing changes.

  The highlights include:

   - Add demo_mode_discovery attribute for iscsi-target (Thomas)
   - Convert tcm_fc(FCoE) to use percpu-ida pre-allocation
   - Add send completion interrupt coalescing for ib_isert
   - Convert target-core to use percpu-refcounting for se_lun
   - Fix mutex_trylock usage bug in iscsit_increment_maxcmdsn
   - tcm_loop updates (Hannes)
   - target-core ALUA cleanups + prep for v3.14 SCSI Referrals support (Hannes)

  v3.14 is currently shaping up to be a busy development cycle in target
  land, with initial support for T10 Referrals and T10 DIF currently on
  the roadmap"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (40 commits)
  iscsi-target: chap auth shouldn't match username with trailing garbage
  iscsi-target: fix extract_param to handle buffer length corner case
  iscsi-target: Expose default_erl as TPG attribute
  target_core_configfs: split up ALUA supported states
  target_core_alua: Make supported states configurable
  target_core_alua: Store supported ALUA states
  target_core_alua: Rename ALUA_ACCESS_STATE_OPTIMIZED
  target_core_alua: spellcheck
  target core: rename (ex,im)plict -> (ex,im)plicit
  percpu-refcount: Add percpu-refcount.o to obj-y
  iscsi-target: Do not reject non-immediate CmdSNs exceeding MaxCmdSN
  iscsi-target: Convert iscsi_session statistics to atomic_long_t
  target: Convert se_device statistics to atomic_long_t
  target: Fix delayed Task Aborted Status (TAS) handling bug
  iscsi-target: Reject unsupported multi PDU text command sequence
  ib_isert: Avoid duplicate iscsit_increment_maxcmdsn call
  iscsi-target: Fix mutex_trylock usage in iscsit_increment_maxcmdsn
  target: Core does not need blkdev.h
  target: Pass through I/O topology for block backstores
  iser-target: Avoid using FRMR for single dma entry requests
  ...
commit b0e3636f65
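The statistics conversions in this series ("iscsi-target: Convert iscsi_session statistics to atomic_long_t", "target: Convert se_device statistics to atomic_long_t") follow one pattern, and the iscsi_session side of it is visible in the hunks below: counters that used to be bumped under session_stats_lock become atomic_long_t fields updated locklessly on the per-command path. A minimal sketch of that pattern follows; the struct and helper names are illustrative stand-ins, only the atomic_long_* calls mirror the actual change.

/*
 * Sketch only: "example_session" and the helpers below are made up for
 * illustration, the real code operates on struct iscsi_session.
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct example_session {
	/* Before this series: u32/u64 counters guarded by session_stats_lock */
	atomic_long_t cmd_pdus;
	atomic_long_t rx_data_octets;
};

/*
 * Old pattern (removed in the hunks below):
 *	spin_lock_bh(&sess->session_stats_lock);
 *	sess->cmd_pdus++;
 *	spin_unlock_bh(&sess->session_stats_lock);
 */
static void example_account_cmd(struct example_session *sess, u32 payload_len)
{
	/* New pattern: lockless updates on the I/O fast path */
	atomic_long_inc(&sess->cmd_pdus);
	atomic_long_add(payload_len, &sess->rx_data_octets);
}

/* Readers (e.g. the configfs stat attributes) switch to atomic_long_read() */
static long example_read_cmd_pdus(struct example_session *sess)
{
	return atomic_long_read(&sess->cmd_pdus);
}

The same one-line substitution (a spinlock pair plus increment collapsing to a single atomic_long_inc() or atomic_long_add()) repeats throughout the iscsi_target.c hunks further down.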
@@ -440,15 +440,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 buf += " /*\n"
 buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
 buf += " */\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
-buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
+buf += " fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
 buf += " /*\n"
 buf += " * Register the fabric for use within TCM\n"
 buf += " */\n"

@ -22,6 +22,7 @@
|
|||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/llist.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/rdma_cm.h>
|
||||
#include <target/target_core_base.h>
|
||||
|
@ -489,6 +490,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
|
|||
kref_init(&isert_conn->conn_kref);
|
||||
kref_get(&isert_conn->conn_kref);
|
||||
mutex_init(&isert_conn->conn_mutex);
|
||||
mutex_init(&isert_conn->conn_comp_mutex);
|
||||
spin_lock_init(&isert_conn->conn_lock);
|
||||
|
||||
cma_id->context = isert_conn;
|
||||
|
@ -843,14 +845,32 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
|
|||
}
|
||||
|
||||
static void
|
||||
isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
|
||||
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
|
||||
struct ib_send_wr *send_wr, bool coalesce)
|
||||
{
|
||||
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
|
||||
|
||||
isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
|
||||
send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
|
||||
send_wr->opcode = IB_WR_SEND;
|
||||
send_wr->send_flags = IB_SEND_SIGNALED;
|
||||
send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
|
||||
send_wr->sg_list = &tx_desc->tx_sg[0];
|
||||
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
|
||||
/*
|
||||
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
|
||||
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
|
||||
*/
|
||||
mutex_lock(&isert_conn->conn_comp_mutex);
|
||||
if (coalesce &&
|
||||
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
|
||||
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
|
||||
mutex_unlock(&isert_conn->conn_comp_mutex);
|
||||
return;
|
||||
}
|
||||
isert_conn->conn_comp_batch = 0;
|
||||
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
|
||||
mutex_unlock(&isert_conn->conn_comp_mutex);
|
||||
|
||||
send_wr->send_flags = IB_SEND_SIGNALED;
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1582,8 +1602,8 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
|
|||
}
|
||||
|
||||
static void
|
||||
isert_send_completion(struct iser_tx_desc *tx_desc,
|
||||
struct isert_conn *isert_conn)
|
||||
__isert_send_completion(struct iser_tx_desc *tx_desc,
|
||||
struct isert_conn *isert_conn)
|
||||
{
|
||||
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
|
||||
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
|
||||
|
@ -1623,6 +1643,24 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
isert_send_completion(struct iser_tx_desc *tx_desc,
|
||||
struct isert_conn *isert_conn)
|
||||
{
|
||||
struct llist_node *llnode = tx_desc->comp_llnode_batch;
|
||||
struct iser_tx_desc *t;
|
||||
/*
|
||||
* Drain coalesced completion llist starting from comp_llnode_batch
|
||||
* setup in isert_init_send_wr(), and then complete trailing tx_desc.
|
||||
*/
|
||||
while (llnode) {
|
||||
t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
|
||||
llnode = llist_next(llnode);
|
||||
__isert_send_completion(t, isert_conn);
|
||||
}
|
||||
__isert_send_completion(tx_desc, isert_conn);
|
||||
}
|
||||
|
||||
static void
|
||||
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
|
||||
{
|
||||
|
@ -1793,7 +1831,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|||
isert_cmd->tx_desc.num_sge = 2;
|
||||
}
|
||||
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
|
||||
|
||||
pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1813,7 +1851,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
|
|||
&isert_cmd->tx_desc.iscsi_header,
|
||||
nopout_response);
|
||||
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
||||
|
||||
pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1831,7 +1869,7 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
|
|||
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
|
||||
&isert_cmd->tx_desc.iscsi_header);
|
||||
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
||||
|
||||
pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1849,7 +1887,7 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
|
|||
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
|
||||
&isert_cmd->tx_desc.iscsi_header);
|
||||
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
||||
|
||||
pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1881,7 +1919,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
|
|||
tx_dsg->lkey = isert_conn->conn_mr->lkey;
|
||||
isert_cmd->tx_desc.num_sge = 2;
|
||||
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
||||
|
||||
pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1921,7 +1959,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
|
|||
tx_dsg->lkey = isert_conn->conn_mr->lkey;
|
||||
isert_cmd->tx_desc.num_sge = 2;
|
||||
}
|
||||
isert_init_send_wr(isert_cmd, send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
|
||||
|
||||
pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
|
||||
|
||||
|
@ -1991,8 +2029,6 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
|
||||
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
|
||||
data_left = se_cmd->data_length;
|
||||
iscsit_increment_maxcmdsn(cmd, conn->sess);
|
||||
cmd->stat_sn = conn->stat_sn++;
|
||||
} else {
|
||||
sg_off = cmd->write_data_done / PAGE_SIZE;
|
||||
data_left = se_cmd->data_length - cmd->write_data_done;
|
||||
|
@ -2204,8 +2240,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
|
||||
if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
|
||||
data_left = se_cmd->data_length;
|
||||
iscsit_increment_maxcmdsn(cmd, conn->sess);
|
||||
cmd->stat_sn = conn->stat_sn++;
|
||||
} else {
|
||||
sg_off = cmd->write_data_done / PAGE_SIZE;
|
||||
data_left = se_cmd->data_length - cmd->write_data_done;
|
||||
|
@ -2259,18 +2293,26 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
data_len = min(data_left, rdma_write_max);
|
||||
wr->cur_rdma_length = data_len;
|
||||
|
||||
spin_lock_irqsave(&isert_conn->conn_lock, flags);
|
||||
fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
|
||||
struct fast_reg_descriptor, list);
|
||||
list_del(&fr_desc->list);
|
||||
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
|
||||
wr->fr_desc = fr_desc;
|
||||
/* if there is a single dma entry, dma mr is sufficient */
|
||||
if (count == 1) {
|
||||
ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
|
||||
ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
|
||||
ib_sge->lkey = isert_conn->conn_mr->lkey;
|
||||
wr->fr_desc = NULL;
|
||||
} else {
|
||||
spin_lock_irqsave(&isert_conn->conn_lock, flags);
|
||||
fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
|
||||
struct fast_reg_descriptor, list);
|
||||
list_del(&fr_desc->list);
|
||||
spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
|
||||
wr->fr_desc = fr_desc;
|
||||
|
||||
ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
|
||||
ib_sge, offset, data_len);
|
||||
if (ret) {
|
||||
list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
|
||||
goto unmap_sg;
|
||||
ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
|
||||
ib_sge, offset, data_len);
|
||||
if (ret) {
|
||||
list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
|
||||
goto unmap_sg;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -2306,10 +2348,11 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
|
|||
* Build isert_conn->tx_desc for iSCSI response PDU and attach
|
||||
*/
|
||||
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
|
||||
iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
|
||||
iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
|
||||
&isert_cmd->tx_desc.iscsi_header);
|
||||
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
|
||||
isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
|
||||
isert_init_send_wr(isert_conn, isert_cmd,
|
||||
&isert_cmd->tx_desc.send_wr, true);
|
||||
|
||||
atomic_inc(&isert_conn->post_send_buf_count);
|
||||
|
||||
|
|
|
@@ -43,6 +43,8 @@ struct iser_tx_desc {
 	struct ib_sge tx_sg[2];
 	int num_sge;
 	struct isert_cmd *isert_cmd;
+	struct llist_node *comp_llnode_batch;
+	struct llist_node comp_llnode;
 	struct ib_send_wr send_wr;
 } __packed;

@@ -121,6 +123,10 @@ struct isert_conn {
 	int conn_frwr_pool_size;
 	/* lock to protect frwr_pool */
 	spinlock_t conn_lock;
+#define ISERT_COMP_BATCH_COUNT 8
+	int conn_comp_batch;
+	struct llist_head conn_comp_llist;
+	struct mutex conn_comp_mutex;
 };

 #define ISERT_MAX_CQ 64

@ -1352,11 +1352,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
|
|||
|
||||
/* XXX(hch): this is a horrible layering violation.. */
|
||||
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
|
||||
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
|
||||
ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
|
||||
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
|
||||
|
||||
complete(&ioctx->cmd.transport_lun_stop_comp);
|
||||
break;
|
||||
case SRPT_STATE_CMD_RSP_SENT:
|
||||
/*
|
||||
|
@ -1364,9 +1361,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
|
|||
* not been received in time.
|
||||
*/
|
||||
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
|
||||
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
|
||||
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
|
||||
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
|
||||
target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
|
||||
break;
|
||||
case SRPT_STATE_MGMT_RSP_SENT:
|
||||
|
@ -1476,7 +1470,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
|
|||
{
|
||||
struct se_cmd *cmd;
|
||||
enum srpt_command_state state;
|
||||
unsigned long flags;
|
||||
|
||||
cmd = &ioctx->cmd;
|
||||
state = srpt_get_cmd_state(ioctx);
|
||||
|
@ -1496,9 +1489,6 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
|
|||
__func__, __LINE__, state);
|
||||
break;
|
||||
case SRPT_RDMA_WRITE_LAST:
|
||||
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
|
||||
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
|
||||
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
|
||||
|
|
|
@ -330,7 +330,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
||||
struct tcm_qla2xxx_tpg, se_tpg);
|
||||
|
||||
return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
|
||||
return tpg->tpg_attrib.generate_node_acls;
|
||||
}
|
||||
|
||||
static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
|
||||
|
@ -338,7 +338,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
||||
struct tcm_qla2xxx_tpg, se_tpg);
|
||||
|
||||
return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
|
||||
return tpg->tpg_attrib.cache_dynamic_acls;
|
||||
}
|
||||
|
||||
static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
|
||||
|
@ -346,7 +346,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
||||
struct tcm_qla2xxx_tpg, se_tpg);
|
||||
|
||||
return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
|
||||
return tpg->tpg_attrib.demo_mode_write_protect;
|
||||
}
|
||||
|
||||
static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
|
||||
|
@ -354,7 +354,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
||||
struct tcm_qla2xxx_tpg, se_tpg);
|
||||
|
||||
return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
|
||||
return tpg->tpg_attrib.prod_mode_write_protect;
|
||||
}
|
||||
|
||||
static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
|
||||
|
@ -362,7 +362,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
|
||||
struct tcm_qla2xxx_tpg, se_tpg);
|
||||
|
||||
return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only;
|
||||
return tpg->tpg_attrib.demo_mode_login_only;
|
||||
}
|
||||
|
||||
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
|
||||
|
@ -847,7 +847,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
|
|||
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
|
||||
struct tcm_qla2xxx_tpg, se_tpg); \
|
||||
\
|
||||
return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
|
||||
return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
|
||||
} \
|
||||
\
|
||||
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
|
||||
|
@ -1027,10 +1027,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
|
|||
* By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
|
||||
* NodeACLs
|
||||
*/
|
||||
QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
|
||||
QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
|
||||
QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
|
||||
QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1;
|
||||
tpg->tpg_attrib.generate_node_acls = 1;
|
||||
tpg->tpg_attrib.demo_mode_write_protect = 1;
|
||||
tpg->tpg_attrib.cache_dynamic_acls = 1;
|
||||
tpg->tpg_attrib.demo_mode_login_only = 1;
|
||||
|
||||
ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
|
||||
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
|
||||
|
@ -1830,16 +1830,16 @@ static int tcm_qla2xxx_register_configfs(void)
|
|||
/*
|
||||
* Setup default attribute lists for various fabric->tf_cit_tmpl
|
||||
*/
|
||||
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
|
||||
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
|
||||
tcm_qla2xxx_tpg_attrib_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
|
||||
/*
|
||||
* Register the fabric for use within TCM
|
||||
*/
|
||||
|
@ -1870,15 +1870,15 @@ static int tcm_qla2xxx_register_configfs(void)
|
|||
/*
|
||||
* Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
|
||||
*/
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
|
||||
TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
|
||||
npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
|
||||
/*
|
||||
* Register the npiv_fabric for use within TCM
|
||||
*/
|
||||
|
|
|
@@ -45,8 +45,6 @@ struct tcm_qla2xxx_tpg {
 	struct se_portal_group se_tpg;
 };

-#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib)
-
 struct tcm_qla2xxx_fc_loopid {
 	struct se_node_acl *se_nacl;
 };

@ -805,14 +805,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
int iscsi_task_attr;
|
||||
int sam_task_attr;
|
||||
|
||||
spin_lock_bh(&conn->sess->session_stats_lock);
|
||||
conn->sess->cmd_pdus++;
|
||||
if (conn->sess->se_sess->se_node_acl) {
|
||||
spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
conn->sess->se_sess->se_node_acl->num_cmds++;
|
||||
spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
}
|
||||
spin_unlock_bh(&conn->sess->session_stats_lock);
|
||||
atomic_long_inc(&conn->sess->cmd_pdus);
|
||||
|
||||
hdr = (struct iscsi_scsi_req *) buf;
|
||||
payload_length = ntoh24(hdr->dlength);
|
||||
|
@ -1254,20 +1247,12 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
|
|||
int rc;
|
||||
|
||||
if (!payload_length) {
|
||||
pr_err("DataOUT payload is ZERO, protocol error.\n");
|
||||
return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
|
||||
buf);
|
||||
pr_warn("DataOUT payload is ZERO, ignoring.\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* iSCSI write */
|
||||
spin_lock_bh(&conn->sess->session_stats_lock);
|
||||
conn->sess->rx_data_octets += payload_length;
|
||||
if (conn->sess->se_sess->se_node_acl) {
|
||||
spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
|
||||
spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
}
|
||||
spin_unlock_bh(&conn->sess->session_stats_lock);
|
||||
atomic_long_add(payload_length, &conn->sess->rx_data_octets);
|
||||
|
||||
if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
|
||||
pr_err("DataSegmentLength: %u is greater than"
|
||||
|
@ -1486,7 +1471,7 @@ EXPORT_SYMBOL(iscsit_check_dataout_payload);
|
|||
|
||||
static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
|
||||
{
|
||||
struct iscsi_cmd *cmd;
|
||||
struct iscsi_cmd *cmd = NULL;
|
||||
struct iscsi_data *hdr = (struct iscsi_data *)buf;
|
||||
int rc;
|
||||
bool data_crc_failed = false;
|
||||
|
@ -1954,6 +1939,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
|
|||
(unsigned char *)hdr);
|
||||
}
|
||||
|
||||
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
|
||||
(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
|
||||
pr_err("Multi sequence text commands currently not supported\n");
|
||||
return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
|
||||
(unsigned char *)hdr);
|
||||
}
|
||||
|
||||
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
|
||||
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
|
||||
hdr->exp_statsn, payload_length);
|
||||
|
@ -2630,14 +2622,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
|
|||
return -1;
|
||||
}
|
||||
|
||||
spin_lock_bh(&conn->sess->session_stats_lock);
|
||||
conn->sess->tx_data_octets += datain.length;
|
||||
if (conn->sess->se_sess->se_node_acl) {
|
||||
spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
|
||||
spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
|
||||
}
|
||||
spin_unlock_bh(&conn->sess->session_stats_lock);
|
||||
atomic_long_add(datain.length, &conn->sess->tx_data_octets);
|
||||
/*
|
||||
* Special case for successfully execution w/ both DATAIN
|
||||
* and Sense Data.
|
||||
|
@ -3162,9 +3147,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
|
|||
if (inc_stat_sn)
|
||||
cmd->stat_sn = conn->stat_sn++;
|
||||
|
||||
spin_lock_bh(&conn->sess->session_stats_lock);
|
||||
conn->sess->rsp_pdus++;
|
||||
spin_unlock_bh(&conn->sess->session_stats_lock);
|
||||
atomic_long_inc(&conn->sess->rsp_pdus);
|
||||
|
||||
memset(hdr, 0, ISCSI_HDR_LEN);
|
||||
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
|
||||
|
@ -3374,6 +3357,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
|
|||
struct iscsi_tiqn *tiqn;
|
||||
struct iscsi_tpg_np *tpg_np;
|
||||
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
|
||||
int target_name_printed;
|
||||
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
|
||||
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
|
||||
|
||||
|
@ -3411,19 +3395,23 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
|
|||
continue;
|
||||
}
|
||||
|
||||
len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
|
||||
len += 1;
|
||||
|
||||
if ((len + payload_len) > buffer_len) {
|
||||
end_of_buf = 1;
|
||||
goto eob;
|
||||
}
|
||||
memcpy(payload + payload_len, buf, len);
|
||||
payload_len += len;
|
||||
target_name_printed = 0;
|
||||
|
||||
spin_lock(&tiqn->tiqn_tpg_lock);
|
||||
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
|
||||
|
||||
/* If demo_mode_discovery=0 and generate_node_acls=0
|
||||
* (demo mode dislabed) do not return
|
||||
* TargetName+TargetAddress unless a NodeACL exists.
|
||||
*/
|
||||
|
||||
if ((tpg->tpg_attrib.generate_node_acls == 0) &&
|
||||
(tpg->tpg_attrib.demo_mode_discovery == 0) &&
|
||||
(!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
|
||||
cmd->conn->sess->sess_ops->InitiatorName))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
spin_lock(&tpg->tpg_state_lock);
|
||||
if ((tpg->tpg_state == TPG_STATE_FREE) ||
|
||||
(tpg->tpg_state == TPG_STATE_INACTIVE)) {
|
||||
|
@ -3438,6 +3426,22 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
|
|||
struct iscsi_np *np = tpg_np->tpg_np;
|
||||
bool inaddr_any = iscsit_check_inaddr_any(np);
|
||||
|
||||
if (!target_name_printed) {
|
||||
len = sprintf(buf, "TargetName=%s",
|
||||
tiqn->tiqn);
|
||||
len += 1;
|
||||
|
||||
if ((len + payload_len) > buffer_len) {
|
||||
spin_unlock(&tpg->tpg_np_lock);
|
||||
spin_unlock(&tiqn->tiqn_tpg_lock);
|
||||
end_of_buf = 1;
|
||||
goto eob;
|
||||
}
|
||||
memcpy(payload + payload_len, buf, len);
|
||||
payload_len += len;
|
||||
target_name_printed = 1;
|
||||
}
|
||||
|
||||
len = sprintf(buf, "TargetAddress="
|
||||
"%s:%hu,%hu",
|
||||
(inaddr_any == false) ?
|
||||
|
@ -4092,9 +4096,7 @@ int iscsi_target_rx_thread(void *arg)
|
|||
* hit default in the switch below.
|
||||
*/
|
||||
memset(buffer, 0xff, ISCSI_HDR_LEN);
|
||||
spin_lock_bh(&conn->sess->session_stats_lock);
|
||||
conn->sess->conn_digest_errors++;
|
||||
spin_unlock_bh(&conn->sess->session_stats_lock);
|
||||
atomic_long_inc(&conn->sess->conn_digest_errors);
|
||||
} else {
|
||||
pr_debug("Got HeaderDigest CRC32C"
|
||||
" 0x%08x\n", checksum);
|
||||
|
@ -4381,7 +4383,7 @@ int iscsit_close_connection(
|
|||
|
||||
int iscsit_close_session(struct iscsi_session *sess)
|
||||
{
|
||||
struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
|
||||
struct iscsi_portal_group *tpg = sess->tpg;
|
||||
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
||||
|
||||
if (atomic_read(&sess->nconn)) {
|
||||
|
|
|
@ -111,7 +111,7 @@ static struct iscsi_chap *chap_server_open(
|
|||
/*
|
||||
* Set Identifier.
|
||||
*/
|
||||
chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
|
||||
chap->id = conn->tpg->tpg_chap_id++;
|
||||
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
|
||||
*aic_len += 1;
|
||||
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
|
||||
|
@ -146,6 +146,7 @@ static int chap_server_compute_md5(
|
|||
unsigned char client_digest[MD5_SIGNATURE_SIZE];
|
||||
unsigned char server_digest[MD5_SIGNATURE_SIZE];
|
||||
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
|
||||
size_t compare_len;
|
||||
struct iscsi_chap *chap = conn->auth_protocol;
|
||||
struct crypto_hash *tfm;
|
||||
struct hash_desc desc;
|
||||
|
@ -184,7 +185,9 @@ static int chap_server_compute_md5(
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
|
||||
/* Include the terminating NULL in the compare */
|
||||
compare_len = strlen(auth->userid) + 1;
|
||||
if (strncmp(chap_n, auth->userid, compare_len) != 0) {
|
||||
pr_err("CHAP_N values do not match!\n");
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -372,7 +372,7 @@ static ssize_t iscsi_nacl_attrib_show_##name( \
|
|||
struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
|
||||
se_node_acl); \
|
||||
\
|
||||
return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
|
||||
return sprintf(page, "%u\n", nacl->node_attrib.name); \
|
||||
} \
|
||||
\
|
||||
static ssize_t iscsi_nacl_attrib_store_##name( \
|
||||
|
@ -897,7 +897,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
|
|||
if (!se_nacl_new)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
|
||||
cmdsn_depth = tpg->tpg_attrib.default_cmdsn_depth;
|
||||
/*
|
||||
* se_nacl_new may be released by core_tpg_add_initiator_node_acl()
|
||||
* when converting a NdoeACL from demo mode -> explict
|
||||
|
@ -920,9 +920,9 @@ static struct se_node_acl *lio_target_make_nodeacl(
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
|
||||
stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
|
||||
stats_cg->default_groups[1] = NULL;
|
||||
config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
|
||||
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
|
||||
"iscsi_sess_stats", &iscsi_stat_sess_cit);
|
||||
|
||||
return se_nacl;
|
||||
|
@ -967,7 +967,7 @@ static ssize_t iscsi_tpg_attrib_show_##name( \
|
|||
if (iscsit_get_tpg(tpg) < 0) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
|
||||
rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
|
||||
iscsit_put_tpg(tpg); \
|
||||
return rb; \
|
||||
} \
|
||||
|
@ -1041,6 +1041,16 @@ TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
|
|||
*/
|
||||
DEF_TPG_ATTRIB(prod_mode_write_protect);
|
||||
TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
|
||||
/*
|
||||
* Define iscsi_tpg_attrib_s_demo_mode_discovery,
|
||||
*/
|
||||
DEF_TPG_ATTRIB(demo_mode_discovery);
|
||||
TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
|
||||
/*
|
||||
* Define iscsi_tpg_attrib_s_default_erl
|
||||
*/
|
||||
DEF_TPG_ATTRIB(default_erl);
|
||||
TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
|
||||
|
||||
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
|
||||
&iscsi_tpg_attrib_authentication.attr,
|
||||
|
@ -1051,6 +1061,8 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
|
|||
&iscsi_tpg_attrib_cache_dynamic_acls.attr,
|
||||
&iscsi_tpg_attrib_demo_mode_write_protect.attr,
|
||||
&iscsi_tpg_attrib_prod_mode_write_protect.attr,
|
||||
&iscsi_tpg_attrib_demo_mode_discovery.attr,
|
||||
&iscsi_tpg_attrib_default_erl.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -1514,21 +1526,21 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
|
||||
stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
|
||||
stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
|
||||
stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
|
||||
stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
|
||||
stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
|
||||
stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
|
||||
stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
|
||||
stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
|
||||
stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
|
||||
stats_cg->default_groups[5] = NULL;
|
||||
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
|
||||
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
|
||||
"iscsi_instance", &iscsi_stat_instance_cit);
|
||||
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
|
||||
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
|
||||
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
|
||||
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
|
||||
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
|
||||
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
|
||||
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
|
||||
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
|
||||
"iscsi_login_stats", &iscsi_stat_login_cit);
|
||||
config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
|
||||
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
|
||||
"iscsi_logout_stats", &iscsi_stat_logout_cit);
|
||||
|
||||
pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
|
||||
|
@ -1784,6 +1796,11 @@ static int lio_queue_status(struct se_cmd *se_cmd)
|
|||
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
|
||||
|
||||
cmd->i_state = ISTATE_SEND_STATUS;
|
||||
|
||||
if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
|
||||
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
|
||||
return 0;
|
||||
}
|
||||
cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
|
||||
|
||||
return 0;
|
||||
|
@ -1815,21 +1832,21 @@ static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
|
|||
{
|
||||
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
|
||||
|
||||
return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
|
||||
return tpg->tpg_attrib.default_cmdsn_depth;
|
||||
}
|
||||
|
||||
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
|
||||
{
|
||||
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
|
||||
|
||||
return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
|
||||
return tpg->tpg_attrib.generate_node_acls;
|
||||
}
|
||||
|
||||
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
|
||||
{
|
||||
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
|
||||
|
||||
return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
|
||||
return tpg->tpg_attrib.cache_dynamic_acls;
|
||||
}
|
||||
|
||||
static int lio_tpg_check_demo_mode_write_protect(
|
||||
|
@ -1837,7 +1854,7 @@ static int lio_tpg_check_demo_mode_write_protect(
|
|||
{
|
||||
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
|
||||
|
||||
return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
|
||||
return tpg->tpg_attrib.demo_mode_write_protect;
|
||||
}
|
||||
|
||||
static int lio_tpg_check_prod_mode_write_protect(
|
||||
|
@ -1845,7 +1862,7 @@ static int lio_tpg_check_prod_mode_write_protect(
|
|||
{
|
||||
struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
|
||||
|
||||
return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
|
||||
return tpg->tpg_attrib.prod_mode_write_protect;
|
||||
}
|
||||
|
||||
static void lio_tpg_release_fabric_acl(
|
||||
|
@ -1908,9 +1925,12 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
|
|||
{
|
||||
struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
|
||||
se_node_acl);
|
||||
struct se_portal_group *se_tpg = se_acl->se_tpg;
|
||||
struct iscsi_portal_group *tpg = container_of(se_tpg,
|
||||
struct iscsi_portal_group, tpg_se_tpg);
|
||||
|
||||
ISCSI_NODE_ATTRIB(acl)->nacl = acl;
|
||||
iscsit_set_default_node_attribues(acl);
|
||||
acl->node_attrib.nacl = acl;
|
||||
iscsit_set_default_node_attribues(acl, tpg);
|
||||
}
|
||||
|
||||
static int lio_check_stop_free(struct se_cmd *se_cmd)
|
||||
|
@ -1995,17 +2015,17 @@ int iscsi_target_register_configfs(void)
|
|||
* Setup default attribute lists for various fabric->tf_cit_tmpl
|
||||
* sturct config_item_type's
|
||||
*/
|
||||
TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
|
||||
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
|
||||
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
|
||||
|
||||
ret = target_fabric_configfs_register(fabric);
|
||||
if (ret < 0) {
|
||||
|
|
|
@ -37,9 +37,6 @@
|
|||
#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
|
||||
#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
|
||||
#define NA_RANDOM_R2T_OFFSETS 0
|
||||
#define NA_DEFAULT_ERL 0
|
||||
#define NA_DEFAULT_ERL_MAX 2
|
||||
#define NA_DEFAULT_ERL_MIN 0
|
||||
|
||||
/* struct iscsi_tpg_attrib sanity values */
|
||||
#define TA_AUTHENTICATION 1
|
||||
|
@ -58,6 +55,8 @@
|
|||
#define TA_DEMO_MODE_WRITE_PROTECT 1
|
||||
/* Disabled by default in production mode w/ explict ACLs */
|
||||
#define TA_PROD_MODE_WRITE_PROTECT 0
|
||||
#define TA_DEMO_MODE_DISCOVERY 1
|
||||
#define TA_DEFAULT_ERL 0
|
||||
#define TA_CACHE_CORE_NPS 0
|
||||
|
||||
|
||||
|
@ -192,6 +191,7 @@ enum recover_cmdsn_ret_table {
|
|||
CMDSN_NORMAL_OPERATION = 0,
|
||||
CMDSN_LOWER_THAN_EXP = 1,
|
||||
CMDSN_HIGHER_THAN_EXP = 2,
|
||||
CMDSN_MAXCMDSN_OVERRUN = 3,
|
||||
};
|
||||
|
||||
/* Used for iscsi_handle_immediate_data() return values */
|
||||
|
@ -650,14 +650,13 @@ struct iscsi_session {
|
|||
/* Used for session reference counting */
|
||||
int session_usage_count;
|
||||
int session_waiting_on_uc;
|
||||
u32 cmd_pdus;
|
||||
u32 rsp_pdus;
|
||||
u64 tx_data_octets;
|
||||
u64 rx_data_octets;
|
||||
u32 conn_digest_errors;
|
||||
u32 conn_timeout_errors;
|
||||
atomic_long_t cmd_pdus;
|
||||
atomic_long_t rsp_pdus;
|
||||
atomic_long_t tx_data_octets;
|
||||
atomic_long_t rx_data_octets;
|
||||
atomic_long_t conn_digest_errors;
|
||||
atomic_long_t conn_timeout_errors;
|
||||
u64 creation_time;
|
||||
spinlock_t session_stats_lock;
|
||||
/* Number of active connections */
|
||||
atomic_t nconn;
|
||||
atomic_t session_continuation;
|
||||
|
@ -755,11 +754,6 @@ struct iscsi_node_acl {
|
|||
struct se_node_acl se_node_acl;
|
||||
};
|
||||
|
||||
#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
|
||||
|
||||
#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
|
||||
#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
|
||||
|
||||
struct iscsi_tpg_attrib {
|
||||
u32 authentication;
|
||||
u32 login_timeout;
|
||||
|
@ -769,6 +763,8 @@ struct iscsi_tpg_attrib {
|
|||
u32 default_cmdsn_depth;
|
||||
u32 demo_mode_write_protect;
|
||||
u32 prod_mode_write_protect;
|
||||
u32 demo_mode_discovery;
|
||||
u32 default_erl;
|
||||
struct iscsi_portal_group *tpg;
|
||||
};
|
||||
|
||||
|
@ -835,12 +831,6 @@ struct iscsi_portal_group {
|
|||
struct list_head tpg_list;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
|
||||
#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
|
||||
#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
|
||||
#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
|
||||
#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
|
||||
|
||||
struct iscsi_wwn_stat_grps {
|
||||
struct config_group iscsi_stat_group;
|
||||
struct config_group iscsi_instance_group;
|
||||
|
@ -871,8 +861,6 @@ struct iscsi_tiqn {
|
|||
struct iscsi_logout_stats logout_stats;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
|
||||
|
||||
struct iscsit_global {
|
||||
/* In core shutdown */
|
||||
u32 in_shutdown;
|
||||
|
|
|
@@ -58,11 +58,7 @@ void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess

 	cmd->maxcmdsn_inc = 1;

-	if (!mutex_trylock(&sess->cmdsn_mutex)) {
-		sess->max_cmd_sn += 1;
-		pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
-		return;
-	}
+	mutex_lock(&sess->cmdsn_mutex);
 	sess->max_cmd_sn += 1;
 	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
 	mutex_unlock(&sess->cmdsn_mutex);

|
@ -757,7 +757,7 @@ int iscsit_check_post_dataout(
|
|||
static void iscsit_handle_time2retain_timeout(unsigned long data)
|
||||
{
|
||||
struct iscsi_session *sess = (struct iscsi_session *) data;
|
||||
struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
|
||||
struct iscsi_portal_group *tpg = sess->tpg;
|
||||
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
||||
|
||||
spin_lock_bh(&se_tpg->session_lock);
|
||||
|
@ -785,7 +785,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
|
|||
tiqn->sess_err_stats.last_sess_failure_type =
|
||||
ISCSI_SESS_ERR_CXN_TIMEOUT;
|
||||
tiqn->sess_err_stats.cxn_timeout_errors++;
|
||||
sess->conn_timeout_errors++;
|
||||
atomic_long_inc(&sess->conn_timeout_errors);
|
||||
spin_unlock(&tiqn->sess_err_stats.lock);
|
||||
}
|
||||
}
|
||||
|
@ -801,9 +801,9 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
|
|||
* Only start Time2Retain timer when the associated TPG is still in
|
||||
* an ACTIVE (eg: not disabled or shutdown) state.
|
||||
*/
|
||||
spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
|
||||
tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
|
||||
spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
|
||||
spin_lock(&sess->tpg->tpg_state_lock);
|
||||
tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
|
||||
spin_unlock(&sess->tpg->tpg_state_lock);
|
||||
|
||||
if (!tpg_active)
|
||||
return;
|
||||
|
@ -829,7 +829,7 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
|
|||
*/
|
||||
int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
|
||||
{
|
||||
struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
|
||||
struct iscsi_portal_group *tpg = sess->tpg;
|
||||
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
||||
|
||||
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
|
||||
|
|
|
@ -305,7 +305,6 @@ static int iscsi_login_zero_tsih_s1(
|
|||
}
|
||||
|
||||
sess->creation_time = get_jiffies_64();
|
||||
spin_lock_init(&sess->session_stats_lock);
|
||||
/*
|
||||
* The FFP CmdSN window values will be allocated from the TPG's
|
||||
* Initiator Node's ACL once the login has been successfully completed.
|
||||
|
@ -347,15 +346,15 @@ static int iscsi_login_zero_tsih_s2(
|
|||
* Assign a new TPG Session Handle. Note this is protected with
|
||||
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
|
||||
*/
|
||||
sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
|
||||
sess->tsih = ++sess->tpg->ntsih;
|
||||
if (!sess->tsih)
|
||||
sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
|
||||
sess->tsih = ++sess->tpg->ntsih;
|
||||
|
||||
/*
|
||||
* Create the default params from user defined values..
|
||||
*/
|
||||
if (iscsi_copy_param_list(&conn->param_list,
|
||||
ISCSI_TPG_C(conn)->param_list, 1) < 0) {
|
||||
conn->tpg->param_list, 1) < 0) {
|
||||
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
||||
ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
||||
return -1;
|
||||
|
@ -380,7 +379,7 @@ static int iscsi_login_zero_tsih_s2(
|
|||
* In our case, we have already located the struct iscsi_tiqn at this point.
|
||||
*/
|
||||
memset(buf, 0, 32);
|
||||
sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
|
||||
sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
|
||||
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
|
||||
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
||||
ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
||||
|
@ -575,7 +574,7 @@ static int iscsi_login_non_zero_tsih_s2(
|
|||
iscsi_login_set_conn_values(sess, conn, pdu->cid);
|
||||
|
||||
if (iscsi_copy_param_list(&conn->param_list,
|
||||
ISCSI_TPG_C(conn)->param_list, 0) < 0) {
|
||||
conn->tpg->param_list, 0) < 0) {
|
||||
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
||||
ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
||||
return -1;
|
||||
|
@ -593,7 +592,7 @@ static int iscsi_login_non_zero_tsih_s2(
|
|||
* In our case, we have already located the struct iscsi_tiqn at this point.
|
||||
*/
|
||||
memset(buf, 0, 32);
|
||||
sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
|
||||
sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
|
||||
if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
|
||||
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
||||
ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
||||
|
@ -691,7 +690,7 @@ int iscsi_post_login_handler(
|
|||
int stop_timer = 0;
|
||||
struct iscsi_session *sess = conn->sess;
|
||||
struct se_session *se_sess = sess->se_sess;
|
||||
struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
|
||||
struct iscsi_portal_group *tpg = sess->tpg;
|
||||
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
|
||||
struct iscsi_thread_set *ts;
|
||||
|
||||
|
@ -1154,7 +1153,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
|
|||
spin_lock_bh(&conn->sess->conn_lock);
|
||||
if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
|
||||
struct se_portal_group *se_tpg =
|
||||
&ISCSI_TPG_C(conn)->tpg_se_tpg;
|
||||
&conn->tpg->tpg_se_tpg;
|
||||
|
||||
atomic_set(&conn->sess->session_continuation, 0);
|
||||
spin_unlock_bh(&conn->sess->conn_lock);
|
||||
|
|
|
@ -88,7 +88,7 @@ int extract_param(
|
|||
if (len < 0)
|
||||
return -1;
|
||||
|
||||
if (len > max_length) {
|
||||
if (len >= max_length) {
|
||||
pr_err("Length of input: %d exceeds max_length:"
|
||||
" %d\n", len, max_length);
|
||||
return -1;
|
||||
|
@ -140,7 +140,7 @@ static u32 iscsi_handle_authentication(
|
|||
iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
|
||||
se_node_acl);
|
||||
|
||||
auth = ISCSI_NODE_AUTH(iscsi_nacl);
|
||||
auth = &iscsi_nacl->node_auth;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
|
@ -789,7 +789,7 @@ static int iscsi_target_handle_csg_zero(
|
|||
return -1;
|
||||
|
||||
if (!iscsi_check_negotiated_keys(conn->param_list)) {
|
||||
if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
|
||||
if (conn->tpg->tpg_attrib.authentication &&
|
||||
!strncmp(param->value, NONE, 4)) {
|
||||
pr_err("Initiator sent AuthMethod=None but"
|
||||
" Target is enforcing iSCSI Authentication,"
|
||||
|
@ -799,7 +799,7 @@ static int iscsi_target_handle_csg_zero(
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
|
||||
if (conn->tpg->tpg_attrib.authentication &&
|
||||
!login->auth_complete)
|
||||
return 0;
|
||||
|
||||
|
@ -862,7 +862,7 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
|
|||
}
|
||||
|
||||
if (!login->auth_complete &&
|
||||
ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
|
||||
conn->tpg->tpg_attrib.authentication) {
|
||||
pr_err("Initiator is requesting CSG: 1, has not been"
|
||||
" successfully authenticated, and the Target is"
|
||||
" enforcing iSCSI Authentication, login failed.\n");
|
||||
|
|
|
@ -33,7 +33,8 @@ static inline char *iscsit_na_get_initiatorname(
|
|||
}
|
||||
|
||||
void iscsit_set_default_node_attribues(
|
||||
struct iscsi_node_acl *acl)
|
||||
struct iscsi_node_acl *acl,
|
||||
struct iscsi_portal_group *tpg)
|
||||
{
|
||||
struct iscsi_node_attrib *a = &acl->node_attrib;
|
||||
|
||||
|
@ -44,7 +45,7 @@ void iscsit_set_default_node_attribues(
|
|||
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
|
||||
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
|
||||
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
|
||||
a->default_erl = NA_DEFAULT_ERL;
|
||||
a->default_erl = tpg->tpg_attrib.default_erl;
|
||||
}
|
||||
|
||||
int iscsit_na_dataout_timeout(
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
#ifndef ISCSI_TARGET_NODEATTRIB_H
|
||||
#define ISCSI_TARGET_NODEATTRIB_H
|
||||
|
||||
extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
|
||||
extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
|
||||
struct iscsi_portal_group *);
|
||||
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
|
||||
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
|
||||
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
|
||||
|
|
|
@ -792,7 +792,8 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->cmd_pdus));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
@ -815,7 +816,8 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->rsp_pdus));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
@ -838,8 +840,8 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%llu\n",
|
||||
(unsigned long long)sess->tx_data_octets);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->tx_data_octets));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
@ -862,8 +864,8 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%llu\n",
|
||||
(unsigned long long)sess->rx_data_octets);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->rx_data_octets));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
@ -886,8 +888,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%u\n",
|
||||
sess->conn_digest_errors);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->conn_digest_errors));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
@ -910,8 +912,8 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
|
|||
if (se_sess) {
|
||||
sess = se_sess->fabric_sess_ptr;
|
||||
if (sess)
|
||||
ret = snprintf(page, PAGE_SIZE, "%u\n",
|
||||
sess->conn_timeout_errors);
|
||||
ret = snprintf(page, PAGE_SIZE, "%lu\n",
|
||||
atomic_long_read(&sess->conn_timeout_errors));
|
||||
}
|
||||
spin_unlock_bh(&se_nacl->nacl_sess_lock);
|
||||
|
||||
|
|
|
@ -223,6 +223,8 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
|
|||
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
|
||||
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
|
||||
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
|
||||
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
|
||||
a->default_erl = TA_DEFAULT_ERL;
|
||||
}
|
||||
|
||||
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
|
||||
|
@ -237,7 +239,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
|
|||
if (iscsi_create_default_params(&tpg->param_list) < 0)
|
||||
goto err_out;
|
||||
|
||||
ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
|
||||
tpg->tpg_attrib.tpg = tpg;
|
||||
|
||||
spin_lock(&tpg->tpg_state_lock);
|
||||
tpg->tpg_state = TPG_STATE_INACTIVE;
|
||||
|
@ -330,7 +332,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
|
||||
if (tpg->tpg_attrib.authentication) {
|
||||
if (!strcmp(param->value, NONE)) {
|
||||
ret = iscsi_update_param_value(param, CHAP);
|
||||
if (ret)
|
||||
|
@ -820,3 +822,39 @@ int iscsit_ta_prod_mode_write_protect(
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iscsit_ta_demo_mode_discovery(
|
||||
struct iscsi_portal_group *tpg,
|
||||
u32 flag)
|
||||
{
|
||||
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
|
||||
|
||||
if ((flag != 0) && (flag != 1)) {
|
||||
pr_err("Illegal value %d\n", flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
a->demo_mode_discovery = flag;
|
||||
pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
|
||||
" %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
|
||||
"ON" : "OFF");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iscsit_ta_default_erl(
|
||||
struct iscsi_portal_group *tpg,
|
||||
u32 default_erl)
|
||||
{
|
||||
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
|
||||
|
||||
if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
|
||||
pr_err("Illegal value for default_erl: %u\n", default_erl);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
a->default_erl = default_erl;
|
||||
pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -37,5 +37,7 @@ extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
|
|||
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
|
||||
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
|
||||
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
|
||||
extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
|
||||
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
|
||||
|
||||
#endif /* ISCSI_TARGET_TPG_H */
|
||||
|
|
|
@@ -242,9 +242,9 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
*/
if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
" MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
" MaxCmdSN: 0x%08x, ignoring.\n", cmdsn,
sess->max_cmd_sn);
ret = CMDSN_ERROR_CANNOT_RECOVER;
ret = CMDSN_MAXCMDSN_OVERRUN;

} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;

@@ -303,14 +303,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
case CMDSN_MAXCMDSN_OVERRUN:
default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
ret = cmdsn_ret;
break;
default:
reason = ISCSI_REASON_PROTOCOL_ERROR;
reject = true;
ret = cmdsn_ret;
/*
* Existing callers for iscsit_sequence_cmd() will silently
* ignore commands with CMDSN_LOWER_THAN_EXP, so force this
* return for CMDSN_MAXCMDSN_OVERRUN as well..
*/
ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);

@@ -980,7 +982,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
conn->sess->conn_timeout_errors++;
atomic_long_inc(&conn->sess->conn_timeout_errors);
spin_unlock_bh(&tiqn->sess_err_stats.lock);
}
}
@@ -135,6 +135,21 @@ static int tcm_loop_change_queue_depth(
return sdev->queue_depth;
}

static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag);

if (tag)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag = 0;

return tag;
}

/*
* Locate the SAM Task Attr from struct scsi_cmnd *
*/

@@ -178,7 +193,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}

if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
goto out_done;
}
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"

@@ -233,6 +251,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
}

tl_cmd->sc = sc;
tl_cmd->sc_cmd_tag = sc->tag;
INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
queue_work(tcm_loop_workqueue, &tl_cmd->work);
return 0;

@@ -242,17 +261,81 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
struct tcm_loop_nexus *tl_nexus,
int lun, int task, enum tcm_tmreq_table tmr)
{
struct se_cmd *se_cmd = NULL;
struct se_portal_group *se_tpg;
struct se_session *se_sess;
struct se_portal_group *se_tpg;
struct tcm_loop_cmd *tl_cmd = NULL;
struct tcm_loop_tmr *tl_tmr = NULL;
int ret = TMR_FUNCTION_FAILED, rc;

tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
return ret;
}

tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);

se_cmd = &tl_cmd->tl_se_cmd;
se_tpg = &tl_tpg->tl_se_tpg;
se_sess = tl_nexus->se_sess;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);

rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
if (rc < 0)
goto release;

if (tmr == TMR_ABORT_TASK)
se_cmd->se_tmr_req->ref_task_tag = task;

/*
* Locate the underlying TCM struct se_lun
*/
if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
ret = TMR_LUN_DOES_NOT_EXIST;
goto release;
}
/*
* Queue the TMR to TCM Core and sleep waiting for
* tcm_loop_queue_tm_rsp() to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
/*
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
ret = se_cmd->se_tmr_req->response;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
return ret;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tmr *tl_tmr = NULL;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED, rc;
int ret = FAILED;

/*
* Locate the tcm_loop_hba_t pointer
*/
@@ -266,61 +349,72 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
" active I_T Nexus\n");
return FAILED;
}
se_sess = tl_nexus->se_sess;

/*
* Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
se_tpg = &tl_tpg->tl_se_tpg;
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
sc->tag, TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
pr_err("Unable to allocate memory for tl_cmd\n");
/*
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_tpg *tl_tpg;
int ret = FAILED;

/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
/*
* Locate the tl_nexus and se_sess pointers
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
/*
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
0, TMR_LUN_RESET);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;

/*
* Locate the tcm_loop_hba_t pointer
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
if (!tl_hba) {
pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);

se_cmd = &tl_cmd->tl_se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
* Locate the tl_tpg pointer from TargetID in sc->device->id
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
DMA_NONE, MSG_SIMPLE_TAG,
&tl_cmd->tl_sense_buf[0]);

rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
if (rc < 0)
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
*/
if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
goto release;
/*
* Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
* to wake us up.
*/
transport_generic_handle_tmr(se_cmd);
wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
/*
* The TMR LUN_RESET has completed, check the response status and
* then release allocations.
*/
ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED;
release:
if (se_cmd)
transport_generic_free_cmd(se_cmd, 1);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
return ret;
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
if (tl_tpg) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return SUCCESS;
}
return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
@@ -331,6 +425,15 @@ static int tcm_loop_slave_alloc(struct scsi_device *sd)

static int tcm_loop_slave_configure(struct scsi_device *sd)
{
if (sd->tagged_supported) {
scsi_activate_tcq(sd, sd->queue_depth);
scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
sd->host->cmd_per_lun);
} else {
scsi_adjust_queue_depth(sd, 0,
sd->host->cmd_per_lun);
}

return 0;
}

@@ -340,7 +443,10 @@ static struct scsi_host_template tcm_loop_driver_template = {
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.change_queue_type = tcm_loop_change_queue_type,
.eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
.eh_target_reset_handler = tcm_loop_target_reset,
.can_queue = 1024,
.this_id = -1,
.sg_tablesize = 256,

@@ -699,7 +805,10 @@ static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)

static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
return 1;
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);

return tl_cmd->sc_cmd_tag;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)

@@ -932,7 +1041,10 @@ static int tcm_loop_drop_nexus(
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba = tpg->tl_hba;

tl_nexus = tpg->tl_hba->tl_nexus;
if (!tl_hba)
return -ENODEV;

tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus)
return -ENODEV;

@@ -1061,8 +1173,56 @@ static ssize_t tcm_loop_tpg_store_nexus(

TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);

static ssize_t tcm_loop_tpg_show_transport_status(
struct se_portal_group *se_tpg,
char *page)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
const char *status = NULL;
ssize_t ret = -EINVAL;

switch (tl_tpg->tl_transport_status) {
case TCM_TRANSPORT_ONLINE:
status = "online";
break;
case TCM_TRANSPORT_OFFLINE:
status = "offline";
break;
default:
break;
}

if (status)
ret = snprintf(page, PAGE_SIZE, "%s\n", status);

return ret;
}

static ssize_t tcm_loop_tpg_store_transport_status(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);

if (!strncmp(page, "online", 6)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
return count;
}
if (!strncmp(page, "offline", 7)) {
tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
return count;
}
return -EINVAL;
}

TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
&tcm_loop_tpg_nexus.attr,
&tcm_loop_tpg_transport_status.attr,
NULL,
};

@@ -1334,11 +1494,11 @@ static int tcm_loop_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
/*
* Once fabric->tf_ops has been setup, now register the fabric for
* use within TCM
@@ -10,6 +10,8 @@
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
/* Tagged command queueing */
u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */

@@ -40,8 +42,12 @@ struct tcm_loop_nacl {
struct se_node_acl se_node_acl;
};

#define TCM_TRANSPORT_ONLINE 0
#define TCM_TRANSPORT_OFFLINE 1

struct tcm_loop_tpg {
unsigned short tl_tpgt;
unsigned short tl_transport_status;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;

@@ -2556,15 +2556,15 @@ static int sbp_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;

ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
@@ -44,7 +44,7 @@
static sense_reason_t core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
struct se_port *port, int explicit, int offline);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

@@ -117,12 +117,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
buf[off] = 0x80; /* T_SUP */
buf[off] |= 0x40; /* O_SUP */
buf[off] |= 0x8; /* U_SUP */
buf[off] |= 0x4; /* S_SUP */
buf[off] |= 0x2; /* AN_SUP */
buf[off++] |= 0x1; /* AO_SUP */
buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/

@@ -175,7 +170,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
* Set the implict transition time (in seconds) for the application
* Set the implicit transition time (in seconds) for the application
* client to use as a base for it's transition timeout value.
*
* Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN

@@ -188,7 +183,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (tg_pt_gp)
buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
}

@@ -199,7 +194,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
}

/*
* SET_TARGET_PORT_GROUPS for explict ALUA operation.
* SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/

@@ -232,7 +227,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;

@@ -251,9 +246,9 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
" while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}

@@ -330,7 +325,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
/*
* Extact the RELATIVE TARGET PORT IDENTIFIER to identify
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the the incoming
* SET_TARGET_PORT_GROUPS op.
*/

@@ -487,7 +482,7 @@ static inline int core_alua_state_transition(
u8 *alua_ascq)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
* Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {

@@ -515,9 +510,9 @@ static inline int core_alua_state_transition(
}

/*
* return 1: Is used to signal LUN not accecsable, and check condition/not ready
* return 1: Is used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
* reutrn -1: Used to signal failure, and invalid cdb field
* return -1: Used to signal failure, and invalid cdb field
*/
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)

@@ -566,12 +561,12 @@ target_alua_state_check(struct se_cmd *cmd)
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP..
*/
if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;

switch (out_alua_state) {
@@ -620,13 +615,13 @@ target_alua_state_check(struct se_cmd *cmd)
}

/*
* Check implict and explict ALUA state change request.
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
core_alua_check_transition(int state, int *primary)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
case ALUA_ACCESS_STATE_STANDBY:
case ALUA_ACCESS_STATE_UNAVAILABLE:

@@ -654,7 +649,7 @@ core_alua_check_transition(int state, int *primary)
static char *core_alua_dump_state(int state)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";

@@ -676,10 +671,10 @@ char *core_alua_dump_status(int status)
switch (status) {
case ALUA_STATUS_NONE:
return "None";
case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
return "Altered by Explict STPG";
case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
return "Altered by Implict ALUA";
case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
return "Altered by Explicit STPG";
case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
return "Altered by Implicit ALUA";
default:
return "Unknown";
}

@@ -770,7 +765,7 @@ static int core_alua_do_transition_tg_pt(
struct se_node_acl *nacl,
unsigned char *md_buf,
int new_state,
int explict)
int explicit)
{
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;

@@ -784,9 +779,9 @@ static int core_alua_do_transition_tg_pt(
old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
/*
* Check for the optional ALUA primary state transition delay
*/

@@ -802,7 +797,7 @@ static int core_alua_do_transition_tg_pt(
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
* ACCESS STATE CHAGED.
* ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention

@@ -821,12 +816,12 @@ static int core_alua_do_transition_tg_pt(
lacl = se_deve->se_lun_acl;
/*
* se_deve->se_lun_acl pointer may be NULL for a
* entry created without explict Node+MappedLUN ACLs
* entry created without explicit Node+MappedLUN ACLs
*/
if (!lacl)
continue;

if (explict &&
if (explicit &&
(nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
(l_port != NULL) && (l_port == port))
continue;

@@ -866,8 +861,8 @@ static int core_alua_do_transition_tg_pt(
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);

pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
" from primary access state %s to %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
core_alua_dump_state(new_state));

@@ -880,7 +875,7 @@ int core_alua_do_port_transition(
struct se_port *l_port,
struct se_node_acl *l_nacl,
int new_state,
int explict)
int explicit)
{
struct se_device *dev;
struct se_port *port;

@@ -917,7 +912,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
md_buf, new_state, explict);
md_buf, new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
@@ -946,7 +941,7 @@ int core_alua_do_port_transition(
continue;
/*
* If the target behavior port asymmetric access state
* is changed for any target port group accessiable via
* is changed for any target port group accessible via
* a logical unit within a LU group, the target port
* behavior group asymmetric access states for the same
* target port group accessible via other logical units

@@ -970,7 +965,7 @@ int core_alua_do_port_transition(
* success.
*/
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
nacl, md_buf, new_state, explicit);

spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);

@@ -987,7 +982,7 @@ int core_alua_do_port_transition(
pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
core_alua_dump_state(new_state));

atomic_dec(&lu_gp->lu_gp_ref_cnt);

@@ -1034,7 +1029,7 @@ static int core_alua_update_tpg_secondary_metadata(
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
int explict,
int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;

@@ -1061,13 +1056,13 @@ static int core_alua_set_tg_pt_secondary_state(
atomic_set(&port->sep_tg_pt_secondary_offline, 0);

md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
port->sep_tg_pt_secondary_stat = (explict) ?
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
port->sep_tg_pt_secondary_stat = (explicit) ?
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
" to secondary access state: %s\n", (explicit) ? "explicit" :
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

@@ -1232,7 +1227,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
* we want to re-assocate a given lu_gp_mem with default_lu_gp.
* we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)

@@ -1354,18 +1349,25 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
tg_pt_gp->tg_pt_gp_dev = dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
/*
* Enable both explict and implict ALUA support by default
* Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

/*
* Enable all supported states
*/
tg_pt_gp->tg_pt_gp_alua_supported_states =
ALUA_T_SUP | ALUA_O_SUP |
ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

if (def_group) {
spin_lock(&dev->t10_alua.tg_pt_gps_lock);

@@ -1465,7 +1467,7 @@ void core_alua_free_tg_pt_gp(
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
* no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
* no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);

@@ -1501,7 +1503,7 @@ void core_alua_free_tg_pt_gp(
* core_alua_free_tg_pt_gp_mem().
*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
* assume we want to re-assocate a given tg_pt_gp_mem with
* assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1740,13 +1742,13 @@ ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
return sprintf(page, "Implict and Explict\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
return sprintf(page, "Implict\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
return sprintf(page, "Explict\n");
if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
return sprintf(page, "Implicit and Explicit\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
return sprintf(page, "Implicit\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}

@@ -1771,11 +1773,11 @@ ssize_t core_alua_store_access_type(
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;

@@ -1844,14 +1846,14 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}

ssize_t core_alua_show_implict_trans_secs(
ssize_t core_alua_show_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implict_trans_secs(
ssize_t core_alua_store_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)

@@ -1861,16 +1863,16 @@ ssize_t core_alua_store_implict_trans_secs(

ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract implict_trans_secs\n");
pr_err("Unable to extract implicit_trans_secs\n");
return ret;
}
if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
pr_err("Passed implict_trans_secs: %lu, exceeds"
" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
ALUA_MAX_IMPLICT_TRANS_SECS);
if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
pr_err("Passed implicit_trans_secs: %lu, exceeds"
" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
ALUA_MAX_IMPLICIT_TRANS_SECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

return count;
}

@@ -1970,8 +1972,8 @@ ssize_t core_alua_store_secondary_status(
return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
|
@ -7,29 +7,40 @@
|
|||
* from spc4r17 section 6.4.2 Table 135
|
||||
*/
|
||||
#define TPGS_NO_ALUA 0x00
|
||||
#define TPGS_IMPLICT_ALUA 0x10
|
||||
#define TPGS_EXPLICT_ALUA 0x20
|
||||
#define TPGS_IMPLICIT_ALUA 0x10
|
||||
#define TPGS_EXPLICIT_ALUA 0x20
|
||||
|
||||
/*
|
||||
* ASYMMETRIC ACCESS STATE field
|
||||
*
|
||||
* from spc4r17 section 6.27 Table 245
|
||||
*/
|
||||
#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
|
||||
#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
|
||||
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
|
||||
#define ALUA_ACCESS_STATE_STANDBY 0x2
|
||||
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
|
||||
#define ALUA_ACCESS_STATE_OFFLINE 0xe
|
||||
#define ALUA_ACCESS_STATE_TRANSITION 0xf
|
||||
|
||||
/*
|
||||
* from spc4r36j section 6.37 Table 306
|
||||
*/
|
||||
#define ALUA_T_SUP 0x80
|
||||
#define ALUA_O_SUP 0x40
|
||||
#define ALUA_LBD_SUP 0x10
|
||||
#define ALUA_U_SUP 0x08
|
||||
#define ALUA_S_SUP 0x04
|
||||
#define ALUA_AN_SUP 0x02
|
||||
#define ALUA_AO_SUP 0x01
|
||||
|
||||
/*
|
||||
* REPORT_TARGET_PORT_GROUP STATUS CODE
|
||||
*
|
||||
* from spc4r17 section 6.27 Table 246
|
||||
*/
|
||||
#define ALUA_STATUS_NONE 0x00
|
||||
#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
|
||||
#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
|
||||
#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
|
||||
#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
|
||||
|
||||
/*
|
||||
* From spc4r17, Table D.1: ASC and ASCQ Assignement
|
||||
|
@ -46,17 +57,17 @@
|
|||
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
|
||||
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
|
||||
/*
|
||||
* Used for implict and explict ALUA transitional delay, that is disabled
|
||||
* Used for implicit and explicit ALUA transitional delay, that is disabled
|
||||
* by default, and is intended to be used for debugging client side ALUA code.
|
||||
*/
|
||||
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
|
||||
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
|
||||
/*
|
||||
* Used for the recommended application client implict transition timeout
|
||||
* Used for the recommended application client implicit transition timeout
|
||||
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
|
||||
*/
|
||||
#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0
|
||||
#define ALUA_MAX_IMPLICT_TRANS_SECS 255
|
||||
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
|
||||
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
|
||||
/*
|
||||
* Used by core_alua_update_tpg_primary_metadata() and
|
||||
* core_alua_update_tpg_secondary_metadata()
|
||||
|
@ -113,9 +124,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
|
|||
char *);
|
||||
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
|
||||
const char *, size_t);
|
||||
extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
|
||||
extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
|
||||
char *);
|
||||
extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
|
||||
extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
|
||||
const char *, size_t);
|
||||
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
|
||||
char *);
|
||||
|
|
|
@@ -177,16 +177,16 @@ static struct config_group *target_core_register_fabric(
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
&tf->tf_cit_tmpl.tfc_wwn_cit);

tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;

config_group_init_type_name(&tf->tf_group, name,
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
&tf->tf_cit_tmpl.tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&TF_CIT_TMPL(tf)->tfc_discovery_cit);
&tf->tf_cit_tmpl.tfc_discovery_cit);

pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);

@@ -2036,7 +2036,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
int new_state, ret;

if (!tg_pt_gp->tg_pt_gp_valid_id) {
pr_err("Unable to do implict ALUA on non valid"
pr_err("Unable to do implicit ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}

@@ -2049,9 +2049,9 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
}
new_state = (int)tmp;

if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
pr_err("Unable to process implict configfs ALUA"
" transition while TPGS_IMPLICT_ALUA is disabled\n");
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
pr_err("Unable to process implicit configfs ALUA"
" transition while TPGS_IMPLICIT_ALUA is disabled\n");
return -EINVAL;
}

@@ -2097,8 +2097,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
new_status = (int)tmp;

if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;

@@ -2130,6 +2130,90 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(

SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);

/*
* alua_supported_states
*/

#define SE_DEV_ALUA_SUPPORT_STATE_SHOW(_name, _var, _bit) \
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_##_name( \
struct t10_alua_tg_pt_gp *t, char *p) \
{ \
return sprintf(p, "%d\n", !!(t->_var & _bit)); \
}

#define SE_DEV_ALUA_SUPPORT_STATE_STORE(_name, _var, _bit) \
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_##_name(\
struct t10_alua_tg_pt_gp *t, const char *p, size_t c) \
{ \
unsigned long tmp; \
int ret; \
\
if (!t->tg_pt_gp_valid_id) { \
pr_err("Unable to do set ##_name ALUA state on non" \
" valid tg_pt_gp ID: %hu\n", \
t->tg_pt_gp_valid_id); \
return -EINVAL; \
} \
\
ret = kstrtoul(p, 0, &tmp); \
if (ret < 0) { \
pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
return -EINVAL; \
} \
if (tmp > 1) { \
pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
return -EINVAL; \
} \
if (!tmp) \
t->_var |= _bit; \
else \
t->_var &= ~_bit; \
\
return c; \
}

SE_DEV_ALUA_SUPPORT_STATE_SHOW(transitioning,
tg_pt_gp_alua_supported_states, ALUA_T_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(transitioning,
tg_pt_gp_alua_supported_states, ALUA_T_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_transitioning, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(offline,
tg_pt_gp_alua_supported_states, ALUA_O_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(offline,
tg_pt_gp_alua_supported_states, ALUA_O_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_offline, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(lba_dependent,
tg_pt_gp_alua_supported_states, ALUA_LBD_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_lba_dependent, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(unavailable,
tg_pt_gp_alua_supported_states, ALUA_U_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(unavailable,
tg_pt_gp_alua_supported_states, ALUA_U_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_unavailable, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(standby,
tg_pt_gp_alua_supported_states, ALUA_S_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(standby,
tg_pt_gp_alua_supported_states, ALUA_S_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_standby, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_optimized,
tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(active_optimized,
tg_pt_gp_alua_supported_states, ALUA_AO_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_optimized, S_IRUGO | S_IWUSR);

SE_DEV_ALUA_SUPPORT_STATE_SHOW(active_nonoptimized,
tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
SE_DEV_ALUA_SUPPORT_STATE_STORE(active_nonoptimized,
tg_pt_gp_alua_supported_states, ALUA_AN_SUP);
SE_DEV_ALUA_TG_PT_ATTR(alua_support_active_nonoptimized, S_IRUGO | S_IWUSR);

/*
* alua_write_metadata
*/
@@ -2210,24 +2294,24 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);

/*
* implict_trans_secs
* implicit_trans_secs
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_implict_trans_secs(tg_pt_gp, page);
return core_alua_show_implicit_trans_secs(tg_pt_gp, page);
}

static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
static ssize_t target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
return core_alua_store_implicit_trans_secs(tg_pt_gp, page, count);
}

SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
SE_DEV_ALUA_TG_PT_ATTR(implicit_trans_secs, S_IRUGO | S_IWUSR);

/*
* preferred

@@ -2350,10 +2434,17 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_access_state.attr,
&target_core_alua_tg_pt_gp_alua_access_status.attr,
&target_core_alua_tg_pt_gp_alua_access_type.attr,
&target_core_alua_tg_pt_gp_alua_support_transitioning.attr,
&target_core_alua_tg_pt_gp_alua_support_offline.attr,
&target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr,
&target_core_alua_tg_pt_gp_alua_support_unavailable.attr,
&target_core_alua_tg_pt_gp_alua_support_standby.attr,
&target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr,
&target_core_alua_tg_pt_gp_alua_support_active_optimized.attr,
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
&target_core_alua_tg_pt_gp_implict_trans_secs.attr,
&target_core_alua_tg_pt_gp_implicit_trans_secs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
@@ -92,6 +92,9 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

@@ -119,24 +122,20 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
}

/* Directly associate cmd with se_dev */
se_cmd->se_dev = se_lun->lun_se_dev;

/* TODO: get rid of this and use atomics for stats */
dev = se_lun->lun_se_dev;
spin_lock_irqsave(&dev->stats_lock, flags);
dev->num_cmds++;
atomic_long_inc(&dev->num_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
atomic_long_add(se_cmd->data_length, &dev->write_bytes);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
spin_unlock_irqrestore(&dev->stats_lock, flags);

spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
atomic_long_add(se_cmd->data_length, &dev->read_bytes);

return 0;
}

@@ -314,14 +313,14 @@ int core_enable_device_list_for_node(
deve = nacl->device_list[mapped_lun];

/*
* Check if the call is handling demo mode -> explict LUN ACL
* Check if the call is handling demo mode -> explicit LUN ACL
* transition. This transition must be for the same struct se_lun
* + mapped_lun that was setup in demo mode..
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
pr_err("struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" already set for demo mode -> explicit"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;

@@ -329,7 +328,7 @@ int core_enable_device_list_for_node(
if (deve->se_lun != lun) {
pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
" -> explicit LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
return -EINVAL;
}

@@ -1407,6 +1406,7 @@ static void scsi_dump_inquiry(struct se_device *dev)
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;

dev = hba->transport->alloc_device(hba, name);
if (!dev)

@@ -1423,7 +1423,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
INIT_LIST_HEAD(&dev->g_dev_node);
spin_lock_init(&dev->stats_lock);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);

@@ -1469,6 +1468,14 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
spin_lock_init(&xcopy_lun->lun_acl_lock);
spin_lock_init(&xcopy_lun->lun_sep_lock);
init_completion(&xcopy_lun->lun_ref_comp);

return dev;
}
@@ -385,9 +385,9 @@ static struct config_group *target_fabric_make_mappedlun(
}

config_group_init_type_name(&lacl->se_lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
&tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit);
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
"statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit);
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;

@@ -504,16 +504,16 @@ static struct config_group *target_fabric_make_nodeacl(
nacl_cg->default_groups[4] = NULL;

config_group_init_type_name(&se_nacl->acl_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit);
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit);

return &se_nacl->acl_group;
}

@@ -595,7 +595,7 @@ static struct config_group *target_fabric_make_np(

se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
&tf->tf_cit_tmpl.tfc_tpg_np_base_cit);

return &se_tpg_np->tpg_np_group;
}

@@ -899,9 +899,9 @@ static struct config_group *target_fabric_make_lun(
}

config_group_init_type_name(&lun->lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
&tf->tf_cit_tmpl.tfc_tpg_port_cit);
config_group_init_type_name(&lun->port_stat_grps.stat_group,
"statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
"statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit);
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;

@@ -1056,19 +1056,19 @@ static struct config_group *target_fabric_make_tpg(
se_tpg->tpg_group.default_groups[6] = NULL;

config_group_init_type_name(&se_tpg->tpg_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
&tf->tf_cit_tmpl.tfc_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
&tf->tf_cit_tmpl.tfc_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
&tf->tf_cit_tmpl.tfc_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
&tf->tf_cit_tmpl.tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
&tf->tf_cit_tmpl.tfc_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
&TF_CIT_TMPL(tf)->tfc_tpg_auth_cit);
&tf->tf_cit_tmpl.tfc_tpg_auth_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
&tf->tf_cit_tmpl.tfc_tpg_param_cit);

return &se_tpg->tpg_group;
}

@@ -1155,9 +1155,9 @@ static struct config_group *target_fabric_make_wwn(
wwn->wwn_group.default_groups[1] = NULL;

config_group_init_type_name(&wwn->wwn_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_cit);
&tf->tf_cit_tmpl.tfc_tpg_cit);
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
&TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
&tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit);

return &wwn->wwn_group;
}
|
|
|
@ -562,7 +562,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
|
|||
} else {
|
||||
ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
|
||||
/*
|
||||
* Perform implict vfs_fsync_range() for fd_do_writev() ops
|
||||
* Perform implicit vfs_fsync_range() for fd_do_writev() ops
|
||||
* for SCSI WRITEs with Forced Unit Access (FUA) set.
|
||||
* Allow this to happen independent of WCE=0 setting.
|
||||
*/
|
||||
|
|
|
@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
|
|||
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
|
||||
}
|
||||
|
||||
static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
|
||||
{
|
||||
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
||||
struct block_device *bd = ib_dev->ibd_bd;
|
||||
int ret;
|
||||
|
||||
ret = bdev_alignment_offset(bd);
|
||||
if (ret == -1)
|
||||
return 0;
|
||||
|
||||
/* convert offset-bytes to offset-lbas */
|
||||
return ret / bdev_logical_block_size(bd);
|
||||
}
|
||||
|
||||
static unsigned int iblock_get_lbppbe(struct se_device *dev)
|
||||
{
|
||||
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
||||
struct block_device *bd = ib_dev->ibd_bd;
|
||||
int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
|
||||
|
||||
return ilog2(logs_per_phys);
|
||||
}
|
||||
|
||||
static unsigned int iblock_get_io_min(struct se_device *dev)
|
||||
{
|
||||
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
||||
struct block_device *bd = ib_dev->ibd_bd;
|
||||
|
||||
return bdev_io_min(bd);
|
||||
}
|
||||
|
||||
static unsigned int iblock_get_io_opt(struct se_device *dev)
|
||||
{
|
||||
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
||||
struct block_device *bd = ib_dev->ibd_bd;
|
||||
|
||||
return bdev_io_opt(bd);
|
||||
}
|
||||
|
||||
static struct sbc_ops iblock_sbc_ops = {
|
||||
.execute_rw = iblock_execute_rw,
|
||||
.execute_sync_cache = iblock_execute_sync_cache,
|
||||
|
@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
|
|||
.show_configfs_dev_params = iblock_show_configfs_dev_params,
|
||||
.get_device_type = sbc_get_device_type,
|
||||
.get_blocks = iblock_get_blocks,
|
||||
.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
|
||||
.get_lbppbe = iblock_get_lbppbe,
|
||||
.get_io_min = iblock_get_io_min,
|
||||
.get_io_opt = iblock_get_io_opt,
|
||||
.get_write_cache = iblock_get_write_cache,
|
||||
};
|
||||
|
||||
|
|
|
@@ -75,8 +75,6 @@ extern struct se_device *g_lun0_dev;

struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
@@ -102,7 +100,7 @@ int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
int transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);

@@ -474,7 +474,7 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
pr_debug("Allowing explict CDB: 0x%02x for %s"
pr_debug("Allowing explicit CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));

@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
*/

if (!registered_nexus) {
pr_debug("Allowing implict CDB: 0x%02x"
pr_debug("Allowing implicit CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,7 +522,7 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/

pr_debug("Allowing implict CDB: 0x%02x for %s"
pr_debug("Allowing implicit CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));

@@ -683,7 +683,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
alua_port_list) {
/*
* This pointer will be NULL for demo mode MappedLUNs
* that have not been make explict via a ConfigFS
* that have not been make explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl)
@@ -1158,7 +1158,7 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
smp_mb__after_atomic_dec();
}

static int core_scsi3_check_implict_release(
static int core_scsi3_check_implicit_release(
struct se_device *dev,
struct t10_pr_registration *pr_reg)
{
@@ -1174,7 +1174,7 @@ static int core_scsi3_check_implict_release(
}
if (pr_res_holder == pr_reg) {
/*
* Perform an implict RELEASE if the registration that
* Perform an implicit RELEASE if the registration that
* is being released is holding the reservation.
*
* From spc4r17, section 5.7.11.1:
@@ -1192,7 +1192,7 @@ static int core_scsi3_check_implict_release(
* For 'All Registrants' reservation types, all existing
* registrations are still processed as reservation holders
* in core_scsi3_pr_seq_non_holder() after the initial
* reservation holder is implictly released here.
* reservation holder is implicitly released here.
*/
} else if (pr_reg->pr_reg_all_tg_pt &&
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -2125,7 +2125,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
pr_holder = core_scsi3_check_implict_release(
pr_holder = core_scsi3_check_implicit_release(
cmd->se_dev, pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
@@ -2402,7 +2402,7 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int explict)
int explicit)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
@@ -2416,7 +2416,7 @@ static void __core_scsi3_complete_pro_release(

pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explict) ? "explict" : "implict",
tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
@@ -2692,7 +2692,7 @@ static void __core_scsi3_complete_pro_preempt(
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Do an implict RELEASE of the existing reservation.
* Do an implicit RELEASE of the existing reservation.
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
@@ -2845,7 +2845,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* 5.7.11.4 Preempting, Table 52 and Figure 7.
*
* For a ZERO SA Reservation key, release
* all other registrations and do an implict
* all other registrations and do an implicit
* release of active persistent reservation.
*
* For a non-ZERO SA Reservation key, only

@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>

@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
buf[11] = dev->dev_attrib.block_size & 0xff;

if (dev->transport->get_lbppbe)
buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

if (dev->transport->get_alignment_offset_lbas) {
u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
buf[14] = (lalba >> 8) & 0x3f;
buf[15] = lalba & 0xff;
}

/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
buf[14] = 0x80;
buf[14] |= 0x80;

rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {

@@ -48,7 +48,7 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
buf[5] = 0x80;

/*
* Set TPGS field for explict and/or implict ALUA access type
* Set TPGS field for explicit and/or implicit ALUA access type
* and opteration.
*
* See spc4r17 section 6.4.2 Table 135
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
int opt, min;

/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
put_unaligned_be16(1, &buf[6]);
if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
else
put_unaligned_be16(1, &buf[6]);

/*
* Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set OPTIMAL TRANSFER LENGTH
*/
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
else
put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

/*
* Exit now if we don't support TP.
@@ -1250,7 +1257,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = (cdb[3] << 8) + cdb[4];

/*
* Do implict HEAD_OF_QUEUE processing for INQUIRY.
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;
@@ -1284,7 +1291,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_report_luns;
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
cmd->sam_task_attr = MSG_HEAD_TAG;

@@ -32,7 +32,6 @@
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -214,7 +213,8 @@ static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
struct se_device *dev =
container_of(sgrps, struct se_device, dev_stat_grps);

return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);

@@ -397,8 +397,8 @@ static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
container_of(sgrps, struct se_device, dev_stat_grps);

/* scsiLuNumCommands */
return snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)dev->num_cmds);
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->num_cmds));
}
DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);

@@ -409,7 +409,8 @@ static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);

/* scsiLuReadMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->read_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);

@@ -420,7 +421,8 @@ static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
container_of(sgrps, struct se_device, dev_stat_grps);

/* scsiLuWrittenMegaBytes */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
return snprintf(page, PAGE_SIZE, "%lu\n",
atomic_long_read(&dev->write_bytes) >> 20);
}
DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);

@@ -431,7 +433,7 @@ static ssize_t target_stat_scsi_lu_show_attr_resets(
container_of(sgrps, struct se_device, dev_stat_grps);

/* scsiLuInResets */
return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
return snprintf(page, PAGE_SIZE, "%lu\n", atomic_long_read(&dev->num_resets));
}
DEV_STAT_SCSI_LU_ATTR_RO(resets);

@@ -386,9 +386,7 @@ int core_tmr_lun_reset(
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}

spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
atomic_long_inc(&dev->num_resets);

pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",

@@ -116,6 +116,7 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(

return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/* core_tpg_add_node_to_devs():
*
@@ -633,6 +634,13 @@ int core_tpg_set_initiator_node_tag(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
@@ -646,15 +654,20 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);

ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;

ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0) {
percpu_ref_cancel_init(&lun->lun_ref);
return ret;
}

return 0;
}

@@ -691,10 +704,9 @@ int core_tpg_register(
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
}

se_tpg->se_tpg_type = se_tpg_type;
@@ -815,10 +827,16 @@ int core_tpg_post_addlun(
{
int ret;

ret = core_dev_export(lun_ptr, tpg, lun);
ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
if (ret < 0)
return ret;

ret = core_dev_export(lun_ptr, tpg, lun);
if (ret < 0) {
percpu_ref_cancel_init(&lun->lun_ref);
return ret;
}

spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
@@ -827,14 +845,6 @@ int core_tpg_post_addlun(
return 0;
}

static void core_tpg_shutdown_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
core_clear_lun_from_tpg(lun, tpg);
transport_clear_lun_from_sessions(lun);
}

struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun)
@@ -869,7 +879,8 @@ int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
core_tpg_shutdown_lun(tpg, lun);
core_clear_lun_from_tpg(lun, tpg);
transport_clear_lun_ref(lun);

core_dev_unexport(lun->lun_se_dev, tpg, lun);

@@ -28,7 +28,6 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
@@ -473,7 +472,7 @@ void transport_deregister_session(struct se_session *se_sess)
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->get_fabric_name());
/*
* If last kref is dropping now for an explict NodeACL, awake sleeping
* If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
* removal context.
*/
@@ -515,23 +514,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
if (write_pending)
cmd->t_state = TRANSPORT_WRITE_PENDING;

/*
* Determine if IOCTL context caller in requesting the stopping of this
* command for LUN shutdown purposes.
*/
if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

cmd->transport_state &= ~CMD_T_ACTIVE;
if (remove_from_lists)
target_remove_from_state_list(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);

complete(&cmd->transport_lun_stop_comp);
return 1;
}

if (remove_from_lists) {
target_remove_from_state_list(cmd);

@@ -585,15 +567,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
unsigned long flags;

if (!lun)
if (!lun || !cmd->lun_ref_active)
return;

spin_lock_irqsave(&lun->lun_cmd_lock, flags);
if (!list_empty(&cmd->se_lun_node))
list_del_init(&cmd->se_lun_node);
spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
@@ -668,7 +646,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
cmd->transport_state |= CMD_T_FAILED;

/*
* Check for case where an explict ABORT_TASK has been received
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
if (cmd->transport_state & CMD_T_ABORTED &&
@@ -1092,13 +1070,10 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->task_stop_comp);
@@ -1719,29 +1694,14 @@ void target_execute_cmd(struct se_cmd *cmd)
/*
* If the received CDB has aleady been aborted stop processing it here.
*/
if (transport_check_aborted_status(cmd, 1)) {
complete(&cmd->transport_lun_stop_comp);
if (transport_check_aborted_status(cmd, 1))
return;
}

/*
* Determine if IOCTL context caller in requesting the stopping of this
* command for LUN shutdown purposes.
*/
spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));

cmd->transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irq(&cmd->t_state_lock);
complete(&cmd->transport_lun_stop_comp);
return;
}
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
spin_lock_irq(&cmd->t_state_lock);
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
__func__, __LINE__,
@@ -2404,164 +2364,23 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

/* transport_lun_wait_for_tasks():
*
* Called from ConfigFS context to stop the passed struct se_cmd to allow
* an struct se_lun to be successfully shutdown.
*/
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
unsigned long flags;
int ret = 0;

/*
* If the frontend has already requested this struct se_cmd to
* be stopped, we can safely ignore this struct se_cmd.
*/
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->transport_state & CMD_T_STOP) {
cmd->transport_state &= ~CMD_T_LUN_STOP;

pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, false, false);
return -EPERM;
}
cmd->transport_state |= CMD_T_LUN_FE_STOP;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);

// XXX: audit task_flags checks.
spin_lock_irqsave(&cmd->t_state_lock, flags);
if ((cmd->transport_state & CMD_T_BUSY) &&
(cmd->transport_state & CMD_T_SENT)) {
if (!target_stop_cmd(cmd, &flags))
ret++;
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);

pr_debug("ConfigFS: cmd: %p stop tasks ret:"
" %d\n", cmd, ret);
if (!ret) {
pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
wait_for_completion(&cmd->transport_lun_stop_comp);
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
}

return 0;
}

static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct se_cmd *cmd = NULL;
unsigned long lun_flags, cmd_flags;
/*
* Do exception processing and return CHECK_CONDITION status to the
* Initiator Port.
*/
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
while (!list_empty(&lun->lun_cmd_list)) {
cmd = list_first_entry(&lun->lun_cmd_list,
struct se_cmd, se_lun_node);
list_del_init(&cmd->se_lun_node);

spin_lock(&cmd->t_state_lock);
pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
cmd->se_lun->unpacked_lun,
cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state |= CMD_T_LUN_STOP;
spin_unlock(&cmd->t_state_lock);

spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

if (!cmd->se_lun) {
pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
cmd->se_tfo->get_task_tag(cmd),
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
BUG();
}
/*
* If the Storage engine still owns the iscsi_cmd_t, determine
* and/or stop its context.
*/
pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
cmd->se_tfo->get_task_tag(cmd));

if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}

pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
"_wait_for_tasks(): SUCCESS\n",
cmd->se_lun->unpacked_lun,
cmd->se_tfo->get_task_tag(cmd));

spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
goto check_cond;
}
cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
target_remove_from_state_list(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);

/*
* The Storage engine stopped this struct se_cmd before it was
* send to the fabric frontend for delivery back to the
* Initiator Node. Return this SCSI CDB back with an
* CHECK_CONDITION status.
*/
check_cond:
transport_send_check_condition_and_sense(cmd,
TCM_NON_EXISTENT_LUN, 0);
/*
* If the fabric frontend is waiting for this iscsi_cmd_t to
* be released, notify the waiting thread now that LU has
* finished accessing it.
*/
spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
pr_debug("SE_LUN[%d] - Detected FE stop for"
" struct se_cmd: %p ITT: 0x%08x\n",
lun->unpacked_lun,
cmd, cmd->se_tfo->get_task_tag(cmd));

spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
transport_cmd_check_stop(cmd, false, false);
complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));

spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}

static int transport_clear_lun_thread(void *p)
static int transport_clear_lun_ref_thread(void *p)
{
struct se_lun *lun = p;

__transport_clear_lun_from_sessions(lun);
percpu_ref_kill(&lun->lun_ref);

wait_for_completion(&lun->lun_ref_comp);
complete(&lun->lun_shutdown_comp);

return 0;
}

int transport_clear_lun_from_sessions(struct se_lun *lun)
int transport_clear_lun_ref(struct se_lun *lun)
{
struct task_struct *kt;

kt = kthread_run(transport_clear_lun_thread, lun,
kt = kthread_run(transport_clear_lun_ref_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
pr_err("Unable to start clear_lun thread\n");
@@ -2595,43 +2414,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
* The cmd->transport_lun_stopped_sem will be upped by
* transport_clear_lun_from_sessions() once the ConfigFS context caller
* has completed its operation on the struct se_cmd.
*/
if (cmd->transport_state & CMD_T_LUN_STOP) {
pr_debug("wait_for_tasks: Stopping"
" wait_for_completion(&cmd->t_tasktransport_lun_fe"
"_stop_comp); for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));
/*
* There is a special case for WRITES where a FE exception +
* LUN shutdown means ConfigFS context is still sleeping on
* transport_lun_stop_comp in transport_lun_wait_for_tasks().
* We go ahead and up transport_lun_stop_comp just to be sure
* here.
*/
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->transport_lun_stop_comp);
wait_for_completion(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&cmd->t_state_lock, flags);

target_remove_from_state_list(cmd);
/*
* At this point, the frontend who was the originator of this
* struct se_cmd, now owns the structure and can be released through
* normal means below.
*/
pr_debug("wait_for_tasks: Stopped"
" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
"stop_comp); for ITT: 0x%08x\n",
cmd->se_tfo->get_task_tag(cmd));

cmd->transport_state &= ~CMD_T_LUN_STOP;
}

if (!(cmd->transport_state & CMD_T_ACTIVE)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -2910,6 +2692,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));

cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
cmd->se_tfo->queue_status(cmd);

@@ -2938,6 +2721,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
smp_mb__after_atomic_inc();
return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;

@@ -19,7 +19,7 @@
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08

#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09

@@ -405,9 +405,6 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);

if (xpt_cmd->remote_port)
kfree(se_cmd->se_lun);

kfree(xpt_cmd);
}

@@ -572,22 +569,10 @@ static int target_xcopy_init_pt_lun(
return 0;
}

pt_cmd->se_lun = kzalloc(sizeof(struct se_lun), GFP_KERNEL);
if (!pt_cmd->se_lun) {
pr_err("Unable to allocate pt_cmd->se_lun\n");
return -ENOMEM;
}
init_completion(&pt_cmd->se_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&pt_cmd->se_lun->lun_cmd_list);
INIT_LIST_HEAD(&pt_cmd->se_lun->lun_acl_list);
spin_lock_init(&pt_cmd->se_lun->lun_acl_lock);
spin_lock_init(&pt_cmd->se_lun->lun_cmd_lock);
spin_lock_init(&pt_cmd->se_lun->lun_sep_lock);

pt_cmd->se_lun = &se_dev->xcopy_lun;
pt_cmd->se_dev = se_dev;

pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
pt_cmd->se_lun->lun_se_dev = se_dev;
pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;

pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
@@ -658,8 +643,6 @@ static int target_xcopy_setup_pt_cmd(
return 0;

out:
if (remote_port == true)
kfree(cmd->se_lun);
return ret;
}

@@ -22,6 +22,7 @@
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */

struct ft_transport_id {
__u8 format;

@@ -28,6 +28,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/percpu_ida.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -89,16 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
struct se_session *se_sess;

if (!cmd)
return;
se_sess = cmd->sess->se_sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
ft_sess_put(cmd->sess); /* undo get from lookup at recv */
kfree(cmd);
}

void ft_release_cmd(struct se_cmd *se_cmd)
@@ -432,14 +435,21 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
struct se_session *se_sess = sess->se_sess;
int tag;

cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
if (tag < 0)
goto busy;

cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct ft_cmd));

cmd->se_cmd.map_tag = tag;
cmd->sess = sess;
cmd->seq = lport->tt.seq_assign(lport, fp);
if (!cmd->seq) {
kfree(cmd);
percpu_ida_free(&se_sess->sess_tag_pool, tag);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */

@@ -571,16 +571,16 @@ int ft_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
ft_nacl_base_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* register the fabric for use within TCM
*/

@@ -210,7 +210,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;

sess->se_sess = transport_init_session();
sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
sizeof(struct ft_cmd));
if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;

@@ -1923,15 +1923,15 @@ static int usbg_register_configfs(void)
}

fabric->tf_ops = usbg_ops;
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
printk(KERN_ERR "target_fabric_configfs_register() failed"

@@ -2168,15 +2168,15 @@ static int tcm_vhost_register_configfs(void)
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/

@@ -34,6 +34,11 @@ struct se_subsystem_api {
sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
u32 (*get_device_type)(struct se_device *);
sector_t (*get_blocks)(struct se_device *);
sector_t (*get_alignment_offset_lbas)(struct se_device *);
/* lbppbe = logical blocks per physical block exponent. see SBC-3 */
unsigned int (*get_lbppbe)(struct se_device *);
unsigned int (*get_io_min)(struct se_device *);
unsigned int (*get_io_opt)(struct se_device *);
unsigned char *(*get_sense_buffer)(struct se_cmd *);
bool (*get_write_cache)(struct se_device *);
};

@@ -227,6 +227,7 @@ enum tcm_tmreq_table {

/* fabric independent task management response values */
enum tcm_tmrsp_table {
TMR_FUNCTION_FAILED = 0,
TMR_FUNCTION_COMPLETE = 1,
TMR_TASK_DOES_NOT_EXIST = 2,
TMR_LUN_DOES_NOT_EXIST = 3,
@@ -282,11 +283,12 @@ struct t10_alua_lu_gp_member {
struct t10_alua_tg_pt_gp {
u16 tg_pt_gp_id;
int tg_pt_gp_valid_id;
int tg_pt_gp_alua_supported_states;
int tg_pt_gp_alua_access_status;
int tg_pt_gp_alua_access_type;
int tg_pt_gp_nonop_delay_msecs;
int tg_pt_gp_trans_delay_msecs;
int tg_pt_gp_implict_trans_secs;
int tg_pt_gp_implicit_trans_secs;
int tg_pt_gp_pref;
int tg_pt_gp_write_metadata;
/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -442,7 +444,6 @@ struct se_cmd {
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
@@ -470,15 +471,11 @@ struct se_cmd {
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_FAILED (1 << 6)
#define CMD_T_LUN_STOP (1 << 7)
#define CMD_T_LUN_FE_STOP (1 << 8)
#define CMD_T_DEV_ACTIVE (1 << 9)
#define CMD_T_REQUEST_STOP (1 << 10)
#define CMD_T_BUSY (1 << 11)
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
struct completion transport_lun_fe_stop_comp;
struct completion transport_lun_stop_comp;

struct work_struct work;

@@ -498,6 +495,9 @@ struct se_cmd {

/* backend private data */
void *priv;

/* Used for lun->lun_ref counting */
bool lun_ref_active;
};

struct se_ua {
@@ -628,6 +628,34 @@ struct se_dev_attrib {
struct config_group da_group;
};

struct se_port_stat_grps {
struct config_group stat_group;
struct config_group scsi_port_group;
struct config_group scsi_tgt_port_group;
struct config_group scsi_transport_group;
};

struct se_lun {
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
/* See transport_lun_status_table */
enum transport_lun_status_table lun_status;
u32 lun_access;
u32 lun_flags;
u32 unpacked_lun;
atomic_t lun_acl_count;
spinlock_t lun_acl_lock;
spinlock_t lun_sep_lock;
struct completion lun_shutdown_comp;
struct list_head lun_acl_list;
struct se_device *lun_se_dev;
struct se_port *lun_sep;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
struct completion lun_ref_comp;
struct percpu_ref lun_ref;
};

struct se_dev_stat_grps {
struct config_group stat_group;
struct config_group scsi_dev_group;
@@ -656,11 +684,10 @@ struct se_device {
/* Pointer to transport specific device structure */
u32 dev_index;
u64 creation_time;
u32 num_resets;
u64 num_cmds;
u64 read_bytes;
u64 write_bytes;
spinlock_t stats_lock;
atomic_long_t num_resets;
atomic_long_t num_cmds;
atomic_long_t read_bytes;
atomic_long_t write_bytes;
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
atomic_t dev_ordered_id;
@@ -711,6 +738,7 @@ struct se_device {
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
struct se_lun xcopy_lun;
};

struct se_hba {
@@ -730,34 +758,6 @@ struct se_hba {
struct se_subsystem_api *transport;
};

struct se_port_stat_grps {
struct config_group stat_group;
struct config_group scsi_port_group;
struct config_group scsi_tgt_port_group;
struct config_group scsi_transport_group;
};

struct se_lun {
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
/* See transport_lun_status_table */
enum transport_lun_status_table lun_status;
u32 lun_access;
u32 lun_flags;
u32 unpacked_lun;
atomic_t lun_acl_count;
spinlock_t lun_acl_lock;
spinlock_t lun_cmd_lock;
spinlock_t lun_sep_lock;
struct completion lun_shutdown_comp;
struct list_head lun_cmd_list;
struct list_head lun_acl_list;
struct se_device *lun_se_dev;
struct se_port *lun_sep;
struct config_group lun_group;
struct se_port_stat_grps port_stat_grps;
};

struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;

@@ -54,4 +54,3 @@ struct target_fabric_configfs {
struct target_fabric_configfs_template tf_cit_tmpl;
};

#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)

@@ -137,6 +137,8 @@ void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
void __target_execute_cmd(struct se_cmd *);
int transport_lookup_tmr_lun(struct se_cmd *, u32);

struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
void core_tpg_clear_object_luns(struct se_portal_group *);

@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o percpu-refcount.o percpu_ida.o
earlycpio.o

obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
percpu_ida.o
percpu-refcount.o percpu_ida.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += kstrtox.o

@@ -117,8 +117,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
min(pool->nr_free, pool->percpu_batch_size));
}

static inline unsigned alloc_local_tag(struct percpu_ida *pool,
struct percpu_ida_cpu *tags)
static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
{
int tag = -ENOSPC;

@@ -159,7 +158,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
tags = this_cpu_ptr(pool->tag_cpu);

/* Fastpath */
tag = alloc_local_tag(pool, tags);
tag = alloc_local_tag(tags);
if (likely(tag >= 0)) {
local_irq_restore(flags);
return tag;