Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
 "It's been usually busy for summer, with most of the efforts centered
  around TCMU developments and various target-core + fabric driver bug
  fixing activities. Not particularly large in terms of LoC, but lots of
  smaller patches from many different folks.

  The highlights include:

   - ibmvscsis logical partition manager support (Michael Cyr + Bryant Ly)
   - Convert target/iblock WRITE_SAME to blkdev_issue_zeroout (hch + nab)
   - Add support for TMR percpu LUN reference counting (nab)
   - Fix a potential deadlock between EXTENDED_COPY and iscsi shutdown (Bart)
   - Fix COMPARE_AND_WRITE caw_sem leak during se_cmd quiesce (Jiang Yi)
   - Fix TCMU module removal (Xiubo Li)
   - Fix iser-target OOPs during login failure (Andrea Righi + Sagi)
   - Breakup target-core free_device backend driver callback (mnc)
   - Perform TCMU add/delete/reconfig synchronously (mnc)
   - Fix TCMU multiple UIO open/close sequences (mnc)
   - Fix TCMU CHECK_CONDITION sense handling (mnc)
   - Fix target-core SAM_STAT_BUSY + TASK_SET_FULL handling (mnc + nab)
   - Introduce TYPE_ZBC support in PSCSI (Damien Le Moal)
   - Fix possible TCMU memory leak + OOPs when recalculating cmd base size
     (Xiubo Li + Bryant Ly + Damien Le Moal + mnc)
   - Add login_keys_workaround attribute for non RFC initiators
     (Robert LeBlanc + Arun Easi + nab)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (68 commits)
  iscsi-target: Add login_keys_workaround attribute for non RFC initiators
  Revert "qla2xxx: Fix incorrect tcm_qla2xxx_free_cmd use during TMR ABORT"
  tcmu: clean up the code and with one small fix
  tcmu: Fix possbile memory leak / OOPs when recalculating cmd base size
  target: export lio pgr/alua support as device attr
  target: Fix return sense reason in target_scsi3_emulate_pr_out
  target: Fix cmd size for PR-OUT in passthrough_parse_cdb
  tcmu: Fix dev_config_store
  target: pscsi: Introduce TYPE_ZBC support
  target: Use macro for WRITE_VERIFY_32 operation codes
  target: fix SAM_STAT_BUSY/TASK_SET_FULL handling
  target: remove transport_complete
  pscsi: finish cmd processing from pscsi_req_done
  tcmu: fix sense handling during completion
  target: add helper to copy sense to se_cmd buffer
  target: do not require a transport_complete for SCF_TRANSPORT_TASK_SENSE
  target: make device_mutex and device_list static
  tcmu: Fix flushing cmd entry dcache page
  tcmu: fix multiple uio open/close sequences
  tcmu: drop configured check in destroy
  ...
commit 48ea2cedde
@@ -1452,7 +1452,7 @@ static void
 isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct isert_conn *isert_conn = wc->qp->qp_context;
-	struct ib_device *ib_dev = isert_conn->cm_id->device;
+	struct ib_device *ib_dev = isert_conn->device->ib_device;
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		isert_print_wc(wc, "login recv");
@@ -1157,8 +1157,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 	}
 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-	pr_debug("Aborting cmd with state %d and tag %lld\n", state,
-		 ioctx->cmd.tag);
+	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
+		 ioctx->state, ioctx->cmd.tag);
 
 	switch (state) {
 	case SRPT_STATE_NEW:
@@ -155,6 +155,9 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
 	qrc = h_free_crq(vscsi->dds.unit_id);
 	switch (qrc) {
 	case H_SUCCESS:
+		spin_lock_bh(&vscsi->intr_lock);
+		vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
+		spin_unlock_bh(&vscsi->intr_lock);
 		break;
 
 	case H_HARDWARE:
@@ -422,6 +425,9 @@ static void ibmvscsis_disconnect(struct work_struct *work)
 	new_state = vscsi->new_state;
 	vscsi->new_state = 0;
 
+	vscsi->flags |= DISCONNECT_SCHEDULED;
+	vscsi->flags &= ~SCHEDULE_DISCONNECT;
+
 	pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
 		 vscsi->state);
 
@@ -802,6 +808,13 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
 	long rc = ADAPT_SUCCESS;
 	uint format;
 
+	rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
+		      0, 0, 0, 0);
+	if (rc == H_SUCCESS)
+		vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
+	else if (rc != H_NOT_FOUND)
+		pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+
 	vscsi->flags &= PRESERVE_FLAG_FIELDS;
 	vscsi->rsp_q_timer.timer_pops = 0;
 	vscsi->debit = 0;
@@ -950,6 +963,63 @@ static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
 	}
 }
 
+/**
+ * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
+ * @vscsi:	Pointer to our adapter structure
+ * @idle:	Indicates whether we were called from adapter_idle. This
+ *		is important to know if we need to do a disconnect, since if
+ *		we're called from adapter_idle, we're still processing the
+ *		current disconnect, so we can't just call post_disconnect.
+ *
+ * This function is called when the adapter is idle when phyp has sent
+ * us a Prepare for Suspend Transport Event.
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process or interrupt environment called with interrupt lock held
+ */
+static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
+{
+	long rc = 0;
+	struct viosrp_crq *crq;
+
+	/* See if there is a Resume event in the queue */
+	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
+
+	pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+		 vscsi->flags, vscsi->state, (int)crq->valid);
+
+	if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
+		rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
+			      0, 0);
+		if (rc) {
+			pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+			rc = 0;
+		}
+	} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
+		    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
+		   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
+				     (crq->format != RESUME_FROM_SUSP)))) {
+		if (idle) {
+			vscsi->state = ERR_DISCONNECT_RECONNECT;
+			ibmvscsis_reset_queue(vscsi);
+			rc = -1;
+		} else if (vscsi->state == CONNECTED) {
+			ibmvscsis_post_disconnect(vscsi,
+						  ERR_DISCONNECT_RECONNECT, 0);
+		}
+
+		vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
+
+		if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
+				     (crq->format != RESUME_FROM_SUSP)))
+			pr_err("Invalid element in CRQ after Prepare for Suspend");
+	}
+
+	vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
+
+	return rc;
+}
+
 /**
  * ibmvscsis_trans_event() - Handle a Transport Event
  * @vscsi:	Pointer to our adapter structure
@@ -974,18 +1044,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
 	case PARTNER_FAILED:
 	case PARTNER_DEREGISTER:
 		ibmvscsis_delete_client_info(vscsi, true);
-		break;
-
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
-			(uint)crq->format);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
-					  RESPONSE_Q_DOWN);
-		break;
-	}
-
-	if (rc == ADAPT_SUCCESS) {
+		if (crq->format == MIGRATED)
+			vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
 		switch (vscsi->state) {
 		case NO_QUEUE:
 		case ERR_DISCONNECTED:
@@ -1034,6 +1094,60 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
 			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
 			break;
 		}
+		break;
+
+	case PREPARE_FOR_SUSPEND:
+		pr_debug("Prep for Suspend, crq status = 0x%x\n",
+			 (int)crq->status);
+		switch (vscsi->state) {
+		case ERR_DISCONNECTED:
+		case WAIT_CONNECTION:
+		case CONNECTED:
+			ibmvscsis_ready_for_suspend(vscsi, false);
+			break;
+		case SRP_PROCESSING:
+			vscsi->resume_state = vscsi->state;
+			vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
+			if (crq->status == CRQ_ENTRY_OVERWRITTEN)
+				vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
+			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
+			break;
+		case NO_QUEUE:
+		case UNDEFINED:
+		case UNCONFIGURING:
+		case WAIT_ENABLED:
+		case ERR_DISCONNECT:
+		case ERR_DISCONNECT_RECONNECT:
+		case WAIT_IDLE:
+			pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+			       vscsi->state);
+			break;
+		}
+		break;
+
+	case RESUME_FROM_SUSP:
+		pr_debug("Resume from Suspend, crq status = 0x%x\n",
+			 (int)crq->status);
+		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
+			vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
+		} else {
+			if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
+			    (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
+				ibmvscsis_post_disconnect(vscsi,
+							  ERR_DISCONNECT_RECONNECT,
+							  0);
+				vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
+			}
+		}
+		break;
+
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
+			(uint)crq->format);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
+					  RESPONSE_Q_DOWN);
+		break;
 	}
 
 	rc = vscsi->flags & SCHEDULE_DISCONNECT;
@@ -1201,6 +1315,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
 {
 	int free_qs = false;
+	long rc = 0;
 
 	pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
 		 vscsi->state);
@@ -1240,7 +1355,14 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
 		vscsi->rsp_q_timer.timer_pops = 0;
 		vscsi->debit = 0;
 		vscsi->credit = 0;
-		if (vscsi->flags & TRANS_EVENT) {
+		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
+			vscsi->state = vscsi->resume_state;
+			vscsi->resume_state = 0;
+			rc = ibmvscsis_ready_for_suspend(vscsi, true);
+			vscsi->flags &= ~DISCONNECT_SCHEDULED;
+			if (rc)
+				break;
+		} else if (vscsi->flags & TRANS_EVENT) {
 			vscsi->state = WAIT_CONNECTION;
 			vscsi->flags &= PRESERVE_FLAG_FIELDS;
 		} else {
@@ -3792,8 +3914,16 @@ static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
 {
 	struct ibmvscsis_tport *tport =
 		container_of(wwn, struct ibmvscsis_tport, tport_wwn);
+	u16 tpgt;
 	int rc;
 
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	rc = kstrtou16(name + 5, 0, &tpgt);
+	if (rc)
+		return ERR_PTR(rc);
+	tport->tport_tpgt = tpgt;
+
 	tport->releasing = false;
 
 	rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
@@ -262,6 +262,14 @@ struct scsi_info {
 #define DISCONNECT_SCHEDULED 0x00800
 /* remove function is sleeping */
 #define CFG_SLEEPING 0x01000
+/* Register for Prepare for Suspend Transport Events */
+#define PREP_FOR_SUSPEND_ENABLED 0x02000
+/* Prepare for Suspend event sent */
+#define PREP_FOR_SUSPEND_PENDING 0x04000
+/* Resume from Suspend event sent */
+#define PREP_FOR_SUSPEND_ABORTED 0x08000
+/* Prepare for Suspend event overwrote another CRQ entry */
+#define PREP_FOR_SUSPEND_OVERWRITE 0x10000
 	u32 flags;
 	/* adapter lock */
 	spinlock_t intr_lock;
@@ -272,6 +280,7 @@ struct scsi_info {
 	/* used in crq, to tag what iu the response is for */
 	u64 empty_iu_tag;
 	uint new_state;
+	uint resume_state;
 	/* control block for the response queue timer */
 	struct timer_cb rsp_q_timer;
 	/* keep last client to enable proper accounting */
@@ -324,8 +333,13 @@ struct scsi_info {
 #define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \
 				  ((VSCSI)->flags & BLOCK))
 
+#define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \
+				PREP_FOR_SUSPEND_PENDING | \
+				PREP_FOR_SUSPEND_ABORTED | \
+				PREP_FOR_SUSPEND_OVERWRITE)
+
 /* flag bit that are not reset during disconnect */
-#define PRESERVE_FLAG_FIELDS 0
+#define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS)
 
 #define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf))
 
@@ -335,6 +349,13 @@ struct scsi_info {
 #ifndef H_GET_PARTNER_INFO
 #define H_GET_PARTNER_INFO 0x0000000000000008LL
 #endif
+#ifndef H_ENABLE_PREPARE_FOR_SUSPEND
+#define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL
+#endif
+#ifndef H_READY_FOR_SUSPEND
+#define H_READY_FOR_SUSPEND 0x000000000000001ELL
+#endif
+
 
 #define h_copy_rdma(l, sa, sb, da, db) \
 	plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
@@ -30,10 +30,13 @@ enum srp_trans_event {
 	UNUSED_FORMAT = 0,
 	PARTNER_FAILED = 1,
 	PARTNER_DEREGISTER = 2,
-	MIGRATED = 6
+	MIGRATED = 6,
+	PREPARE_FOR_SUSPEND = 9,
+	RESUME_FROM_SUSP = 0xA
 };
 
 enum srp_status {
+	CRQ_ENTRY_OVERWRITTEN = 0x20,
 	HEADER_DESCRIPTOR = 0xF1,
 	PING = 0xF5,
 	PING_RESPONSE = 0xF6
@@ -1874,36 +1874,13 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	struct abts_recv_from_24xx *abts, struct fc_port *sess)
 {
 	struct qla_hw_data *ha = vha->hw;
-	struct se_session *se_sess = sess->se_sess;
 	struct qla_tgt_mgmt_cmd *mcmd;
-	struct qla_tgt_cmd *cmd;
-	struct se_cmd *se_cmd;
 	int rc;
-	bool found_lun = false;
-	unsigned long flags;
 
-	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
-		if (se_cmd->tag == abts->exchange_addr_to_abort) {
-			found_lun = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
-	/* cmd not in LIO lists, look in qla list */
-	if (!found_lun) {
 	if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
 		/* send TASK_ABORT response immediately */
-		qlt_24xx_send_abts_resp(ha->base_qpair, abts,
-			FCP_TMF_CMPL, false);
+		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
 		return 0;
-	} else {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
-		    "unable to find cmd in driver or LIO for tag 0x%x\n",
-		    abts->exchange_addr_to_abort);
-		return -ENOENT;
-	}
 	}
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
@@ -1919,14 +1896,17 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	}
 	memset(mcmd, 0, sizeof(*mcmd));
 
-	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
 	mcmd->sess = sess;
 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 	mcmd->reset_count = ha->base_qpair->chip_reset;
 	mcmd->tmr_func = QLA_TGT_ABTS;
 	mcmd->qpair = ha->base_qpair;
 
-	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
+	/*
+	 * LUN is looked up by target-core internally based on the passed
+	 * abts->exchange_addr_to_abort tag.
+	 */
+	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func,
 	    abts->exchange_addr_to_abort);
 	if (rc != 0) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -600,11 +600,13 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
 	struct fc_port *sess = mcmd->sess;
 	struct se_cmd *se_cmd = &mcmd->se_cmd;
 	int transl_tmr_func = 0;
+	int flags = TARGET_SCF_ACK_KREF;
 
 	switch (tmr_func) {
 	case QLA_TGT_ABTS:
 		pr_debug("%ld: ABTS received\n", sess->vha->host_no);
 		transl_tmr_func = TMR_ABORT_TASK;
+		flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG;
 		break;
 	case QLA_TGT_2G_ABORT_TASK:
 		pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
@@ -637,7 +639,7 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
 	}
 
 	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
-	    transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+	    transl_tmr_func, GFP_ATOMIC, tag, flags);
 }
 
 static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
@@ -488,15 +488,13 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
 
 void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
-
 	spin_lock_bh(&conn->cmd_lock);
 	if (!list_empty(&cmd->i_conn_node) &&
 	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
 		list_del_init(&cmd->i_conn_node);
 	spin_unlock_bh(&conn->cmd_lock);
 
-	__iscsit_free_cmd(cmd, scsi_cmd, true);
+	__iscsit_free_cmd(cmd, true);
 }
 EXPORT_SYMBOL(iscsit_aborted_task);
 
@@ -1251,12 +1249,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * execution. These exceptions are processed in CmdSN order using
 	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
 	 */
-	if (cmd->sense_reason) {
-		if (cmd->reject_reason)
-			return 0;
-
+	if (cmd->sense_reason)
 		return 1;
-	}
 	/*
 	 * Call directly into transport_generic_new_cmd() to perform
 	 * the backend memory allocation.
@@ -781,6 +781,7 @@ DEF_TPG_ATTRIB(default_erl);
 DEF_TPG_ATTRIB(t10_pi);
 DEF_TPG_ATTRIB(fabric_prot_type);
 DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_attr_authentication,
@@ -796,6 +797,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 	&iscsi_tpg_attrib_attr_t10_pi,
 	&iscsi_tpg_attrib_attr_fabric_prot_type,
 	&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+	&iscsi_tpg_attrib_attr_login_keys_workaround,
 	NULL,
 };
 
@@ -655,28 +655,6 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 	iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
-static void iscsi_target_do_cleanup(struct work_struct *work)
-{
-	struct iscsi_conn *conn = container_of(work,
-				struct iscsi_conn, login_cleanup_work.work);
-	struct sock *sk = conn->sock->sk;
-	struct iscsi_login *login = conn->login;
-	struct iscsi_np *np = login->np;
-	struct iscsi_portal_group *tpg = conn->tpg;
-	struct iscsi_tpg_np *tpg_np = conn->tpg_np;
-
-	pr_debug("Entering iscsi_target_do_cleanup\n");
-
-	cancel_delayed_work_sync(&conn->login_work);
-	conn->orig_state_change(sk);
-
-	iscsi_target_restore_sock_callbacks(conn);
-	iscsi_target_login_drop(conn, login);
-	iscsit_deaccess_np(np, tpg, tpg_np);
-
-	pr_debug("iscsi_target_do_cleanup done()\n");
-}
-
 static void iscsi_target_sk_state_change(struct sock *sk)
 {
 	struct iscsi_conn *conn;
@@ -886,7 +864,8 @@ static int iscsi_target_handle_csg_zero(
 			SENDER_TARGET,
 			login->rsp_buf,
 			&login->rsp_length,
-			conn->param_list);
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
 	if (ret < 0)
 		return -1;
 
@@ -956,7 +935,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
 			SENDER_TARGET,
 			login->rsp_buf,
 			&login->rsp_length,
-			conn->param_list);
+			conn->param_list,
+			conn->tpg->tpg_attrib.login_keys_workaround);
 	if (ret < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
 				ISCSI_LOGIN_STATUS_INIT_ERR);
@@ -1082,7 +1062,6 @@ int iscsi_target_locate_portal(
 	int sessiontype = 0, ret = 0, tag_num, tag_size;
 
 	INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
-	INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup);
 	iscsi_target_set_sock_callbacks(conn);
 
 	login->np = np;
@@ -1331,7 +1310,6 @@ int iscsi_target_start_negotiation(
 
 	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
-		cancel_delayed_work_sync(&conn->login_cleanup_work);
 		iscsi_target_restore_sock_callbacks(conn);
 		iscsi_remove_failed_auth_entry(conn);
 	}
@@ -765,7 +765,8 @@ static int iscsi_check_for_auth_key(char *key)
 	return 0;
 }
 
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+						    bool keys_workaround)
 {
 	if (IS_TYPE_BOOL_AND(param)) {
 		if (!strcmp(param->value, NO))
@@ -773,19 +774,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
 	} else if (IS_TYPE_BOOL_OR(param)) {
 		if (!strcmp(param->value, YES))
 			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
 			/*
 			 * Required for gPXE iSCSI boot client
 			 */
 			if (!strcmp(param->name, IMMEDIATEDATA))
 				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
 	} else if (IS_TYPE_NUMBER(param)) {
 		if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
 			SET_PSTATE_REPLY_OPTIONAL(param);
+
+		if (keys_workaround) {
+			/*
+			 * Required for Mellanox Flexboot PXE boot ROM
+			 */
+			if (!strcmp(param->name, FIRSTBURSTLENGTH))
+				SET_PSTATE_REPLY_OPTIONAL(param);
+
 			/*
 			 * Required for gPXE iSCSI boot client
 			 */
 			if (!strcmp(param->name, MAXCONNECTIONS))
 				SET_PSTATE_REPLY_OPTIONAL(param);
+		}
 	} else if (IS_PHASE_DECLARATIVE(param))
 		SET_PSTATE_REPLY_OPTIONAL(param);
 }
@@ -1422,7 +1435,8 @@ int iscsi_encode_text_output(
 	u8 sender,
 	char *textbuf,
 	u32 *length,
-	struct iscsi_param_list *param_list)
+	struct iscsi_param_list *param_list,
+	bool keys_workaround)
 {
 	char *output_buf = NULL;
 	struct iscsi_extra_response *er;
@@ -1458,7 +1472,8 @@ int iscsi_encode_text_output(
 		*length += 1;
 		output_buf = textbuf + *length;
 		SET_PSTATE_PROPOSER(param);
-		iscsi_check_proposer_for_optional_reply(param);
+		iscsi_check_proposer_for_optional_reply(param,
+							keys_workaround);
 		pr_debug("Sending key: %s=%s\n",
 				param->name, param->value);
 	}
@@ -46,7 +46,7 @@ extern int iscsi_extract_key_value(char *, char **, char **);
 extern int iscsi_update_param_value(struct iscsi_param *, char *);
 extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
 extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
-			struct iscsi_param_list *);
+			struct iscsi_param_list *, bool);
 extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
 extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
 			struct iscsi_param_list *);
@@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
 	a->t10_pi = TA_DEFAULT_T10_PI;
 	a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
 	a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+	a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -311,11 +312,9 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
 	int ret;
 
-	spin_lock(&tpg->tpg_state_lock);
 	if (tpg->tpg_state == TPG_STATE_ACTIVE) {
 		pr_err("iSCSI target portal group: %hu is already"
 			" active, ignoring request.\n", tpg->tpgt);
-		spin_unlock(&tpg->tpg_state_lock);
 		return -EINVAL;
 	}
 	/*
@@ -324,10 +323,8 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 	 * is enforced (as per default), and remove the NONE option.
 	 */
 	param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
-	if (!param) {
-		spin_unlock(&tpg->tpg_state_lock);
+	if (!param)
 		return -EINVAL;
-	}
 
 	if (tpg->tpg_attrib.authentication) {
 		if (!strcmp(param->value, NONE)) {
@@ -341,6 +338,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 			goto err;
 	}
 
+	spin_lock(&tpg->tpg_state_lock);
 	tpg->tpg_state = TPG_STATE_ACTIVE;
 	spin_unlock(&tpg->tpg_state_lock);
 
@@ -353,7 +351,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
 	return 0;
 
 err:
-	spin_unlock(&tpg->tpg_state_lock);
 	return ret;
 }
 
@@ -899,3 +896,21 @@ int iscsit_ta_tpg_enabled_sendtargets(
 
 	return 0;
 }
+
+int iscsit_ta_login_keys_workaround(
+	struct iscsi_portal_group *tpg,
+	u32 flag)
+{
+	struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+	if ((flag != 0) && (flag != 1)) {
+		pr_err("Illegal value %d\n", flag);
+		return -EINVAL;
+	}
+
+	a->login_keys_workaround = flag;
+	pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+		 tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+	return 0;
+}
@@ -48,5 +48,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
@@ -167,6 +167,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state)
 
 	cmd->se_cmd.map_tag = tag;
 	cmd->conn = conn;
+	cmd->data_direction = DMA_NONE;
 	INIT_LIST_HEAD(&cmd->i_conn_node);
 	INIT_LIST_HEAD(&cmd->datain_list);
 	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -711,19 +712,16 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
-		       bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
 {
 	struct iscsi_conn *conn = cmd->conn;
 
-	if (scsi_cmd) {
 	if (cmd->data_direction == DMA_TO_DEVICE) {
 		iscsit_stop_dataout_timer(cmd);
 		iscsit_free_r2ts_from_list(cmd);
 	}
 	if (cmd->data_direction == DMA_FROM_DEVICE)
 		iscsit_free_all_datain_reqs(cmd);
-	}
 
 	if (conn && check_queues) {
 		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
@@ -736,50 +734,18 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
 
 void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
-	struct se_cmd *se_cmd = NULL;
+	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
 	int rc;
-	bool op_scsi = false;
-	/*
-	 * Determine if a struct se_cmd is associated with
-	 * this struct iscsi_cmd.
-	 */
-	switch (cmd->iscsi_opcode) {
-	case ISCSI_OP_SCSI_CMD:
-		op_scsi = true;
-		/*
-		 * Fallthrough
-		 */
-	case ISCSI_OP_SCSI_TMFUNC:
-		se_cmd = &cmd->se_cmd;
-		__iscsit_free_cmd(cmd, op_scsi, shutdown);
+
+	__iscsit_free_cmd(cmd, shutdown);
+	if (se_cmd) {
 		rc = transport_generic_free_cmd(se_cmd, shutdown);
 		if (!rc && shutdown && se_cmd->se_sess) {
-			__iscsit_free_cmd(cmd, op_scsi, shutdown);
+			__iscsit_free_cmd(cmd, shutdown);
 			target_put_sess_cmd(se_cmd);
 		}
-		break;
-	case ISCSI_OP_REJECT:
-		/*
-		 * Handle special case for REJECT when iscsi_add_reject*() has
-		 * overwritten the original iscsi_opcode assignment, and the
-		 * associated cmd->se_cmd needs to be released.
-		 */
-		if (cmd->se_cmd.se_tfo != NULL) {
-			se_cmd = &cmd->se_cmd;
-			__iscsit_free_cmd(cmd, true, shutdown);
-
-			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-			if (!rc && shutdown && se_cmd->se_sess) {
-				__iscsit_free_cmd(cmd, true, shutdown);
-				target_put_sess_cmd(se_cmd);
-			}
-			break;
-		}
-		/* Fall-through */
-	default:
-		__iscsit_free_cmd(cmd, false, shutdown);
+	} else {
 		iscsit_release_cmd(cmd);
-		break;
 	}
 }
 EXPORT_SYMBOL(iscsit_free_cmd);
@@ -37,7 +37,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
@@ -51,19 +51,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  */
 static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 {
-	/*
-	 * Do not release struct se_cmd's containing a valid TMR
-	 * pointer. These will be released directly in tcm_loop_device_reset()
-	 * with transport_generic_free_cmd().
-	 */
-	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-		return 0;
-	/*
-	 * Release the struct se_cmd, which will make a callback to release
-	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
-	 */
-	transport_generic_free_cmd(se_cmd, 0);
-	return 1;
+	return transport_generic_free_cmd(se_cmd, 0);
 }
 
 static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
@@ -218,10 +206,8 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 {
 	struct se_cmd *se_cmd = NULL;
 	struct se_session *se_sess;
-	struct se_portal_group *se_tpg;
 	struct tcm_loop_nexus *tl_nexus;
 	struct tcm_loop_cmd *tl_cmd = NULL;
-	struct tcm_loop_tmr *tl_tmr = NULL;
 	int ret = TMR_FUNCTION_FAILED, rc;
 
 	/*
@@ -240,55 +226,29 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 		return ret;
 	}
 
-	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
-	if (!tl_tmr) {
-		pr_err("Unable to allocate memory for tl_tmr\n");
-		goto release;
-	}
-	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+	init_completion(&tl_cmd->tmr_done);
 
 	se_cmd = &tl_cmd->tl_se_cmd;
-	se_tpg = &tl_tpg->tl_se_tpg;
 	se_sess = tl_tpg->tl_nexus->se_sess;
-	/*
-	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
-	 */
-	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
-				DMA_NONE, TCM_SIMPLE_TAG,
-				&tl_cmd->tl_sense_buf[0]);
 
-	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
+	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
+			       NULL, tmr, GFP_KERNEL, task,
+			       TARGET_SCF_ACK_KREF);
 	if (rc < 0)
 		goto release;
-
-	if (tmr == TMR_ABORT_TASK)
-		se_cmd->se_tmr_req->ref_task_tag = task;
-
-	/*
-	 * Locate the underlying TCM struct se_lun
-	 */
-	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
-		ret = TMR_LUN_DOES_NOT_EXIST;
-		goto release;
-	}
-	/*
-	 * Queue the TMR to TCM Core and sleep waiting for
-	 * tcm_loop_queue_tm_rsp() to wake us up.
-	 */
-	transport_generic_handle_tmr(se_cmd);
-	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
-	/*
-	 * The TMR LUN_RESET has completed, check the response status and
-	 * then release allocations.
-	 */
+	wait_for_completion(&tl_cmd->tmr_done);
 	ret = se_cmd->se_tmr_req->response;
+	target_put_sess_cmd(se_cmd);
+
+out:
+	return ret;
+
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1);
+		transport_generic_free_cmd(se_cmd, 0);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
-	kfree(tl_tmr);
-	return ret;
+	goto out;
 }
 
 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
@@ -669,14 +629,11 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
 
 static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
-	/*
-	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
-	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
-	 */
-	atomic_set(&tl_tmr->tmr_complete, 1);
-	wake_up(&tl_tmr->tl_tmr_wait);
+	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+				struct tcm_loop_cmd, tl_se_cmd);
+
+	/* Wake up tcm_loop_issue_tmr(). */
+	complete(&tl_cmd->tmr_done);
 }
 
 static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
@@ -16,15 +16,11 @@ struct tcm_loop_cmd {
 	/* The TCM I/O descriptor that is accessed via container_of() */
 	struct se_cmd tl_se_cmd;
 	struct work_struct work;
+	struct completion tmr_done;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
 };
 
-struct tcm_loop_tmr {
-	atomic_t tmr_complete;
-	wait_queue_head_t tl_tmr_wait;
-};
-
 struct tcm_loop_nexus {
 	/*
 	 * Pointer to TCM session for I_T Nexus
@@ -205,8 +205,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
 		/*
 		 * TARGET PORT GROUP
 		 */
-		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
-		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+		put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
+		off += 2;
 
 		off++; /* Skip over Reserved */
 		/*
@@ -235,8 +235,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
 			/*
 			 * Set RELATIVE TARGET PORT IDENTIFIER
 			 */
-			buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
-			buf[off++] = (lun->lun_rtpi & 0xff);
+			put_unaligned_be16(lun->lun_rtpi, &buf[off]);
+			off += 2;
 			rd_len += 4;
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
@@ -1085,6 +1085,24 @@ static ssize_t block_size_store(struct config_item *item,
 	return count;
 }
 
+static ssize_t alua_support_show(struct config_item *item, char *page)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	u8 flags = da->da_dev->transport->transport_flags;
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
+}
+
+static ssize_t pgr_support_show(struct config_item *item, char *page)
+{
+	struct se_dev_attrib *da = to_attrib(item);
+	u8 flags = da->da_dev->transport->transport_flags;
+
+	return snprintf(page, PAGE_SIZE, "%d\n",
+			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
+}
+
 CONFIGFS_ATTR(, emulate_model_alias);
 CONFIGFS_ATTR(, emulate_dpo);
 CONFIGFS_ATTR(, emulate_fua_write);
@@ -1116,6 +1134,8 @@ CONFIGFS_ATTR(, unmap_granularity);
 CONFIGFS_ATTR(, unmap_granularity_alignment);
 CONFIGFS_ATTR(, unmap_zeroes_data);
 CONFIGFS_ATTR(, max_write_same_len);
+CONFIGFS_ATTR_RO(, alua_support);
+CONFIGFS_ATTR_RO(, pgr_support);
 
 /*
  * dev_attrib attributes for devices using the target core SBC/SPC
@@ -1154,6 +1174,8 @@ struct configfs_attribute *sbc_attrib_attrs[] = {
 	&attr_unmap_granularity_alignment,
 	&attr_unmap_zeroes_data,
 	&attr_max_write_same_len,
+	&attr_alua_support,
+	&attr_pgr_support,
 	NULL,
 };
 EXPORT_SYMBOL(sbc_attrib_attrs);
@@ -1168,6 +1190,8 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
 	&attr_hw_block_size,
 	&attr_hw_max_sectors,
 	&attr_hw_queue_depth,
+	&attr_alua_support,
+	&attr_pgr_support,
 	NULL,
 };
 EXPORT_SYMBOL(passthrough_attrib_attrs);
@@ -2236,7 +2260,11 @@ static void target_core_dev_release(struct config_item *item)
 	target_free_device(dev);
 }
 
-static struct configfs_item_operations target_core_dev_item_ops = {
+/*
+ * Used in target_core_fabric_configfs.c to verify valid se_device symlink
+ * within target_fabric_port_link()
+ */
+struct configfs_item_operations target_core_dev_item_ops = {
 	.release = target_core_dev_release,
 };
 
@@ -49,8 +49,9 @@
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
-DEFINE_MUTEX(g_device_mutex);
-LIST_HEAD(g_device_list);
+static DEFINE_MUTEX(device_mutex);
+static LIST_HEAD(device_list);
+static DEFINE_IDR(devices_idr);
 
 static struct se_hba *lun0_hba;
 /* not static, needed by tpg.c */
@@ -168,11 +169,20 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, unpacked_lun);
 	if (deve) {
-		se_cmd->se_lun = rcu_dereference(deve->se_lun);
 		se_lun = rcu_dereference(deve->se_lun);
+
+		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+			se_lun = NULL;
+			goto out_unlock;
+		}
+
+		se_cmd->se_lun = rcu_dereference(deve->se_lun);
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
+		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+		se_cmd->lun_ref_active = true;
 	}
+out_unlock:
 	rcu_read_unlock();
 
 	if (!se_lun) {
@@ -182,9 +192,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 			unpacked_lun);
 		return -ENODEV;
 	}
-	/*
-	 * XXX: Add percpu se_lun->lun_ref reference count for TMR
-	 */
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
@@ -756,19 +763,16 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	if (!dev)
 		return NULL;
 
-	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
 	dev->se_hba = hba;
 	dev->transport = hba->backend->ops;
 	dev->prot_length = sizeof(struct t10_pi_tuple);
 	dev->hba_index = hba->hba_index;
 
-	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 	INIT_LIST_HEAD(&dev->state_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
-	INIT_LIST_HEAD(&dev->g_dev_node);
 	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
 	spin_lock_init(&dev->dev_reservation_lock);
@@ -851,7 +855,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
 	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
 								block_size;
-	attrib->unmap_zeroes_data = 0;
+	attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
 	return true;
 }
 EXPORT_SYMBOL(target_configure_unmap_from_queue);
@@ -875,10 +879,79 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 }
 EXPORT_SYMBOL(target_to_linux_sector);
 
+/**
+ * target_find_device - find a se_device by its dev_index
+ * @id: dev_index
+ * @do_depend: true if caller needs target_depend_item to be done
+ *
+ * If do_depend is true, the caller must do a target_undepend_item
+ * when finished using the device.
+ *
+ * If do_depend is false, the caller must be called in a configfs
+ * callback or during removal.
+ */
+struct se_device *target_find_device(int id, bool do_depend)
+{
+	struct se_device *dev;
+
+	mutex_lock(&device_mutex);
+	dev = idr_find(&devices_idr, id);
+	if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item))
+		dev = NULL;
+	mutex_unlock(&device_mutex);
+	return dev;
+}
+EXPORT_SYMBOL(target_find_device);
+
+struct devices_idr_iter {
+	int (*fn)(struct se_device *dev, void *data);
+	void *data;
+};
+
+static int target_devices_idr_iter(int id, void *p, void *data)
+{
+	struct devices_idr_iter *iter = data;
+	struct se_device *dev = p;
+
+	/*
+	 * We add the device early to the idr, so it can be used
+	 * by backend modules during configuration. We do not want
+	 * to allow other callers to access partially setup devices,
+	 * so we skip them here.
+	 */
+	if (!(dev->dev_flags & DF_CONFIGURED))
+		return 0;
+
+	return iter->fn(dev, iter->data);
+}
+
+/**
+ * target_for_each_device - iterate over configured devices
+ * @fn: iterator function
+ * @data: pointer to data that will be passed to fn
+ *
+ * fn must return 0 to continue looping over devices. non-zero will break
+ * from the loop and return that value to the caller.
+ */
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+			   void *data)
+{
+	struct devices_idr_iter iter;
+	int ret;
+
+	iter.fn = fn;
+	iter.data = data;
+
+	mutex_lock(&device_mutex);
+	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
+	mutex_unlock(&device_mutex);
+	return ret;
+}
+
 int target_configure_device(struct se_device *dev)
 {
 	struct se_hba *hba = dev->se_hba;
-	int ret;
+	int ret, id;
 
 	if (dev->dev_flags & DF_CONFIGURED) {
 		pr_err("se_dev->se_dev_ptr already set for storage"
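The target_for_each_device() helper added above follows the usual callback-iterator convention: the callback returns 0 to keep walking, any non-zero value stops the walk and is handed back to the caller, and devices that are not yet fully configured are skipped. A minimal userspace sketch of that convention; the struct and function names here are illustrative stand-ins, not part of the patch.

#include <stdio.h>

struct fake_dev { int index; int configured; };

/* Walk a table, calling fn on each configured entry; non-zero stops the walk. */
static int for_each_dev(struct fake_dev *devs, int ndevs,
			int (*fn)(struct fake_dev *, void *), void *data)
{
	int i, ret;

	for (i = 0; i < ndevs; i++) {
		if (!devs[i].configured)	/* skip half-setup entries */
			continue;
		ret = fn(&devs[i], data);
		if (ret)
			return ret;
	}
	return 0;
}

static int print_dev(struct fake_dev *dev, void *data)
{
	printf("dev %d\n", dev->index);
	return 0;
}

int main(void)
{
	struct fake_dev devs[] = { { 1, 1 }, { 2, 0 }, { 3, 1 } };

	return for_each_dev(devs, 3, print_dev, NULL);
}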
@@ -886,9 +959,26 @@ int target_configure_device(struct se_device *dev)
 		return -EEXIST;
 	}
 
+	/*
+	 * Add early so modules like tcmu can use during its
+	 * configuration.
+	 */
+	mutex_lock(&device_mutex);
+	/*
+	 * Use cyclic to try and avoid collisions with devices
+	 * that were recently removed.
+	 */
+	id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
+	mutex_unlock(&device_mutex);
+	if (id < 0) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	dev->dev_index = id;
+
 	ret = dev->transport->configure_device(dev);
 	if (ret)
-		goto out;
+		goto out_free_index;
 	/*
 	 * XXX: there is not much point to have two different values here..
 	 */
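The comment above explains why idr_alloc_cyclic() is used: it resumes searching after the most recently allocated ID, so an index freed by a device that was just removed is not handed out again immediately. A rough userspace model of that behaviour, assuming a tiny fixed-size table rather than the real kernel IDR:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 8
static bool used[MAX_IDS];
static int next_hint;			/* where the cyclic search resumes */

/* Cyclic allocation: start looking just past the last allocation. */
static int alloc_cyclic(void)
{
	for (int n = 0; n < MAX_IDS; n++) {
		int id = (next_hint + n) % MAX_IDS;
		if (!used[id]) {
			used[id] = true;
			next_hint = id + 1;
			return id;
		}
	}
	return -1;			/* table full */
}

int main(void)
{
	int a = alloc_cyclic();		/* 0 */
	int b = alloc_cyclic();		/* 1 */

	used[a] = false;		/* "device 0" removed */
	/* Prints 0 1 2: the freed ID 0 is not reused straight away. */
	printf("%d %d %d\n", a, b, alloc_cyclic());
	return 0;
}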
@@ -903,12 +993,11 @@ int target_configure_device(struct se_device *dev)
 					 dev->dev_attrib.hw_block_size);
 	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
-	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 	dev->creation_time = get_jiffies_64();
 
 	ret = core_setup_alua(dev);
 	if (ret)
-		goto out;
+		goto out_free_index;
 
 	/*
 	 * Startup the struct se_device processing thread
@@ -946,16 +1035,16 @@ int target_configure_device(struct se_device *dev)
 	hba->dev_count++;
 	spin_unlock(&hba->device_lock);
 
-	mutex_lock(&g_device_mutex);
-	list_add_tail(&dev->g_dev_node, &g_device_list);
-	mutex_unlock(&g_device_mutex);
-
 	dev->dev_flags |= DF_CONFIGURED;
 
 	return 0;
 
 out_free_alua:
 	core_alua_free_lu_gp_mem(dev);
+out_free_index:
+	mutex_lock(&device_mutex);
+	idr_remove(&devices_idr, dev->dev_index);
+	mutex_unlock(&device_mutex);
 out:
 	se_release_vpd_for_dev(dev);
 	return ret;
@@ -970,9 +1059,11 @@ void target_free_device(struct se_device *dev)
 	if (dev->dev_flags & DF_CONFIGURED) {
 		destroy_workqueue(dev->tmr_wq);
 
-		mutex_lock(&g_device_mutex);
-		list_del(&dev->g_dev_node);
-		mutex_unlock(&g_device_mutex);
+		dev->transport->destroy_device(dev);
+
+		mutex_lock(&device_mutex);
+		idr_remove(&devices_idr, dev->dev_index);
+		mutex_unlock(&device_mutex);
 
 		spin_lock(&hba->device_lock);
 		hba->dev_count--;
@@ -1087,19 +1178,19 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 				    TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
 		if (cdb[0] == PERSISTENT_RESERVE_IN) {
 			cmd->execute_cmd = target_scsi3_emulate_pr_in;
-			size = (cdb[7] << 8) + cdb[8];
+			size = get_unaligned_be16(&cdb[7]);
 			return target_cmd_size_check(cmd, size);
 		}
 		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
 			cmd->execute_cmd = target_scsi3_emulate_pr_out;
-			size = (cdb[7] << 8) + cdb[8];
+			size = get_unaligned_be32(&cdb[5]);
 			return target_cmd_size_check(cmd, size);
 		}
 
 		if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
 			cmd->execute_cmd = target_scsi2_reservation_release;
 			if (cdb[0] == RELEASE_10)
-				size = (cdb[7] << 8) | cdb[8];
+				size = get_unaligned_be16(&cdb[7]);
 			else
 				size = cmd->data_length;
 			return target_cmd_size_check(cmd, size);
@@ -1107,7 +1198,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 		if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
 			cmd->execute_cmd = target_scsi2_reservation_reserve;
 			if (cdb[0] == RESERVE_10)
-				size = (cdb[7] << 8) | cdb[8];
+				size = get_unaligned_be16(&cdb[7]);
 			else
 				size = cmd->data_length;
 			return target_cmd_size_check(cmd, size);
@@ -1126,7 +1217,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 	case WRITE_16:
 	case WRITE_VERIFY:
 	case WRITE_VERIFY_12:
-	case 0x8e: /* WRITE_VERIFY_16 */
+	case WRITE_VERIFY_16:
 	case COMPARE_AND_WRITE:
 	case XDWRITEREAD_10:
 		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -1135,7 +1226,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 		switch (get_unaligned_be16(&cdb[8])) {
 		case READ_32:
 		case WRITE_32:
-		case 0x0c: /* WRITE_VERIFY_32 */
+		case WRITE_VERIFY_32:
 		case XDWRITEREAD_32:
 			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 			break;
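The length fields that passthrough_parse_cdb() now reads with get_unaligned_be16()/get_unaligned_be32() are plain big-endian values at byte offsets that need not be aligned. A portable userspace sketch of the same byte math, using local helper names (be16_at/be32_at) rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Read big-endian fields byte by byte, so alignment never matters. */
static uint16_t be16_at(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* PERSISTENT RESERVE IN: allocation length lives in CDB bytes 7-8. */
	uint8_t pr_in[10]  = { 0x5e, 0, 0, 0, 0, 0, 0, 0x20, 0x00, 0 };
	/* PERSISTENT RESERVE OUT: parameter list length is CDB bytes 5-8. */
	uint8_t pr_out[10] = { 0x5f, 0, 0, 0, 0, 0, 0, 0, 0x18, 0 };

	printf("PR IN alloc len:  %u\n", be16_at(&pr_in[7]));   /* 8192 */
	printf("PR OUT list len:  %u\n", be32_at(&pr_out[5]));  /* 24 */
	return 0;
}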
@@ -65,6 +65,8 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
 	pr_debug("Setup generic %s\n", __stringify(_name)); \
 }
 
+static struct configfs_item_operations target_fabric_port_item_ops;
+
 /* Start of tfc_tpg_mappedlun_cit */
 
 static int target_fabric_mappedlun_link(
@@ -72,19 +74,20 @@ static int target_fabric_mappedlun_link(
 	struct config_item *lun_ci)
 {
 	struct se_dev_entry *deve;
-	struct se_lun *lun = container_of(to_config_group(lun_ci),
-			struct se_lun, lun_group);
+	struct se_lun *lun;
 	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
 			struct se_lun_acl, se_lun_group);
 	struct se_portal_group *se_tpg;
 	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
 	bool lun_access_ro;
 
-	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
-		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
-			" %p to struct lun: %p\n", lun_ci, lun);
+	if (!lun_ci->ci_type ||
+	    lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) {
+		pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci);
 		return -EFAULT;
 	}
+	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+
 	/*
 	 * Ensure that the source port exists
 	 */
@@ -620,6 +623,8 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
 	NULL,
 };
 
+extern struct configfs_item_operations target_core_dev_item_ops;
+
 static int target_fabric_port_link(
 	struct config_item *lun_ci,
 	struct config_item *se_dev_ci)
@@ -628,16 +633,16 @@ static int target_fabric_port_link(
 	struct se_lun *lun = container_of(to_config_group(lun_ci),
 			struct se_lun, lun_group);
 	struct se_portal_group *se_tpg;
-	struct se_device *dev =
-		container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
+	struct se_device *dev;
 	struct target_fabric_configfs *tf;
 	int ret;
 
-	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
-		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
-			" %p to struct se_device: %p\n", se_dev_ci, dev);
+	if (!se_dev_ci->ci_type ||
+	    se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) {
+		pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci);
 		return -EFAULT;
 	}
+	dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
 
 	if (!(dev->dev_flags & DF_CONFIGURED)) {
 		pr_err("se_device not configured yet, cannot port link\n");
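Both configfs link callbacks above replace the old *_link_magic sentinels with a check of the config_item's ci_type before the container_of() downcast, so a symlink made against the wrong kind of item is rejected without ever dereferencing misinterpreted memory. A self-contained sketch of that pattern with invented types; container_of is spelled out locally and nothing here is target-core API:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item_ops { const char *name; };
struct item { const struct item_ops *ops; };

static const struct item_ops lun_ops = { "lun" };
static const struct item_ops dev_ops = { "dev" };

struct lun { int id; struct item item; };

/* Only downcast after confirming the item really is a LUN item. */
static struct lun *item_to_lun(struct item *it)
{
	if (!it->ops || it->ops != &lun_ops) {
		fprintf(stderr, "not a lun item: %p\n", (void *)it);
		return NULL;
	}
	return container_of(it, struct lun, item);
}

int main(void)
{
	struct lun l = { 7, { &lun_ops } };
	struct item bogus = { &dev_ops };

	printf("%p %p\n", (void *)item_to_lun(&l.item), (void *)item_to_lun(&bogus));
	return 0;
}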
@@ -34,6 +34,7 @@
 #include <linux/ctype.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi_proto.h>
 
@@ -216,8 +217,7 @@ static int iscsi_get_pr_transport_id(
 	if (padding != 0)
 		len += padding;
 
-	buf[2] = ((len >> 8) & 0xff);
-	buf[3] = (len & 0xff);
+	put_unaligned_be16(len, &buf[2]);
 	/*
 	 * Increment value for total payload + header length for
 	 * full status descriptor
@@ -306,7 +306,7 @@ static char *iscsi_parse_pr_out_transport_id(
 	 */
 	if (out_tid_len) {
 		/* The shift works thanks to integer promotion rules */
-		add_len = (buf[2] << 8) | buf[3];
+		add_len = get_unaligned_be16(&buf[2]);
 
 		tid_len = strlen(&buf[4]);
 		tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
@@ -236,6 +236,11 @@ static void fd_dev_call_rcu(struct rcu_head *p)
 }
 
 static void fd_free_device(struct se_device *dev)
+{
+	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
+}
+
+static void fd_destroy_device(struct se_device *dev)
 {
 	struct fd_dev *fd_dev = FD_DEV(dev);
 
@@ -243,7 +248,6 @@ static void fd_free_device(struct se_device *dev)
 		filp_close(fd_dev->fd_file, NULL);
 		fd_dev->fd_file = NULL;
 	}
-	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
 }
 
 static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
@@ -826,6 +830,7 @@ static const struct target_backend_ops fileio_ops = {
 	.detach_hba = fd_detach_hba,
 	.alloc_device = fd_alloc_device,
 	.configure_device = fd_configure_device,
+	.destroy_device = fd_destroy_device,
 	.free_device = fd_free_device,
 	.parse_cdb = fd_parse_cdb,
 	.set_configfs_dev_params = fd_set_configfs_dev_params,
@ -86,6 +86,7 @@ static int iblock_configure_device(struct se_device *dev)
|
||||||
struct block_device *bd = NULL;
|
struct block_device *bd = NULL;
|
||||||
struct blk_integrity *bi;
|
struct blk_integrity *bi;
|
||||||
fmode_t mode;
|
fmode_t mode;
|
||||||
|
unsigned int max_write_zeroes_sectors;
|
||||||
int ret = -ENOMEM;
|
int ret = -ENOMEM;
|
||||||
|
|
||||||
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
|
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
|
||||||
|
@ -129,6 +130,10 @@ static int iblock_configure_device(struct se_device *dev)
|
||||||
* Enable write same emulation for IBLOCK and use 0xFFFF as
|
* Enable write same emulation for IBLOCK and use 0xFFFF as
|
||||||
* the smaller WRITE_SAME(10) only has a two-byte block count.
|
* the smaller WRITE_SAME(10) only has a two-byte block count.
|
||||||
*/
|
*/
|
||||||
|
max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
|
||||||
|
if (max_write_zeroes_sectors)
|
||||||
|
dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
|
||||||
|
else
|
||||||
dev->dev_attrib.max_write_same_len = 0xFFFF;
|
dev->dev_attrib.max_write_same_len = 0xFFFF;
|
||||||
|
|
||||||
if (blk_queue_nonrot(q))
|
if (blk_queue_nonrot(q))
|
||||||
|
@ -184,6 +189,11 @@ static void iblock_dev_call_rcu(struct rcu_head *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iblock_free_device(struct se_device *dev)
|
static void iblock_free_device(struct se_device *dev)
|
||||||
|
{
|
||||||
|
call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void iblock_destroy_device(struct se_device *dev)
|
||||||
{
|
{
|
||||||
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
|
||||||
|
|
||||||
|
@ -191,8 +201,6 @@ static void iblock_free_device(struct se_device *dev)
|
||||||
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
|
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
|
||||||
if (ib_dev->ibd_bio_set != NULL)
|
if (ib_dev->ibd_bio_set != NULL)
|
||||||
bioset_free(ib_dev->ibd_bio_set);
|
bioset_free(ib_dev->ibd_bio_set);
|
||||||
|
|
||||||
call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long long iblock_emulate_read_cap_with_block_size(
|
static unsigned long long iblock_emulate_read_cap_with_block_size(
|
||||||
|
@ -415,28 +423,31 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
|
||||||
}
|
}
|
||||||
|
|
||||||
static sense_reason_t
|
static sense_reason_t
|
||||||
iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
|
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
|
||||||
{
|
{
|
||||||
struct se_device *dev = cmd->se_dev;
|
struct se_device *dev = cmd->se_dev;
|
||||||
struct scatterlist *sg = &cmd->t_data_sg[0];
|
struct scatterlist *sg = &cmd->t_data_sg[0];
|
||||||
struct page *page = NULL;
|
unsigned char *buf, zero = 0x00, *p = &zero;
|
||||||
int ret;
|
int rc, ret;
|
||||||
|
|
||||||
if (sg->offset) {
|
buf = kmap(sg_page(sg)) + sg->offset;
|
||||||
page = alloc_page(GFP_KERNEL);
|
if (!buf)
|
||||||
if (!page)
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
return TCM_OUT_OF_RESOURCES;
|
/*
|
||||||
sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
|
* Fall back to block_execute_write_same() slow-path if
|
||||||
dev->dev_attrib.block_size);
|
* incoming WRITE_SAME payload does not contain zeros.
|
||||||
}
|
*/
|
||||||
|
rc = memcmp(buf, p, cmd->data_length);
|
||||||
|
kunmap(sg_page(sg));
|
||||||
|
|
||||||
ret = blkdev_issue_write_same(bdev,
|
if (rc)
|
||||||
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
|
ret = blkdev_issue_zeroout(bdev,
|
||||||
target_to_linux_sector(dev, cmd->t_task_lba),
|
target_to_linux_sector(dev, cmd->t_task_lba),
|
||||||
target_to_linux_sector(dev,
|
target_to_linux_sector(dev,
|
||||||
sbc_get_write_same_sectors(cmd)),
|
sbc_get_write_same_sectors(cmd)),
|
||||||
GFP_KERNEL, page ? page : sg_page(sg));
|
GFP_KERNEL, false);
|
||||||
if (page)
|
|
||||||
__free_page(page);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
|
@ -472,8 +483,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
|
||||||
return TCM_INVALID_CDB_FIELD;
|
return TCM_INVALID_CDB_FIELD;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (bdev_write_same(bdev))
|
if (bdev_write_zeroes_sectors(bdev)) {
|
||||||
return iblock_execute_write_same_direct(bdev, cmd);
|
if (!iblock_execute_zero_out(bdev, cmd))
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
|
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
|
||||||
if (!ibr)
|
if (!ibr)
|
||||||
|
@ -848,6 +861,7 @@ static const struct target_backend_ops iblock_ops = {
|
||||||
.detach_hba = iblock_detach_hba,
|
.detach_hba = iblock_detach_hba,
|
||||||
.alloc_device = iblock_alloc_device,
|
.alloc_device = iblock_alloc_device,
|
||||||
.configure_device = iblock_configure_device,
|
.configure_device = iblock_configure_device,
|
||||||
|
.destroy_device = iblock_destroy_device,
|
||||||
.free_device = iblock_free_device,
|
.free_device = iblock_free_device,
|
||||||
.parse_cdb = iblock_parse_cdb,
|
.parse_cdb = iblock_parse_cdb,
|
||||||
.set_configfs_dev_params = iblock_set_configfs_dev_params,
|
.set_configfs_dev_params = iblock_set_configfs_dev_params,
|
||||||
|
|
|
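The iblock WRITE_SAME change above only takes the blkdev_issue_zeroout() fast path when the incoming single-block payload is entirely zero, and otherwise falls back to the generic slow path. A minimal userspace version of that gate; the helper name and the 512-byte block size are assumptions made for the example:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* True if every byte of the payload is zero. */
static bool payload_is_zero(const unsigned char *buf, size_t len)
{
	return len && buf[0] == 0 && !memcmp(buf, buf + 1, len - 1);
}

int main(void)
{
	unsigned char zeros[512] = { 0 };
	unsigned char data[512] = { 0 };

	data[100] = 0xff;
	printf("zeros -> %s\n", payload_is_zero(zeros, sizeof(zeros)) ?
	       "zeroout fast path" : "slow path");
	printf("data  -> %s\n", payload_is_zero(data, sizeof(data)) ?
	       "zeroout fast path" : "slow path");
	return 0;
}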
@@ -56,9 +56,6 @@ struct target_fabric_configfs {
 extern struct t10_alua_lu_gp *default_lu_gp;
 
 /* target_core_device.c */
-extern struct mutex g_device_mutex;
-extern struct list_head g_device_list;
-
 int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev);
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 void target_pr_kref_release(struct kref *);
@@ -87,6 +84,8 @@ void core_dev_release_virtual_lun0(void);
 struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
 int target_configure_device(struct se_device *dev);
 void target_free_device(struct se_device *);
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+			   void *data);
 
 /* target_core_configfs.c */
 void target_setup_backend_cits(struct target_backend *);
@ -1562,10 +1562,7 @@ core_scsi3_decode_spec_i_port(
|
||||||
* first extract TransportID Parameter Data Length, and make sure
|
* first extract TransportID Parameter Data Length, and make sure
|
||||||
* the value matches up to the SCSI expected data transfer length.
|
* the value matches up to the SCSI expected data transfer length.
|
||||||
*/
|
*/
|
||||||
tpdl = (buf[24] & 0xff) << 24;
|
tpdl = get_unaligned_be32(&buf[24]);
|
||||||
tpdl |= (buf[25] & 0xff) << 16;
|
|
||||||
tpdl |= (buf[26] & 0xff) << 8;
|
|
||||||
tpdl |= buf[27] & 0xff;
|
|
||||||
|
|
||||||
if ((tpdl + 28) != cmd->data_length) {
|
if ((tpdl + 28) != cmd->data_length) {
|
||||||
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
|
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
|
||||||
|
@ -3221,12 +3218,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
|
||||||
goto out_put_pr_reg;
|
goto out_put_pr_reg;
|
||||||
}
|
}
|
||||||
|
|
||||||
rtpi = (buf[18] & 0xff) << 8;
|
rtpi = get_unaligned_be16(&buf[18]);
|
||||||
rtpi |= buf[19] & 0xff;
|
tid_len = get_unaligned_be32(&buf[20]);
|
||||||
tid_len = (buf[20] & 0xff) << 24;
|
|
||||||
tid_len |= (buf[21] & 0xff) << 16;
|
|
||||||
tid_len |= (buf[22] & 0xff) << 8;
|
|
||||||
tid_len |= buf[23] & 0xff;
|
|
||||||
transport_kunmap_data_sg(cmd);
|
transport_kunmap_data_sg(cmd);
|
||||||
buf = NULL;
|
buf = NULL;
|
||||||
|
|
||||||
|
@ -3552,16 +3545,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
|
|
||||||
{
|
|
||||||
unsigned int __v1, __v2;
|
|
||||||
|
|
||||||
__v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
|
|
||||||
__v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
|
|
||||||
|
|
||||||
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* See spc4r17 section 6.14 Table 170
|
* See spc4r17 section 6.14 Table 170
|
||||||
*/
|
*/
|
||||||
|
@ -3602,7 +3585,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
|
||||||
if (cmd->data_length < 24) {
|
if (cmd->data_length < 24) {
|
||||||
pr_warn("SPC-PR: Received PR OUT parameter list"
|
pr_warn("SPC-PR: Received PR OUT parameter list"
|
||||||
" length too small: %u\n", cmd->data_length);
|
" length too small: %u\n", cmd->data_length);
|
||||||
return TCM_INVALID_PARAMETER_LIST;
|
return TCM_PARAMETER_LIST_LENGTH_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3619,8 +3602,8 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* From PERSISTENT_RESERVE_OUT parameter list (payload)
|
* From PERSISTENT_RESERVE_OUT parameter list (payload)
|
||||||
*/
|
*/
|
||||||
res_key = core_scsi3_extract_reservation_key(&buf[0]);
|
res_key = get_unaligned_be64(&buf[0]);
|
||||||
sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
|
sa_res_key = get_unaligned_be64(&buf[8]);
|
||||||
/*
|
/*
|
||||||
* REGISTER_AND_MOVE uses a different SA parameter list containing
|
* REGISTER_AND_MOVE uses a different SA parameter list containing
|
||||||
* SCSI TransportIDs.
|
* SCSI TransportIDs.
|
||||||
|
@ -3646,7 +3629,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* SPEC_I_PT=1 is only valid for Service action: REGISTER
|
* SPEC_I_PT=1 is only valid for Service action: REGISTER
|
||||||
*/
|
*/
|
||||||
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
|
if (spec_i_pt && (sa != PRO_REGISTER))
|
||||||
return TCM_INVALID_PARAMETER_LIST;
|
return TCM_INVALID_PARAMETER_LIST;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3658,11 +3641,11 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
|
||||||
* the sense key set to ILLEGAL REQUEST, and the additional sense
|
* the sense key set to ILLEGAL REQUEST, and the additional sense
|
||||||
* code set to PARAMETER LIST LENGTH ERROR.
|
* code set to PARAMETER LIST LENGTH ERROR.
|
||||||
*/
|
*/
|
||||||
if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
|
if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) &&
|
||||||
(cmd->data_length != 24)) {
|
(cmd->data_length != 24)) {
|
||||||
pr_warn("SPC-PR: Received PR OUT illegal parameter"
|
pr_warn("SPC-PR: Received PR OUT illegal parameter"
|
||||||
" list length: %u\n", cmd->data_length);
|
" list length: %u\n", cmd->data_length);
|
||||||
return TCM_INVALID_PARAMETER_LIST;
|
return TCM_PARAMETER_LIST_LENGTH_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3702,7 +3685,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
|
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
|
||||||
" action: 0x%02x\n", cdb[1] & 0x1f);
|
" action: 0x%02x\n", sa);
|
||||||
return TCM_INVALID_CDB_FIELD;
|
return TCM_INVALID_CDB_FIELD;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3734,10 +3717,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
|
||||||
if (!buf)
|
if (!buf)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
|
put_unaligned_be32(dev->t10_pr.pr_generation, buf);
|
||||||
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
|
|
||||||
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
|
|
||||||
buf[3] = (dev->t10_pr.pr_generation & 0xff);
|
|
||||||
|
|
||||||
spin_lock(&dev->t10_pr.registration_lock);
|
spin_lock(&dev->t10_pr.registration_lock);
|
||||||
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
|
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
|
||||||
|
@ -3749,23 +3729,13 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
|
||||||
if ((add_len + 8) > (cmd->data_length - 8))
|
if ((add_len + 8) > (cmd->data_length - 8))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
|
put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
|
off += 8;
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
|
|
||||||
buf[off++] = (pr_reg->pr_res_key & 0xff);
|
|
||||||
|
|
||||||
add_len += 8;
|
add_len += 8;
|
||||||
}
|
}
|
||||||
spin_unlock(&dev->t10_pr.registration_lock);
|
spin_unlock(&dev->t10_pr.registration_lock);
|
||||||
|
|
||||||
buf[4] = ((add_len >> 24) & 0xff);
|
put_unaligned_be32(add_len, &buf[4]);
|
||||||
buf[5] = ((add_len >> 16) & 0xff);
|
|
||||||
buf[6] = ((add_len >> 8) & 0xff);
|
|
||||||
buf[7] = (add_len & 0xff);
|
|
||||||
|
|
||||||
transport_kunmap_data_sg(cmd);
|
transport_kunmap_data_sg(cmd);
|
||||||
|
|
||||||
|
@ -3796,10 +3766,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
|
||||||
if (!buf)
|
if (!buf)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
|
put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
|
||||||
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
|
|
||||||
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
|
|
||||||
buf[3] = (dev->t10_pr.pr_generation & 0xff);
|
|
||||||
|
|
||||||
spin_lock(&dev->dev_reservation_lock);
|
spin_lock(&dev->dev_reservation_lock);
|
||||||
pr_reg = dev->dev_pr_res_holder;
|
pr_reg = dev->dev_pr_res_holder;
|
||||||
|
@ -3807,10 +3774,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* Set the hardcoded Additional Length
|
* Set the hardcoded Additional Length
|
||||||
*/
|
*/
|
||||||
buf[4] = ((add_len >> 24) & 0xff);
|
put_unaligned_be32(add_len, &buf[4]);
|
||||||
buf[5] = ((add_len >> 16) & 0xff);
|
|
||||||
buf[6] = ((add_len >> 8) & 0xff);
|
|
||||||
buf[7] = (add_len & 0xff);
|
|
||||||
|
|
||||||
if (cmd->data_length < 22)
|
if (cmd->data_length < 22)
|
||||||
goto err;
|
goto err;
|
||||||
|
@ -3837,14 +3801,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
|
||||||
else
|
else
|
||||||
pr_res_key = pr_reg->pr_res_key;
|
pr_res_key = pr_reg->pr_res_key;
|
||||||
|
|
||||||
buf[8] = ((pr_res_key >> 56) & 0xff);
|
put_unaligned_be64(pr_res_key, &buf[8]);
|
||||||
buf[9] = ((pr_res_key >> 48) & 0xff);
|
|
||||||
buf[10] = ((pr_res_key >> 40) & 0xff);
|
|
||||||
buf[11] = ((pr_res_key >> 32) & 0xff);
|
|
||||||
buf[12] = ((pr_res_key >> 24) & 0xff);
|
|
||||||
buf[13] = ((pr_res_key >> 16) & 0xff);
|
|
||||||
buf[14] = ((pr_res_key >> 8) & 0xff);
|
|
||||||
buf[15] = (pr_res_key & 0xff);
|
|
||||||
/*
|
/*
|
||||||
* Set the SCOPE and TYPE
|
* Set the SCOPE and TYPE
|
||||||
*/
|
*/
|
||||||
|
@ -3882,8 +3839,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
|
||||||
if (!buf)
|
if (!buf)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
buf[0] = ((add_len >> 8) & 0xff);
|
put_unaligned_be16(add_len, &buf[0]);
|
||||||
buf[1] = (add_len & 0xff);
|
|
||||||
buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
|
buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */
|
||||||
buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
|
buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
|
||||||
buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
|
buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
|
||||||
|
@ -3947,10 +3903,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||||
if (!buf)
|
if (!buf)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
|
|
||||||
buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
|
put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
|
||||||
buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
|
|
||||||
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
|
|
||||||
buf[3] = (dev->t10_pr.pr_generation & 0xff);
|
|
||||||
|
|
||||||
spin_lock(&dev->dev_reservation_lock);
|
spin_lock(&dev->dev_reservation_lock);
|
||||||
if (dev->dev_pr_res_holder) {
|
if (dev->dev_pr_res_holder) {
|
||||||
|
@ -3992,14 +3945,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* Set RESERVATION KEY
|
* Set RESERVATION KEY
|
||||||
*/
|
*/
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
|
put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
|
off += 8;
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
|
|
||||||
buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
|
|
||||||
buf[off++] = (pr_reg->pr_res_key & 0xff);
|
|
||||||
off += 4; /* Skip Over Reserved area */
|
off += 4; /* Skip Over Reserved area */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4041,8 +3988,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||||
if (!pr_reg->pr_reg_all_tg_pt) {
|
if (!pr_reg->pr_reg_all_tg_pt) {
|
||||||
u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
|
u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
|
||||||
|
|
||||||
buf[off++] = ((sep_rtpi >> 8) & 0xff);
|
put_unaligned_be16(sep_rtpi, &buf[off]);
|
||||||
buf[off++] = (sep_rtpi & 0xff);
|
off += 2;
|
||||||
} else
|
} else
|
||||||
off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
|
off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
|
||||||
|
|
||||||
|
@ -4062,10 +4009,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* Set the ADDITIONAL DESCRIPTOR LENGTH
|
* Set the ADDITIONAL DESCRIPTOR LENGTH
|
||||||
*/
|
*/
|
||||||
buf[off++] = ((desc_len >> 24) & 0xff);
|
put_unaligned_be32(desc_len, &buf[off]);
|
||||||
buf[off++] = ((desc_len >> 16) & 0xff);
|
|
||||||
buf[off++] = ((desc_len >> 8) & 0xff);
|
|
||||||
buf[off++] = (desc_len & 0xff);
|
|
||||||
/*
|
/*
|
||||||
* Size of full desctipor header minus TransportID
|
* Size of full desctipor header minus TransportID
|
||||||
* containing $FABRIC_MOD specific) initiator device/port
|
* containing $FABRIC_MOD specific) initiator device/port
|
||||||
|
@ -4082,10 +4026,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
|
||||||
/*
|
/*
|
||||||
* Set ADDITIONAL_LENGTH
|
* Set ADDITIONAL_LENGTH
|
||||||
*/
|
*/
|
||||||
buf[4] = ((add_len >> 24) & 0xff);
|
put_unaligned_be32(add_len, &buf[4]);
|
||||||
buf[5] = ((add_len >> 16) & 0xff);
|
|
||||||
buf[6] = ((add_len >> 8) & 0xff);
|
|
||||||
buf[7] = (add_len & 0xff);
|
|
||||||
|
|
||||||
transport_kunmap_data_sg(cmd);
|
transport_kunmap_data_sg(cmd);
|
||||||
|
|
||||||
|
|
|
@ -168,7 +168,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
|
||||||
/*
|
/*
|
||||||
* If MODE_SENSE still returns zero, set the default value to 1024.
|
* If MODE_SENSE still returns zero, set the default value to 1024.
|
||||||
*/
|
*/
|
||||||
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
|
sdev->sector_size = get_unaligned_be24(&buf[9]);
|
||||||
out_free:
|
out_free:
|
||||||
if (!sdev->sector_size)
|
if (!sdev->sector_size)
|
||||||
sdev->sector_size = 1024;
|
sdev->sector_size = 1024;
|
||||||
|
@ -209,8 +209,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
|
||||||
cdb[0] = INQUIRY;
|
cdb[0] = INQUIRY;
|
||||||
cdb[1] = 0x01; /* Query VPD */
|
cdb[1] = 0x01; /* Query VPD */
|
||||||
cdb[2] = 0x80; /* Unit Serial Number */
|
cdb[2] = 0x80; /* Unit Serial Number */
|
||||||
cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
|
put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
|
||||||
cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
|
|
||||||
|
|
||||||
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
|
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
|
||||||
INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
|
INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
|
||||||
|
@ -245,8 +244,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
|
||||||
cdb[0] = INQUIRY;
|
cdb[0] = INQUIRY;
|
||||||
cdb[1] = 0x01; /* Query VPD */
|
cdb[1] = 0x01; /* Query VPD */
|
||||||
cdb[2] = 0x83; /* Device Identifier */
|
cdb[2] = 0x83; /* Device Identifier */
|
||||||
cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
|
put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
|
||||||
cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
|
|
||||||
|
|
||||||
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
|
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
|
||||||
INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
|
INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
|
||||||
|
@ -254,7 +252,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
page_len = (buf[2] << 8) | buf[3];
|
page_len = get_unaligned_be16(&buf[2]);
|
||||||
while (page_len > 0) {
|
while (page_len > 0) {
|
||||||
/* Grab a pointer to the Identification descriptor */
|
/* Grab a pointer to the Identification descriptor */
|
||||||
page_83 = &buf[off];
|
page_83 = &buf[off];
|
||||||
|
@ -384,7 +382,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
|
||||||
spin_unlock_irq(sh->host_lock);
|
spin_unlock_irq(sh->host_lock);
|
||||||
/*
|
/*
|
||||||
* Claim exclusive struct block_device access to struct scsi_device
|
* Claim exclusive struct block_device access to struct scsi_device
|
||||||
* for TYPE_DISK using supplied udev_path
|
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
|
||||||
*/
|
*/
|
||||||
bd = blkdev_get_by_path(dev->udev_path,
|
bd = blkdev_get_by_path(dev->udev_path,
|
||||||
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
|
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
|
||||||
|
@ -402,8 +400,9 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n",
|
pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
|
||||||
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
|
phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
|
||||||
|
sh->host_no, sd->channel, sd->id, sd->lun);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -522,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
|
||||||
*/
|
*/
|
||||||
switch (sd->type) {
|
switch (sd->type) {
|
||||||
case TYPE_DISK:
|
case TYPE_DISK:
|
||||||
|
case TYPE_ZBC:
|
||||||
ret = pscsi_create_type_disk(dev, sd);
|
ret = pscsi_create_type_disk(dev, sd);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
|
@ -565,6 +565,11 @@ static void pscsi_dev_call_rcu(struct rcu_head *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pscsi_free_device(struct se_device *dev)
|
static void pscsi_free_device(struct se_device *dev)
|
||||||
|
{
|
||||||
|
call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void pscsi_destroy_device(struct se_device *dev)
|
||||||
{
|
{
|
||||||
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
|
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
|
||||||
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
|
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
|
||||||
|
@ -573,9 +578,11 @@ static void pscsi_free_device(struct se_device *dev)
|
||||||
if (sd) {
|
if (sd) {
|
||||||
/*
|
/*
|
||||||
* Release exclusive pSCSI internal struct block_device claim for
|
* Release exclusive pSCSI internal struct block_device claim for
|
||||||
* struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
|
* struct scsi_device with TYPE_DISK or TYPE_ZBC
|
||||||
|
* from pscsi_create_type_disk()
|
||||||
*/
|
*/
|
||||||
if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
|
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
|
||||||
|
pdv->pdv_bd) {
|
||||||
blkdev_put(pdv->pdv_bd,
|
blkdev_put(pdv->pdv_bd,
|
||||||
FMODE_WRITE|FMODE_READ|FMODE_EXCL);
|
FMODE_WRITE|FMODE_READ|FMODE_EXCL);
|
||||||
pdv->pdv_bd = NULL;
|
pdv->pdv_bd = NULL;
|
||||||
|
@ -594,15 +601,13 @@ static void pscsi_free_device(struct se_device *dev)
|
||||||
|
|
||||||
pdv->pdv_sd = NULL;
|
pdv->pdv_sd = NULL;
|
||||||
}
|
}
|
||||||
call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
|
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
|
||||||
unsigned char *sense_buffer)
|
unsigned char *req_sense)
|
||||||
{
|
{
|
||||||
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
|
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
|
||||||
struct scsi_device *sd = pdv->pdv_sd;
|
struct scsi_device *sd = pdv->pdv_sd;
|
||||||
int result;
|
|
||||||
struct pscsi_plugin_task *pt = cmd->priv;
|
struct pscsi_plugin_task *pt = cmd->priv;
|
||||||
unsigned char *cdb;
|
unsigned char *cdb;
|
||||||
/*
|
/*
|
||||||
|
@ -613,7 +618,6 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
cdb = &pt->pscsi_cdb[0];
|
cdb = &pt->pscsi_cdb[0];
|
||||||
result = pt->pscsi_result;
|
|
||||||
/*
|
/*
|
||||||
* Hack to make sure that Write-Protect modepage is set if R/O mode is
|
* Hack to make sure that Write-Protect modepage is set if R/O mode is
|
||||||
* forced.
|
* forced.
|
||||||
|
@ -622,7 +626,7 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
|
||||||
goto after_mode_sense;
|
goto after_mode_sense;
|
||||||
|
|
||||||
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
|
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
|
||||||
(status_byte(result) << 1) == SAM_STAT_GOOD) {
|
scsi_status == SAM_STAT_GOOD) {
|
||||||
bool read_only = target_lun_is_rdonly(cmd);
|
bool read_only = target_lun_is_rdonly(cmd);
|
||||||
|
|
||||||
if (read_only) {
|
if (read_only) {
|
||||||
|
@ -657,40 +661,36 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
|
||||||
* storage engine.
|
* storage engine.
|
||||||
*/
|
*/
|
||||||
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
|
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
|
||||||
(status_byte(result) << 1) == SAM_STAT_GOOD) {
|
scsi_status == SAM_STAT_GOOD) {
|
||||||
unsigned char *buf;
|
unsigned char *buf;
|
||||||
u16 bdl;
|
u16 bdl;
|
||||||
u32 blocksize;
|
u32 blocksize;
|
||||||
|
|
||||||
buf = sg_virt(&sg[0]);
|
buf = sg_virt(&cmd->t_data_sg[0]);
|
||||||
if (!buf) {
|
if (!buf) {
|
||||||
pr_err("Unable to get buf for scatterlist\n");
|
pr_err("Unable to get buf for scatterlist\n");
|
||||||
goto after_mode_select;
|
goto after_mode_select;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cdb[0] == MODE_SELECT)
|
if (cdb[0] == MODE_SELECT)
|
||||||
bdl = (buf[3]);
|
bdl = buf[3];
|
||||||
else
|
else
|
||||||
bdl = (buf[6] << 8) | (buf[7]);
|
bdl = get_unaligned_be16(&buf[6]);
|
||||||
|
|
||||||
if (!bdl)
|
if (!bdl)
|
||||||
goto after_mode_select;
|
goto after_mode_select;
|
||||||
|
|
||||||
if (cdb[0] == MODE_SELECT)
|
if (cdb[0] == MODE_SELECT)
|
||||||
blocksize = (buf[9] << 16) | (buf[10] << 8) |
|
blocksize = get_unaligned_be24(&buf[9]);
|
||||||
(buf[11]);
|
|
||||||
else
|
else
|
||||||
blocksize = (buf[13] << 16) | (buf[14] << 8) |
|
blocksize = get_unaligned_be24(&buf[13]);
|
||||||
(buf[15]);
|
|
||||||
|
|
||||||
sd->sector_size = blocksize;
|
sd->sector_size = blocksize;
|
||||||
}
|
}
|
||||||
after_mode_select:
|
after_mode_select:
|
||||||
|
|
||||||
if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
|
if (scsi_status == SAM_STAT_CHECK_CONDITION)
|
||||||
memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
|
transport_copy_sense_to_cmd(cmd, req_sense);
|
||||||
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
|
@ -1002,7 +1002,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
|
||||||
req->end_io_data = cmd;
|
req->end_io_data = cmd;
|
||||||
scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
|
scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb);
|
||||||
scsi_req(req)->cmd = &pt->pscsi_cdb[0];
|
scsi_req(req)->cmd = &pt->pscsi_cdb[0];
|
||||||
if (pdv->pdv_sd->type == TYPE_DISK)
|
if (pdv->pdv_sd->type == TYPE_DISK ||
|
||||||
|
pdv->pdv_sd->type == TYPE_ZBC)
|
||||||
req->timeout = PS_TIMEOUT_DISK;
|
req->timeout = PS_TIMEOUT_DISK;
|
||||||
else
|
else
|
||||||
req->timeout = PS_TIMEOUT_OTHER;
|
req->timeout = PS_TIMEOUT_OTHER;
|
||||||
|
@ -1047,30 +1048,29 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
|
||||||
{
|
{
|
||||||
struct se_cmd *cmd = req->end_io_data;
|
struct se_cmd *cmd = req->end_io_data;
|
||||||
struct pscsi_plugin_task *pt = cmd->priv;
|
struct pscsi_plugin_task *pt = cmd->priv;
|
||||||
|
int result = scsi_req(req)->result;
|
||||||
|
u8 scsi_status = status_byte(result) << 1;
|
||||||
|
|
||||||
pt->pscsi_result = scsi_req(req)->result;
|
if (scsi_status) {
|
||||||
pt->pscsi_resid = scsi_req(req)->resid_len;
|
|
||||||
|
|
||||||
cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
|
|
||||||
if (cmd->scsi_status) {
|
|
||||||
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
|
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
|
||||||
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
|
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
|
||||||
pt->pscsi_result);
|
result);
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (host_byte(pt->pscsi_result)) {
|
pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense);
|
||||||
|
|
||||||
|
switch (host_byte(result)) {
|
||||||
case DID_OK:
|
case DID_OK:
|
||||||
target_complete_cmd(cmd, cmd->scsi_status);
|
target_complete_cmd(cmd, scsi_status);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
|
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
|
||||||
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
|
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
|
||||||
pt->pscsi_result);
|
result);
|
||||||
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
|
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER);
|
|
||||||
__blk_put_request(req->q, req);
|
__blk_put_request(req->q, req);
|
||||||
kfree(pt);
|
kfree(pt);
|
||||||
}
|
}
|
||||||
|
@ -1086,8 +1086,8 @@ static const struct target_backend_ops pscsi_ops = {
|
||||||
.pmode_enable_hba = pscsi_pmode_enable_hba,
|
.pmode_enable_hba = pscsi_pmode_enable_hba,
|
||||||
.alloc_device = pscsi_alloc_device,
|
.alloc_device = pscsi_alloc_device,
|
||||||
.configure_device = pscsi_configure_device,
|
.configure_device = pscsi_configure_device,
|
||||||
|
.destroy_device = pscsi_destroy_device,
|
||||||
.free_device = pscsi_free_device,
|
.free_device = pscsi_free_device,
|
||||||
.transport_complete = pscsi_transport_complete,
|
|
||||||
.parse_cdb = pscsi_parse_cdb,
|
.parse_cdb = pscsi_parse_cdb,
|
||||||
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
|
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
|
||||||
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
|
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
|
||||||
|
|
|
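pscsi_req_done() above now derives the SAM status and the host byte once from the raw 32-bit SCSI result word; status_byte() and host_byte() are shift-and-mask macros from the classic Linux result encoding, and the driver shifts the status byte back left to recover the even SAM status value. A hedged userspace restatement of that layout, with locally defined helpers rather than the scsi.h macros:

#include <stdint.h>
#include <stdio.h>

/* Classic Linux SCSI result layout: host byte in bits 16-23, SAM status in
 * the low byte; the legacy status_byte() macro returns that byte shifted
 * right by one, which is why callers shift it back left. */
static uint8_t result_host_byte(uint32_t result)
{
	return (result >> 16) & 0xff;
}

static uint8_t result_sam_status(uint32_t result)
{
	return ((result >> 1) & 0x7f) << 1;
}

int main(void)
{
	uint32_t ok = 0x00000000;	/* DID_OK, GOOD */
	uint32_t cc = 0x00000002;	/* DID_OK, CHECK CONDITION */

	printf("host=%u sam=0x%02x\n", result_host_byte(ok), result_sam_status(ok));
	printf("host=%u sam=0x%02x\n", result_host_byte(cc), result_sam_status(cc));
	return 0;
}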
@@ -23,10 +23,6 @@ struct scsi_device;
 struct Scsi_Host;
 
 struct pscsi_plugin_task {
-	unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER];
-	int pscsi_direction;
-	int pscsi_result;
-	u32 pscsi_resid;
 	unsigned char pscsi_cdb[0];
 } ____cacheline_aligned;
 
@ -338,11 +338,15 @@ static void rd_dev_call_rcu(struct rcu_head *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void rd_free_device(struct se_device *dev)
|
static void rd_free_device(struct se_device *dev)
|
||||||
|
{
|
||||||
|
call_rcu(&dev->rcu_head, rd_dev_call_rcu);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void rd_destroy_device(struct se_device *dev)
|
||||||
{
|
{
|
||||||
struct rd_dev *rd_dev = RD_DEV(dev);
|
struct rd_dev *rd_dev = RD_DEV(dev);
|
||||||
|
|
||||||
rd_release_device_space(rd_dev);
|
rd_release_device_space(rd_dev);
|
||||||
call_rcu(&dev->rcu_head, rd_dev_call_rcu);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
|
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
|
||||||
|
@ -554,7 +558,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
|
||||||
struct rd_dev *rd_dev = RD_DEV(dev);
|
struct rd_dev *rd_dev = RD_DEV(dev);
|
||||||
char *orig, *ptr, *opts;
|
char *orig, *ptr, *opts;
|
||||||
substring_t args[MAX_OPT_ARGS];
|
substring_t args[MAX_OPT_ARGS];
|
||||||
int ret = 0, arg, token;
|
int arg, token;
|
||||||
|
|
||||||
opts = kstrdup(page, GFP_KERNEL);
|
opts = kstrdup(page, GFP_KERNEL);
|
||||||
if (!opts)
|
if (!opts)
|
||||||
|
@ -589,7 +593,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(orig);
|
kfree(orig);
|
||||||
return (!ret) ? count : ret;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
|
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
|
||||||
|
@ -651,6 +655,7 @@ static const struct target_backend_ops rd_mcp_ops = {
|
||||||
.detach_hba = rd_detach_hba,
|
.detach_hba = rd_detach_hba,
|
||||||
.alloc_device = rd_alloc_device,
|
.alloc_device = rd_alloc_device,
|
||||||
.configure_device = rd_configure_device,
|
.configure_device = rd_configure_device,
|
||||||
|
.destroy_device = rd_destroy_device,
|
||||||
.free_device = rd_free_device,
|
.free_device = rd_free_device,
|
||||||
.parse_cdb = rd_parse_cdb,
|
.parse_cdb = rd_parse_cdb,
|
||||||
.set_configfs_dev_params = rd_set_configfs_dev_params,
|
.set_configfs_dev_params = rd_set_configfs_dev_params,
|
||||||
|
|
|
@ -71,14 +71,8 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
|
||||||
else
|
else
|
||||||
blocks = (u32)blocks_long;
|
blocks = (u32)blocks_long;
|
||||||
|
|
||||||
buf[0] = (blocks >> 24) & 0xff;
|
put_unaligned_be32(blocks, &buf[0]);
|
||||||
buf[1] = (blocks >> 16) & 0xff;
|
put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
|
||||||
buf[2] = (blocks >> 8) & 0xff;
|
|
||||||
buf[3] = blocks & 0xff;
|
|
||||||
buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
|
|
||||||
buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
|
|
||||||
buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
|
|
||||||
buf[7] = dev->dev_attrib.block_size & 0xff;
|
|
||||||
|
|
||||||
rbuf = transport_kmap_data_sg(cmd);
|
rbuf = transport_kmap_data_sg(cmd);
|
||||||
if (rbuf) {
|
if (rbuf) {
|
||||||
|
@ -102,18 +96,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
|
||||||
unsigned long long blocks = dev->transport->get_blocks(dev);
|
unsigned long long blocks = dev->transport->get_blocks(dev);
|
||||||
|
|
||||||
memset(buf, 0, sizeof(buf));
|
memset(buf, 0, sizeof(buf));
|
||||||
buf[0] = (blocks >> 56) & 0xff;
|
put_unaligned_be64(blocks, &buf[0]);
|
||||||
buf[1] = (blocks >> 48) & 0xff;
|
put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
|
||||||
buf[2] = (blocks >> 40) & 0xff;
|
|
||||||
buf[3] = (blocks >> 32) & 0xff;
|
|
||||||
buf[4] = (blocks >> 24) & 0xff;
|
|
||||||
buf[5] = (blocks >> 16) & 0xff;
|
|
||||||
buf[6] = (blocks >> 8) & 0xff;
|
|
||||||
buf[7] = blocks & 0xff;
|
|
||||||
buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
|
|
||||||
buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
|
|
||||||
buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
|
|
||||||
buf[11] = dev->dev_attrib.block_size & 0xff;
|
|
||||||
/*
|
/*
|
||||||
* Set P_TYPE and PROT_EN bits for DIF support
|
* Set P_TYPE and PROT_EN bits for DIF support
|
||||||
*/
|
*/
|
||||||
|
@ -134,8 +118,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
|
||||||
|
|
||||||
if (dev->transport->get_alignment_offset_lbas) {
|
if (dev->transport->get_alignment_offset_lbas) {
|
||||||
u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
|
u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
|
||||||
buf[14] = (lalba >> 8) & 0x3f;
|
|
||||||
buf[15] = lalba & 0xff;
|
put_unaligned_be16(lalba, &buf[14]);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -262,18 +246,17 @@ static inline u32 transport_get_sectors_6(unsigned char *cdb)
|
||||||
|
|
||||||
static inline u32 transport_get_sectors_10(unsigned char *cdb)
|
static inline u32 transport_get_sectors_10(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return (u32)(cdb[7] << 8) + cdb[8];
|
return get_unaligned_be16(&cdb[7]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32 transport_get_sectors_12(unsigned char *cdb)
|
static inline u32 transport_get_sectors_12(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
|
return get_unaligned_be32(&cdb[6]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32 transport_get_sectors_16(unsigned char *cdb)
|
static inline u32 transport_get_sectors_16(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
|
return get_unaligned_be32(&cdb[10]);
|
||||||
(cdb[12] << 8) + cdb[13];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -281,29 +264,23 @@ static inline u32 transport_get_sectors_16(unsigned char *cdb)
|
||||||
*/
|
*/
|
||||||
static inline u32 transport_get_sectors_32(unsigned char *cdb)
|
static inline u32 transport_get_sectors_32(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
|
return get_unaligned_be32(&cdb[28]);
|
||||||
(cdb[30] << 8) + cdb[31];
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32 transport_lba_21(unsigned char *cdb)
|
static inline u32 transport_lba_21(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
|
return get_unaligned_be24(&cdb[1]) & 0x1fffff;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32 transport_lba_32(unsigned char *cdb)
|
static inline u32 transport_lba_32(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
|
return get_unaligned_be32(&cdb[2]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline unsigned long long transport_lba_64(unsigned char *cdb)
|
static inline unsigned long long transport_lba_64(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
unsigned int __v1, __v2;
|
return get_unaligned_be64(&cdb[2]);
|
||||||
|
|
||||||
__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
|
|
||||||
__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
|
|
||||||
|
|
||||||
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -311,12 +288,7 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
|
||||||
*/
|
*/
|
||||||
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
|
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
|
||||||
{
|
{
|
||||||
unsigned int __v1, __v2;
|
return get_unaligned_be64(&cdb[12]);
|
||||||
|
|
||||||
__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
|
|
||||||
__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
|
|
||||||
|
|
||||||
return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static sense_reason_t
|
static sense_reason_t
|
||||||
|
@ -1005,6 +977,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case COMPARE_AND_WRITE:
|
case COMPARE_AND_WRITE:
|
||||||
|
if (!dev->dev_attrib.emulate_caw) {
|
||||||
|
pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
|
||||||
|
" COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
|
||||||
|
dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
|
||||||
|
return TCM_UNSUPPORTED_SCSI_OPCODE;
|
||||||
|
}
|
||||||
sectors = cdb[13];
|
sectors = cdb[13];
|
||||||
/*
|
/*
|
||||||
* Currently enforce COMPARE_AND_WRITE for a single sector
|
* Currently enforce COMPARE_AND_WRITE for a single sector
|
||||||
|
@ -1045,8 +1023,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
|
||||||
cmd->t_task_cdb[1] & 0x1f);
|
cmd->t_task_cdb[1] & 0x1f);
|
||||||
return TCM_INVALID_CDB_FIELD;
|
return TCM_INVALID_CDB_FIELD;
|
||||||
}
|
}
|
||||||
size = (cdb[10] << 24) | (cdb[11] << 16) |
|
size = get_unaligned_be32(&cdb[10]);
|
||||||
(cdb[12] << 8) | cdb[13];
|
|
||||||
break;
|
break;
|
||||||
case SYNCHRONIZE_CACHE:
|
case SYNCHRONIZE_CACHE:
|
||||||
case SYNCHRONIZE_CACHE_16:
|
case SYNCHRONIZE_CACHE_16:
|
||||||
|
|
|
@ -287,8 +287,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
|
||||||
/* Skip over Obsolete field in RTPI payload
|
/* Skip over Obsolete field in RTPI payload
|
||||||
* in Table 472 */
|
* in Table 472 */
|
||||||
off += 2;
|
off += 2;
|
||||||
buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
|
put_unaligned_be16(lun->lun_rtpi, &buf[off]);
|
||||||
buf[off++] = (lun->lun_rtpi & 0xff);
|
off += 2;
|
||||||
len += 8; /* Header size + Designation descriptor */
|
len += 8; /* Header size + Designation descriptor */
|
||||||
/*
|
/*
|
||||||
* Target port group identifier, see spc4r17
|
* Target port group identifier, see spc4r17
|
||||||
|
@ -316,8 +316,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
|
||||||
off++; /* Skip over Reserved */
|
off++; /* Skip over Reserved */
|
||||||
buf[off++] = 4; /* DESIGNATOR LENGTH */
|
buf[off++] = 4; /* DESIGNATOR LENGTH */
|
||||||
off += 2; /* Skip over Reserved Field */
|
off += 2; /* Skip over Reserved Field */
|
||||||
buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
|
put_unaligned_be16(tg_pt_gp_id, &buf[off]);
|
||||||
buf[off++] = (tg_pt_gp_id & 0xff);
|
off += 2;
|
||||||
len += 8; /* Header size + Designation descriptor */
|
len += 8; /* Header size + Designation descriptor */
|
||||||
/*
|
/*
|
||||||
* Logical Unit Group identifier, see spc4r17
|
* Logical Unit Group identifier, see spc4r17
|
||||||
|
@ -343,8 +343,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
|
||||||
off++; /* Skip over Reserved */
|
off++; /* Skip over Reserved */
|
||||||
buf[off++] = 4; /* DESIGNATOR LENGTH */
|
buf[off++] = 4; /* DESIGNATOR LENGTH */
|
||||||
off += 2; /* Skip over Reserved Field */
|
off += 2; /* Skip over Reserved Field */
|
||||||
buf[off++] = ((lu_gp_id >> 8) & 0xff);
|
put_unaligned_be16(lu_gp_id, &buf[off]);
|
||||||
buf[off++] = (lu_gp_id & 0xff);
|
off += 2;
|
||||||
len += 8; /* Header size + Designation descriptor */
|
len += 8; /* Header size + Designation descriptor */
|
||||||
/*
|
/*
|
||||||
* SCSI name string designator, see spc4r17
|
* SCSI name string designator, see spc4r17
|
||||||
|
@ -431,8 +431,7 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
|
||||||
/* Header size + Designation descriptor */
|
/* Header size + Designation descriptor */
|
||||||
len += (scsi_target_len + 4);
|
len += (scsi_target_len + 4);
|
||||||
}
|
}
|
||||||
buf[2] = ((len >> 8) & 0xff);
|
put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
|
||||||
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(spc_emulate_evpd_83);
|
EXPORT_SYMBOL(spc_emulate_evpd_83);
|
||||||
|
@ -1288,7 +1287,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
cmd->execute_cmd = spc_emulate_modeselect;
|
cmd->execute_cmd = spc_emulate_modeselect;
|
||||||
break;
|
break;
|
||||||
case MODE_SELECT_10:
|
case MODE_SELECT_10:
|
||||||
*size = (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
cmd->execute_cmd = spc_emulate_modeselect;
|
cmd->execute_cmd = spc_emulate_modeselect;
|
||||||
break;
|
break;
|
||||||
case MODE_SENSE:
|
case MODE_SENSE:
|
||||||
|
@ -1296,25 +1295,25 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
cmd->execute_cmd = spc_emulate_modesense;
|
cmd->execute_cmd = spc_emulate_modesense;
|
||||||
break;
|
break;
|
||||||
case MODE_SENSE_10:
|
case MODE_SENSE_10:
|
||||||
*size = (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
cmd->execute_cmd = spc_emulate_modesense;
|
cmd->execute_cmd = spc_emulate_modesense;
|
||||||
break;
|
break;
|
||||||
case LOG_SELECT:
|
case LOG_SELECT:
|
||||||
case LOG_SENSE:
|
case LOG_SENSE:
|
||||||
*size = (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
break;
|
break;
|
||||||
case PERSISTENT_RESERVE_IN:
|
case PERSISTENT_RESERVE_IN:
|
||||||
*size = (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
cmd->execute_cmd = target_scsi3_emulate_pr_in;
|
cmd->execute_cmd = target_scsi3_emulate_pr_in;
|
||||||
break;
|
break;
|
||||||
case PERSISTENT_RESERVE_OUT:
|
case PERSISTENT_RESERVE_OUT:
|
||||||
*size = (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be32(&cdb[5]);
|
||||||
cmd->execute_cmd = target_scsi3_emulate_pr_out;
|
cmd->execute_cmd = target_scsi3_emulate_pr_out;
|
||||||
break;
|
break;
|
||||||
case RELEASE:
|
case RELEASE:
|
||||||
case RELEASE_10:
|
case RELEASE_10:
|
||||||
if (cdb[0] == RELEASE_10)
|
if (cdb[0] == RELEASE_10)
|
||||||
*size = (cdb[7] << 8) | cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
else
|
else
|
||||||
*size = cmd->data_length;
|
*size = cmd->data_length;
|
||||||
|
|
||||||
|
@ -1327,7 +1326,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
* Assume the passthrough or $FABRIC_MOD will tell us about it.
|
* Assume the passthrough or $FABRIC_MOD will tell us about it.
|
||||||
*/
|
*/
|
||||||
if (cdb[0] == RESERVE_10)
|
if (cdb[0] == RESERVE_10)
|
||||||
*size = (cdb[7] << 8) | cdb[8];
|
*size = get_unaligned_be16(&cdb[7]);
|
||||||
else
|
else
|
||||||
*size = cmd->data_length;
|
*size = cmd->data_length;
|
||||||
|
|
||||||
|
@ -1338,7 +1337,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
cmd->execute_cmd = spc_emulate_request_sense;
|
cmd->execute_cmd = spc_emulate_request_sense;
|
||||||
break;
|
break;
|
||||||
case INQUIRY:
|
case INQUIRY:
|
||||||
*size = (cdb[3] << 8) + cdb[4];
|
*size = get_unaligned_be16(&cdb[3]);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
|
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
|
||||||
|
@ -1349,7 +1348,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
break;
|
break;
|
||||||
case SECURITY_PROTOCOL_IN:
|
case SECURITY_PROTOCOL_IN:
|
||||||
case SECURITY_PROTOCOL_OUT:
|
case SECURITY_PROTOCOL_OUT:
|
||||||
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
|
*size = get_unaligned_be32(&cdb[6]);
|
||||||
break;
|
break;
|
||||||
case EXTENDED_COPY:
|
case EXTENDED_COPY:
|
||||||
*size = get_unaligned_be32(&cdb[10]);
|
*size = get_unaligned_be32(&cdb[10]);
|
||||||
|
@ -1361,19 +1360,18 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
|
||||||
break;
|
break;
|
||||||
case READ_ATTRIBUTE:
|
case READ_ATTRIBUTE:
|
||||||
case WRITE_ATTRIBUTE:
|
case WRITE_ATTRIBUTE:
|
||||||
*size = (cdb[10] << 24) | (cdb[11] << 16) |
|
*size = get_unaligned_be32(&cdb[10]);
|
||||||
(cdb[12] << 8) | cdb[13];
|
|
||||||
break;
|
break;
|
||||||
case RECEIVE_DIAGNOSTIC:
|
case RECEIVE_DIAGNOSTIC:
|
||||||
case SEND_DIAGNOSTIC:
|
case SEND_DIAGNOSTIC:
|
||||||
*size = (cdb[3] << 8) | cdb[4];
|
*size = get_unaligned_be16(&cdb[3]);
|
||||||
break;
|
break;
|
||||||
case WRITE_BUFFER:
|
case WRITE_BUFFER:
|
||||||
*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
|
*size = get_unaligned_be24(&cdb[6]);
|
||||||
break;
|
break;
|
||||||
case REPORT_LUNS:
|
case REPORT_LUNS:
|
||||||
cmd->execute_cmd = spc_emulate_report_luns;
|
cmd->execute_cmd = spc_emulate_report_luns;
|
||||||
*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
|
*size = get_unaligned_be32(&cdb[6]);
|
||||||
/*
|
/*
|
||||||
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
|
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
|
||||||
* See spc4r17 section 5.3
|
* See spc4r17 section 5.3
|
||||||
|
|
|
@ -355,20 +355,10 @@ static void core_tmr_drain_state_list(
|
||||||
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
|
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
|
||||||
list_del_init(&cmd->state_list);
|
list_del_init(&cmd->state_list);
|
||||||
|
|
||||||
pr_debug("LUN_RESET: %s cmd: %p"
|
target_show_cmd("LUN_RESET: ", cmd);
|
||||||
" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
|
pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
|
||||||
"cdb: 0x%02x\n",
|
cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
|
||||||
(preempt_and_abort_list) ? "Preempt" : "", cmd,
|
cmd->pr_res_key);
|
||||||
cmd->tag, 0,
|
|
||||||
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
|
|
||||||
cmd->t_task_cdb[0]);
|
|
||||||
pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
|
|
||||||
" -- CMD_T_ACTIVE: %d"
|
|
||||||
" CMD_T_STOP: %d CMD_T_SENT: %d\n",
|
|
||||||
cmd->tag, cmd->pr_res_key,
|
|
||||||
(cmd->transport_state & CMD_T_ACTIVE) != 0,
|
|
||||||
(cmd->transport_state & CMD_T_STOP) != 0,
|
|
||||||
(cmd->transport_state & CMD_T_SENT) != 0);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If the command may be queued onto a workqueue cancel it now.
|
* If the command may be queued onto a workqueue cancel it now.
|
||||||
|
|
|
@ -576,7 +576,6 @@ struct se_lun *core_tpg_alloc_lun(
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
}
|
}
|
||||||
lun->unpacked_lun = unpacked_lun;
|
lun->unpacked_lun = unpacked_lun;
|
||||||
lun->lun_link_magic = SE_LUN_LINK_MAGIC;
|
|
||||||
atomic_set(&lun->lun_acl_count, 0);
|
atomic_set(&lun->lun_acl_count, 0);
|
||||||
init_completion(&lun->lun_ref_comp);
|
init_completion(&lun->lun_ref_comp);
|
||||||
init_completion(&lun->lun_shutdown_comp);
|
init_completion(&lun->lun_shutdown_comp);
|
||||||
|
|
|
@ -704,23 +704,43 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
|
||||||
return cmd->sense_buffer;
|
return cmd->sense_buffer;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
|
||||||
|
{
|
||||||
|
unsigned char *cmd_sense_buf;
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&cmd->t_state_lock, flags);
|
||||||
|
cmd_sense_buf = transport_get_sense_buffer(cmd);
|
||||||
|
if (!cmd_sense_buf) {
|
||||||
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
|
||||||
|
memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
|
||||||
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(transport_copy_sense_to_cmd);
|
||||||
|
|
||||||
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
|
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
|
||||||
{
|
{
|
||||||
struct se_device *dev = cmd->se_dev;
|
struct se_device *dev = cmd->se_dev;
|
||||||
int success = scsi_status == GOOD;
|
int success;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
cmd->scsi_status = scsi_status;
|
cmd->scsi_status = scsi_status;
|
||||||
|
|
||||||
|
|
||||||
spin_lock_irqsave(&cmd->t_state_lock, flags);
|
spin_lock_irqsave(&cmd->t_state_lock, flags);
|
||||||
|
switch (cmd->scsi_status) {
|
||||||
if (dev && dev->transport->transport_complete) {
|
case SAM_STAT_CHECK_CONDITION:
|
||||||
dev->transport->transport_complete(cmd,
|
|
||||||
cmd->t_data_sg,
|
|
||||||
transport_get_sense_buffer(cmd));
|
|
||||||
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
|
if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
|
||||||
success = 1;
|
success = 1;
|
||||||
|
else
|
||||||
|
success = 0;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
success = 1;
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -730,6 +750,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
|
||||||
if (cmd->transport_state & CMD_T_ABORTED ||
|
if (cmd->transport_state & CMD_T_ABORTED ||
|
||||||
cmd->transport_state & CMD_T_STOP) {
|
cmd->transport_state & CMD_T_STOP) {
|
||||||
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
||||||
|
/*
|
||||||
|
* If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
|
||||||
|
* release se_device->caw_sem obtained by sbc_compare_and_write()
|
||||||
|
* since target_complete_ok_work() or target_complete_failure_work()
|
||||||
|
* won't be called to invoke the normal CAW completion callbacks.
|
||||||
|
*/
|
||||||
|
if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
|
||||||
|
up(&dev->caw_sem);
|
||||||
|
}
|
||||||
complete_all(&cmd->t_transport_stop_comp);
|
complete_all(&cmd->t_transport_stop_comp);
|
||||||
return;
|
return;
|
||||||
} else if (!success) {
|
} else if (!success) {
|
||||||
|
@ -1239,6 +1268,7 @@ void transport_init_se_cmd(
|
||||||
init_completion(&cmd->t_transport_stop_comp);
|
init_completion(&cmd->t_transport_stop_comp);
|
||||||
init_completion(&cmd->cmd_wait_comp);
|
init_completion(&cmd->cmd_wait_comp);
|
||||||
spin_lock_init(&cmd->t_state_lock);
|
spin_lock_init(&cmd->t_state_lock);
|
||||||
|
INIT_WORK(&cmd->work, NULL);
|
||||||
kref_init(&cmd->cmd_kref);
|
kref_init(&cmd->cmd_kref);
|
||||||
|
|
||||||
cmd->se_tfo = tfo;
|
cmd->se_tfo = tfo;
|
||||||
|
@ -1590,9 +1620,33 @@ static void target_complete_tmr_failure(struct work_struct *work)
|
||||||
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
|
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
|
||||||
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
|
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
|
||||||
|
|
||||||
|
transport_lun_remove_cmd(se_cmd);
|
||||||
transport_cmd_check_stop_to_fabric(se_cmd);
|
transport_cmd_check_stop_to_fabric(se_cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
|
||||||
|
u64 *unpacked_lun)
|
||||||
|
{
|
||||||
|
struct se_cmd *se_cmd;
|
||||||
|
unsigned long flags;
|
||||||
|
bool ret = false;
|
||||||
|
|
||||||
|
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
||||||
|
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
|
||||||
|
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (se_cmd->tag == tag) {
|
||||||
|
*unpacked_lun = se_cmd->orig_fe_lun;
|
||||||
|
ret = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
|
* target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
|
||||||
* for TMR CDBs
|
* for TMR CDBs
|
||||||
|
@ -1640,19 +1694,31 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
|
||||||
core_tmr_release_req(se_cmd->se_tmr_req);
|
core_tmr_release_req(se_cmd->se_tmr_req);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
* If this is ABORT_TASK with no explicit fabric provided LUN,
|
||||||
|
* go ahead and search active session tags for a match to figure
|
||||||
|
* out unpacked_lun for the original se_cmd.
|
||||||
|
*/
|
||||||
|
if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
|
||||||
|
if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
|
||||||
|
goto failure;
|
||||||
|
}
|
||||||
|
|
||||||
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
|
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
|
||||||
if (ret) {
|
if (ret)
|
||||||
|
goto failure;
|
||||||
|
|
||||||
|
transport_generic_handle_tmr(se_cmd);
|
||||||
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For callback during failure handling, push this work off
|
* For callback during failure handling, push this work off
|
||||||
* to process context with TMR_LUN_DOES_NOT_EXIST status.
|
* to process context with TMR_LUN_DOES_NOT_EXIST status.
|
||||||
*/
|
*/
|
||||||
|
failure:
|
||||||
INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
|
INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
|
||||||
schedule_work(&se_cmd->work);
|
schedule_work(&se_cmd->work);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
|
||||||
transport_generic_handle_tmr(se_cmd);
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(target_submit_tmr);
|
EXPORT_SYMBOL(target_submit_tmr);
|
||||||
|
|
||||||
|
@ -1667,15 +1733,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
|
||||||
if (transport_check_aborted_status(cmd, 1))
|
if (transport_check_aborted_status(cmd, 1))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
|
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
|
||||||
" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
|
sense_reason);
|
||||||
pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
|
target_show_cmd("-----[ ", cmd);
|
||||||
cmd->se_tfo->get_cmd_state(cmd),
|
|
||||||
cmd->t_state, sense_reason);
|
|
||||||
pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
|
|
||||||
(cmd->transport_state & CMD_T_ACTIVE) != 0,
|
|
||||||
(cmd->transport_state & CMD_T_STOP) != 0,
|
|
||||||
(cmd->transport_state & CMD_T_SENT) != 0);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For SAM Task Attribute emulation for failed struct se_cmd
|
* For SAM Task Attribute emulation for failed struct se_cmd
|
||||||
|
@ -2668,6 +2728,108 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(target_put_sess_cmd);
|
EXPORT_SYMBOL(target_put_sess_cmd);
|
||||||
|
|
||||||
|
static const char *data_dir_name(enum dma_data_direction d)
|
||||||
|
{
|
||||||
|
switch (d) {
|
||||||
|
case DMA_BIDIRECTIONAL: return "BIDI";
|
||||||
|
case DMA_TO_DEVICE: return "WRITE";
|
||||||
|
case DMA_FROM_DEVICE: return "READ";
|
||||||
|
case DMA_NONE: return "NONE";
|
||||||
|
}
|
||||||
|
|
||||||
|
return "(?)";
|
||||||
|
}
|
||||||
|
|
||||||
|
static const char *cmd_state_name(enum transport_state_table t)
|
||||||
|
{
|
||||||
|
switch (t) {
|
||||||
|
case TRANSPORT_NO_STATE: return "NO_STATE";
|
||||||
|
case TRANSPORT_NEW_CMD: return "NEW_CMD";
|
||||||
|
case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
|
||||||
|
case TRANSPORT_PROCESSING: return "PROCESSING";
|
||||||
|
case TRANSPORT_COMPLETE: return "COMPLETE";
|
||||||
|
case TRANSPORT_ISTATE_PROCESSING:
|
||||||
|
return "ISTATE_PROCESSING";
|
||||||
|
case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
|
||||||
|
case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
|
||||||
|
case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
|
||||||
|
}
|
||||||
|
|
||||||
|
return "(?)";
|
||||||
|
}
|
||||||
|
|
||||||
|
static void target_append_str(char **str, const char *txt)
|
||||||
|
{
|
||||||
|
char *prev = *str;
|
||||||
|
|
||||||
|
*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
|
||||||
|
kstrdup(txt, GFP_ATOMIC);
|
||||||
|
kfree(prev);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Convert a transport state bitmask into a string. The caller is
|
||||||
|
* responsible for freeing the returned pointer.
|
||||||
|
*/
|
||||||
|
static char *target_ts_to_str(u32 ts)
|
||||||
|
{
|
||||||
|
char *str = NULL;
|
||||||
|
|
||||||
|
if (ts & CMD_T_ABORTED)
|
||||||
|
target_append_str(&str, "aborted");
|
||||||
|
if (ts & CMD_T_ACTIVE)
|
||||||
|
target_append_str(&str, "active");
|
||||||
|
if (ts & CMD_T_COMPLETE)
|
||||||
|
target_append_str(&str, "complete");
|
||||||
|
if (ts & CMD_T_SENT)
|
||||||
|
target_append_str(&str, "sent");
|
||||||
|
if (ts & CMD_T_STOP)
|
||||||
|
target_append_str(&str, "stop");
|
||||||
|
if (ts & CMD_T_FABRIC_STOP)
|
||||||
|
target_append_str(&str, "fabric_stop");
|
||||||
|
|
||||||
|
return str;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const char *target_tmf_name(enum tcm_tmreq_table tmf)
|
||||||
|
{
|
||||||
|
switch (tmf) {
|
||||||
|
case TMR_ABORT_TASK: return "ABORT_TASK";
|
||||||
|
case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
|
||||||
|
case TMR_CLEAR_ACA: return "CLEAR_ACA";
|
||||||
|
case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
|
||||||
|
case TMR_LUN_RESET: return "LUN_RESET";
|
||||||
|
case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
|
||||||
|
case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
|
||||||
|
case TMR_UNKNOWN: break;
|
||||||
|
}
|
||||||
|
return "(?)";
|
||||||
|
}
|
||||||
|
|
||||||
|
void target_show_cmd(const char *pfx, struct se_cmd *cmd)
|
||||||
|
{
|
||||||
|
char *ts_str = target_ts_to_str(cmd->transport_state);
|
||||||
|
const u8 *cdb = cmd->t_task_cdb;
|
||||||
|
struct se_tmr_req *tmf = cmd->se_tmr_req;
|
||||||
|
|
||||||
|
if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
|
||||||
|
pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
|
||||||
|
pfx, cdb[0], cdb[1], cmd->tag,
|
||||||
|
data_dir_name(cmd->data_direction),
|
||||||
|
cmd->se_tfo->get_cmd_state(cmd),
|
||||||
|
cmd_state_name(cmd->t_state), cmd->data_length,
|
||||||
|
kref_read(&cmd->cmd_kref), ts_str);
|
||||||
|
} else {
|
||||||
|
pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
|
||||||
|
pfx, target_tmf_name(tmf->function), cmd->tag,
|
||||||
|
tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
|
||||||
|
cmd_state_name(cmd->t_state),
|
||||||
|
kref_read(&cmd->cmd_kref), ts_str);
|
||||||
|
}
|
||||||
|
kfree(ts_str);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(target_show_cmd);
|
||||||
|
|
||||||
/* target_sess_cmd_list_set_waiting - Flag all commands in
|
/* target_sess_cmd_list_set_waiting - Flag all commands in
|
||||||
* sess_cmd_list to complete cmd_wait_comp. Set
|
* sess_cmd_list to complete cmd_wait_comp. Set
|
||||||
* sess_tearing_down so no more commands are queued.
|
* sess_tearing_down so no more commands are queued.
|
||||||
|
@ -2812,13 +2974,13 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
|
||||||
|
|
||||||
cmd->transport_state |= CMD_T_STOP;
|
cmd->transport_state |= CMD_T_STOP;
|
||||||
|
|
||||||
pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
|
target_show_cmd("wait_for_tasks: Stopping ", cmd);
|
||||||
" t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
|
|
||||||
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
|
spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
|
||||||
|
|
||||||
wait_for_completion(&cmd->t_transport_stop_comp);
|
while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
|
||||||
|
180 * HZ))
|
||||||
|
target_show_cmd("wait for tasks: ", cmd);
|
||||||
|
|
||||||
spin_lock_irqsave(&cmd->t_state_lock, *flags);
|
spin_lock_irqsave(&cmd->t_state_lock, *flags);
|
||||||
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
|
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
|
||||||
|
@ -3201,6 +3363,7 @@ static void target_tmr_work(struct work_struct *work)
|
||||||
cmd->se_tfo->queue_tm_rsp(cmd);
|
cmd->se_tfo->queue_tm_rsp(cmd);
|
||||||
|
|
||||||
check_stop:
|
check_stop:
|
||||||
|
transport_lun_remove_cmd(cmd);
|
||||||
transport_cmd_check_stop_to_fabric(cmd);
|
transport_cmd_check_stop_to_fabric(cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3223,6 +3386,7 @@ int transport_generic_handle_tmr(
|
||||||
pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
|
pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
|
||||||
"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
|
"ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
|
||||||
cmd->se_tmr_req->ref_task_tag, cmd->tag);
|
cmd->se_tmr_req->ref_task_tag, cmd->tag);
|
||||||
|
transport_lun_remove_cmd(cmd);
|
||||||
transport_cmd_check_stop_to_fabric(cmd);
|
transport_cmd_check_stop_to_fabric(cmd);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@ -87,6 +87,8 @@
|
||||||
/* Default maximum of the global data blocks(512K * PAGE_SIZE) */
|
/* Default maximum of the global data blocks(512K * PAGE_SIZE) */
|
||||||
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
|
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
|
||||||
|
|
||||||
|
static u8 tcmu_kern_cmd_reply_supported;
|
||||||
|
|
||||||
static struct device *tcmu_root_device;
|
static struct device *tcmu_root_device;
|
||||||
|
|
||||||
struct tcmu_hba {
|
struct tcmu_hba {
|
||||||
|
@ -95,6 +97,13 @@ struct tcmu_hba {
|
||||||
|
|
||||||
#define TCMU_CONFIG_LEN 256
|
#define TCMU_CONFIG_LEN 256
|
||||||
|
|
||||||
|
struct tcmu_nl_cmd {
|
||||||
|
/* wake up thread waiting for reply */
|
||||||
|
struct completion complete;
|
||||||
|
int cmd;
|
||||||
|
int status;
|
||||||
|
};
|
||||||
|
|
||||||
struct tcmu_dev {
|
struct tcmu_dev {
|
||||||
struct list_head node;
|
struct list_head node;
|
||||||
struct kref kref;
|
struct kref kref;
|
||||||
|
@ -135,6 +144,11 @@ struct tcmu_dev {
|
||||||
struct timer_list timeout;
|
struct timer_list timeout;
|
||||||
unsigned int cmd_time_out;
|
unsigned int cmd_time_out;
|
||||||
|
|
||||||
|
spinlock_t nl_cmd_lock;
|
||||||
|
struct tcmu_nl_cmd curr_nl_cmd;
|
||||||
|
/* wake up threads waiting on curr_nl_cmd */
|
||||||
|
wait_queue_head_t nl_cmd_wq;
|
||||||
|
|
||||||
char dev_config[TCMU_CONFIG_LEN];
|
char dev_config[TCMU_CONFIG_LEN];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -178,16 +192,128 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
|
||||||
[TCMU_MCGRP_CONFIG] = { .name = "config", },
|
[TCMU_MCGRP_CONFIG] = { .name = "config", },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
|
||||||
|
[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
|
||||||
|
[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
|
||||||
|
[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
|
||||||
|
[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
|
||||||
|
[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
|
||||||
|
};
|
||||||
|
|
||||||
|
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
|
||||||
|
{
|
||||||
|
struct se_device *dev;
|
||||||
|
struct tcmu_dev *udev;
|
||||||
|
struct tcmu_nl_cmd *nl_cmd;
|
||||||
|
int dev_id, rc, ret = 0;
|
||||||
|
bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);
|
||||||
|
|
||||||
|
if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
|
||||||
|
!info->attrs[TCMU_ATTR_DEVICE_ID]) {
|
||||||
|
printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
|
||||||
|
rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
|
||||||
|
|
||||||
|
dev = target_find_device(dev_id, !is_removed);
|
||||||
|
if (!dev) {
|
||||||
|
printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
|
||||||
|
completed_cmd, rc, dev_id);
|
||||||
|
return -ENODEV;
|
||||||
|
}
|
||||||
|
udev = TCMU_DEV(dev);
|
||||||
|
|
||||||
|
spin_lock(&udev->nl_cmd_lock);
|
||||||
|
nl_cmd = &udev->curr_nl_cmd;
|
||||||
|
|
||||||
|
pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
|
||||||
|
nl_cmd->cmd, completed_cmd, rc);
|
||||||
|
|
||||||
|
if (nl_cmd->cmd != completed_cmd) {
|
||||||
|
printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
|
||||||
|
completed_cmd, nl_cmd->cmd);
|
||||||
|
ret = -EINVAL;
|
||||||
|
} else {
|
||||||
|
nl_cmd->status = rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
spin_unlock(&udev->nl_cmd_lock);
|
||||||
|
if (!is_removed)
|
||||||
|
target_undepend_item(&dev->dev_group.cg_item);
|
||||||
|
if (!ret)
|
||||||
|
complete(&nl_cmd->complete);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
|
||||||
|
{
|
||||||
|
return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
|
||||||
|
{
|
||||||
|
return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
|
||||||
|
struct genl_info *info)
|
||||||
|
{
|
||||||
|
return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
|
||||||
|
{
|
||||||
|
if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
|
||||||
|
tcmu_kern_cmd_reply_supported =
|
||||||
|
nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
|
||||||
|
printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
|
||||||
|
tcmu_kern_cmd_reply_supported);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct genl_ops tcmu_genl_ops[] = {
|
||||||
|
{
|
||||||
|
.cmd = TCMU_CMD_SET_FEATURES,
|
||||||
|
.flags = GENL_ADMIN_PERM,
|
||||||
|
.policy = tcmu_attr_policy,
|
||||||
|
.doit = tcmu_genl_set_features,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
|
||||||
|
.flags = GENL_ADMIN_PERM,
|
||||||
|
.policy = tcmu_attr_policy,
|
||||||
|
.doit = tcmu_genl_add_dev_done,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
|
||||||
|
.flags = GENL_ADMIN_PERM,
|
||||||
|
.policy = tcmu_attr_policy,
|
||||||
|
.doit = tcmu_genl_rm_dev_done,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
|
||||||
|
.flags = GENL_ADMIN_PERM,
|
||||||
|
.policy = tcmu_attr_policy,
|
||||||
|
.doit = tcmu_genl_reconfig_dev_done,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
/* Our generic netlink family */
|
/* Our generic netlink family */
|
||||||
static struct genl_family tcmu_genl_family __ro_after_init = {
|
static struct genl_family tcmu_genl_family __ro_after_init = {
|
||||||
.module = THIS_MODULE,
|
.module = THIS_MODULE,
|
||||||
.hdrsize = 0,
|
.hdrsize = 0,
|
||||||
.name = "TCM-USER",
|
.name = "TCM-USER",
|
||||||
.version = 1,
|
.version = 2,
|
||||||
.maxattr = TCMU_ATTR_MAX,
|
.maxattr = TCMU_ATTR_MAX,
|
||||||
.mcgrps = tcmu_mcgrps,
|
.mcgrps = tcmu_mcgrps,
|
||||||
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
|
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
|
||||||
.netnsok = true,
|
.netnsok = true,
|
||||||
|
.ops = tcmu_genl_ops,
|
||||||
|
.n_ops = ARRAY_SIZE(tcmu_genl_ops),
|
||||||
};
|
};
|
||||||
|
|
||||||
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
|
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
|
||||||
|
@ -216,7 +342,6 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
|
||||||
|
|
||||||
page = radix_tree_lookup(&udev->data_blocks, dbi);
|
page = radix_tree_lookup(&udev->data_blocks, dbi);
|
||||||
if (!page) {
|
if (!page) {
|
||||||
|
|
||||||
if (atomic_add_return(1, &global_db_count) >
|
if (atomic_add_return(1, &global_db_count) >
|
||||||
TCMU_GLOBAL_MAX_BLOCKS) {
|
TCMU_GLOBAL_MAX_BLOCKS) {
|
||||||
atomic_dec(&global_db_count);
|
atomic_dec(&global_db_count);
|
||||||
|
@ -226,14 +351,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
|
||||||
/* try to get new page from the mm */
|
/* try to get new page from the mm */
|
||||||
page = alloc_page(GFP_KERNEL);
|
page = alloc_page(GFP_KERNEL);
|
||||||
if (!page)
|
if (!page)
|
||||||
return false;
|
goto err_alloc;
|
||||||
|
|
||||||
ret = radix_tree_insert(&udev->data_blocks, dbi, page);
|
ret = radix_tree_insert(&udev->data_blocks, dbi, page);
|
||||||
if (ret) {
|
if (ret)
|
||||||
__free_page(page);
|
goto err_insert;
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (dbi > udev->dbi_max)
|
if (dbi > udev->dbi_max)
|
||||||
|
@ -243,6 +365,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
|
||||||
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
|
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
err_insert:
|
||||||
|
__free_page(page);
|
||||||
|
err_alloc:
|
||||||
|
atomic_dec(&global_db_count);
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
|
static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
|
||||||
|
@ -401,7 +528,7 @@ static inline size_t get_block_offset_user(struct tcmu_dev *dev,
|
||||||
DATA_BLOCK_SIZE - remaining;
|
DATA_BLOCK_SIZE - remaining;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
|
static inline size_t iov_tail(struct iovec *iov)
|
||||||
{
|
{
|
||||||
return (size_t)iov->iov_base + iov->iov_len;
|
return (size_t)iov->iov_base + iov->iov_len;
|
||||||
}
|
}
|
||||||
|
@ -437,10 +564,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
|
||||||
to_offset = get_block_offset_user(udev, dbi,
|
to_offset = get_block_offset_user(udev, dbi,
|
||||||
block_remaining);
|
block_remaining);
|
||||||
offset = DATA_BLOCK_SIZE - block_remaining;
|
offset = DATA_BLOCK_SIZE - block_remaining;
|
||||||
to = (void *)(unsigned long)to + offset;
|
to += offset;
|
||||||
|
|
||||||
if (*iov_cnt != 0 &&
|
if (*iov_cnt != 0 &&
|
||||||
to_offset == iov_tail(udev, *iov)) {
|
to_offset == iov_tail(*iov)) {
|
||||||
(*iov)->iov_len += copy_bytes;
|
(*iov)->iov_len += copy_bytes;
|
||||||
} else {
|
} else {
|
||||||
new_iov(iov, iov_cnt, udev);
|
new_iov(iov, iov_cnt, udev);
|
||||||
|
@ -510,7 +637,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
|
||||||
copy_bytes = min_t(size_t, sg_remaining,
|
copy_bytes = min_t(size_t, sg_remaining,
|
||||||
block_remaining);
|
block_remaining);
|
||||||
offset = DATA_BLOCK_SIZE - block_remaining;
|
offset = DATA_BLOCK_SIZE - block_remaining;
|
||||||
from = (void *)(unsigned long)from + offset;
|
from += offset;
|
||||||
tcmu_flush_dcache_range(from, copy_bytes);
|
tcmu_flush_dcache_range(from, copy_bytes);
|
||||||
memcpy(to + sg->length - sg_remaining, from,
|
memcpy(to + sg->length - sg_remaining, from,
|
||||||
copy_bytes);
|
copy_bytes);
|
||||||
|
@ -596,10 +723,7 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!tcmu_get_empty_blocks(udev, cmd))
|
return tcmu_get_empty_blocks(udev, cmd);
|
||||||
return false;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
|
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
|
||||||
|
@ -699,25 +823,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
|
||||||
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
|
size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
|
||||||
|
|
||||||
entry = (void *) mb + CMDR_OFF + cmd_head;
|
entry = (void *) mb + CMDR_OFF + cmd_head;
|
||||||
tcmu_flush_dcache_range(entry, sizeof(*entry));
|
|
||||||
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
|
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
|
||||||
tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
|
tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
|
||||||
entry->hdr.cmd_id = 0; /* not used for PAD */
|
entry->hdr.cmd_id = 0; /* not used for PAD */
|
||||||
entry->hdr.kflags = 0;
|
entry->hdr.kflags = 0;
|
||||||
entry->hdr.uflags = 0;
|
entry->hdr.uflags = 0;
|
||||||
|
tcmu_flush_dcache_range(entry, sizeof(*entry));
|
||||||
|
|
||||||
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
|
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
|
||||||
|
tcmu_flush_dcache_range(mb, sizeof(*mb));
|
||||||
|
|
||||||
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
|
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
|
||||||
WARN_ON(cmd_head != 0);
|
WARN_ON(cmd_head != 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
entry = (void *) mb + CMDR_OFF + cmd_head;
|
entry = (void *) mb + CMDR_OFF + cmd_head;
|
||||||
tcmu_flush_dcache_range(entry, sizeof(*entry));
|
memset(entry, 0, command_size);
|
||||||
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
|
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
|
||||||
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
|
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
|
||||||
entry->hdr.kflags = 0;
|
|
||||||
entry->hdr.uflags = 0;
|
|
||||||
|
|
||||||
/* Handle allocating space from the data area */
|
/* Handle allocating space from the data area */
|
||||||
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
|
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
|
||||||
|
@ -736,11 +859,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
}
|
}
|
||||||
entry->req.iov_cnt = iov_cnt;
|
entry->req.iov_cnt = iov_cnt;
|
||||||
entry->req.iov_dif_cnt = 0;
|
|
||||||
|
|
||||||
/* Handle BIDI commands */
|
/* Handle BIDI commands */
|
||||||
if (se_cmd->se_cmd_flags & SCF_BIDI) {
|
|
||||||
iov_cnt = 0;
|
iov_cnt = 0;
|
||||||
|
if (se_cmd->se_cmd_flags & SCF_BIDI) {
|
||||||
iov++;
|
iov++;
|
||||||
ret = scatter_data_area(udev, tcmu_cmd,
|
ret = scatter_data_area(udev, tcmu_cmd,
|
||||||
se_cmd->t_bidi_data_sg,
|
se_cmd->t_bidi_data_sg,
|
||||||
|
@ -753,8 +875,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
|
||||||
pr_err("tcmu: alloc and scatter bidi data failed\n");
|
pr_err("tcmu: alloc and scatter bidi data failed\n");
|
||||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||||
}
|
}
|
||||||
entry->req.iov_bidi_cnt = iov_cnt;
|
|
||||||
}
|
}
|
||||||
|
entry->req.iov_bidi_cnt = iov_cnt;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Recalaulate the command's base size and size according
|
* Recalaulate the command's base size and size according
|
||||||
|
@ -830,8 +952,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
|
||||||
cmd->se_cmd);
|
cmd->se_cmd);
|
||||||
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
|
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
|
||||||
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
|
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
|
||||||
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
|
transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
|
||||||
se_cmd->scsi_sense_length);
|
|
||||||
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
|
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
|
||||||
/* Get Data-In buffer before clean up */
|
/* Get Data-In buffer before clean up */
|
||||||
gather_data_area(udev, cmd, true);
|
gather_data_area(udev, cmd, true);
|
||||||
|
@ -989,6 +1110,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
|
||||||
setup_timer(&udev->timeout, tcmu_device_timedout,
|
setup_timer(&udev->timeout, tcmu_device_timedout,
|
||||||
(unsigned long)udev);
|
(unsigned long)udev);
|
||||||
|
|
||||||
|
init_waitqueue_head(&udev->nl_cmd_wq);
|
||||||
|
spin_lock_init(&udev->nl_cmd_lock);
|
||||||
|
|
||||||
return &udev->se_dev;
|
return &udev->se_dev;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1140,6 +1264,7 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
|
|
||||||
udev->inode = inode;
|
udev->inode = inode;
|
||||||
|
kref_get(&udev->kref);
|
||||||
|
|
||||||
pr_debug("open\n");
|
pr_debug("open\n");
|
||||||
|
|
||||||
|
@ -1171,12 +1296,59 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
|
||||||
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
|
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
|
||||||
|
|
||||||
pr_debug("close\n");
|
pr_debug("close\n");
|
||||||
/* release ref from configure */
|
/* release ref from open */
|
||||||
kref_put(&udev->kref, tcmu_dev_kref_release);
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
|
static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
|
||||||
|
{
|
||||||
|
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
|
||||||
|
|
||||||
|
if (!tcmu_kern_cmd_reply_supported)
|
||||||
|
return;
|
||||||
|
relock:
|
||||||
|
spin_lock(&udev->nl_cmd_lock);
|
||||||
|
|
||||||
|
if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
|
||||||
|
spin_unlock(&udev->nl_cmd_lock);
|
||||||
|
pr_debug("sleeping for open nl cmd\n");
|
||||||
|
wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
|
||||||
|
goto relock;
|
||||||
|
}
|
||||||
|
|
||||||
|
memset(nl_cmd, 0, sizeof(*nl_cmd));
|
||||||
|
nl_cmd->cmd = cmd;
|
||||||
|
init_completion(&nl_cmd->complete);
|
||||||
|
|
||||||
|
spin_unlock(&udev->nl_cmd_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
|
||||||
|
{
|
||||||
|
struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
|
||||||
|
int ret;
|
||||||
|
DEFINE_WAIT(__wait);
|
||||||
|
|
||||||
|
if (!tcmu_kern_cmd_reply_supported)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
pr_debug("sleeping for nl reply\n");
|
||||||
|
wait_for_completion(&nl_cmd->complete);
|
||||||
|
|
||||||
|
spin_lock(&udev->nl_cmd_lock);
|
||||||
|
nl_cmd->cmd = TCMU_CMD_UNSPEC;
|
||||||
|
ret = nl_cmd->status;
|
||||||
|
nl_cmd->status = 0;
|
||||||
|
spin_unlock(&udev->nl_cmd_lock);
|
||||||
|
|
||||||
|
wake_up_all(&udev->nl_cmd_wq);
|
||||||
|
|
||||||
|
return ret;;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
|
||||||
|
int reconfig_attr, const void *reconfig_data)
|
||||||
{
|
{
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
void *msg_header;
|
void *msg_header;
|
||||||
|
@ -1190,22 +1362,51 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
|
||||||
if (!msg_header)
|
if (!msg_header)
|
||||||
goto free_skb;
|
goto free_skb;
|
||||||
|
|
||||||
ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
|
ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto free_skb;
|
goto free_skb;
|
||||||
|
|
||||||
ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
|
ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
goto free_skb;
|
goto free_skb;
|
||||||
|
|
||||||
|
ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
|
||||||
|
if (ret < 0)
|
||||||
|
goto free_skb;
|
||||||
|
|
||||||
|
if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
|
||||||
|
switch (reconfig_attr) {
|
||||||
|
case TCMU_ATTR_DEV_CFG:
|
||||||
|
ret = nla_put_string(skb, reconfig_attr, reconfig_data);
|
||||||
|
break;
|
||||||
|
case TCMU_ATTR_DEV_SIZE:
|
||||||
|
ret = nla_put_u64_64bit(skb, reconfig_attr,
|
||||||
|
*((u64 *)reconfig_data),
|
||||||
|
TCMU_ATTR_PAD);
|
||||||
|
break;
|
||||||
|
case TCMU_ATTR_WRITECACHE:
|
||||||
|
ret = nla_put_u8(skb, reconfig_attr,
|
||||||
|
*((u8 *)reconfig_data));
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
BUG();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ret < 0)
|
||||||
|
goto free_skb;
|
||||||
|
}
|
||||||
|
|
||||||
genlmsg_end(skb, msg_header);
|
genlmsg_end(skb, msg_header);
|
||||||
|
|
||||||
|
tcmu_init_genl_cmd_reply(udev, cmd);
|
||||||
|
|
||||||
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
|
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
|
||||||
TCMU_MCGRP_CONFIG, GFP_KERNEL);
|
TCMU_MCGRP_CONFIG, GFP_KERNEL);
|
||||||
|
|
||||||
/* We don't care if no one is listening */
|
/* We don't care if no one is listening */
|
||||||
if (ret == -ESRCH)
|
if (ret == -ESRCH)
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
if (!ret)
|
||||||
|
ret = tcmu_wait_genl_cmd_reply(udev);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
free_skb:
|
free_skb:
|
||||||
|
@ -1213,19 +1414,14 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int tcmu_configure_device(struct se_device *dev)
|
static int tcmu_update_uio_info(struct tcmu_dev *udev)
|
||||||
{
|
{
|
||||||
struct tcmu_dev *udev = TCMU_DEV(dev);
|
|
||||||
struct tcmu_hba *hba = udev->hba->hba_ptr;
|
struct tcmu_hba *hba = udev->hba->hba_ptr;
|
||||||
struct uio_info *info;
|
struct uio_info *info;
|
||||||
struct tcmu_mailbox *mb;
|
size_t size, used;
|
||||||
size_t size;
|
|
||||||
size_t used;
|
|
||||||
int ret = 0;
|
|
||||||
char *str;
|
char *str;
|
||||||
|
|
||||||
info = &udev->uio_info;
|
info = &udev->uio_info;
|
||||||
|
|
||||||
size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
|
size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
|
||||||
udev->dev_config);
|
udev->dev_config);
|
||||||
size += 1; /* for \0 */
|
size += 1; /* for \0 */
|
||||||
|
@ -1234,12 +1430,27 @@ static int tcmu_configure_device(struct se_device *dev)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
|
used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
|
||||||
|
|
||||||
if (udev->dev_config[0])
|
if (udev->dev_config[0])
|
||||||
snprintf(str + used, size - used, "/%s", udev->dev_config);
|
snprintf(str + used, size - used, "/%s", udev->dev_config);
|
||||||
|
|
||||||
info->name = str;
|
info->name = str;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int tcmu_configure_device(struct se_device *dev)
|
||||||
|
{
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
||||||
|
struct uio_info *info;
|
||||||
|
struct tcmu_mailbox *mb;
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
ret = tcmu_update_uio_info(udev);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
info = &udev->uio_info;
|
||||||
|
|
||||||
udev->mb_addr = vzalloc(CMDR_SIZE);
|
udev->mb_addr = vzalloc(CMDR_SIZE);
|
||||||
if (!udev->mb_addr) {
|
if (!udev->mb_addr) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
|
@ -1290,6 +1501,8 @@ static int tcmu_configure_device(struct se_device *dev)
|
||||||
/* Other attributes can be configured in userspace */
|
/* Other attributes can be configured in userspace */
|
||||||
if (!dev->dev_attrib.hw_max_sectors)
|
if (!dev->dev_attrib.hw_max_sectors)
|
||||||
dev->dev_attrib.hw_max_sectors = 128;
|
dev->dev_attrib.hw_max_sectors = 128;
|
||||||
|
if (!dev->dev_attrib.emulate_write_cache)
|
||||||
|
dev->dev_attrib.emulate_write_cache = 0;
|
||||||
dev->dev_attrib.hw_queue_depth = 128;
|
dev->dev_attrib.hw_queue_depth = 128;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1298,8 +1511,7 @@ static int tcmu_configure_device(struct se_device *dev)
|
||||||
*/
|
*/
|
||||||
kref_get(&udev->kref);
|
kref_get(&udev->kref);
|
||||||
|
|
||||||
ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
|
ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
|
||||||
udev->uio_info.uio_dev->minor);
|
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err_netlink;
|
goto err_netlink;
|
||||||
|
|
||||||
|
@ -1353,6 +1565,14 @@ static void tcmu_blocks_release(struct tcmu_dev *udev)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void tcmu_free_device(struct se_device *dev)
|
static void tcmu_free_device(struct se_device *dev)
|
||||||
|
{
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
||||||
|
|
||||||
|
/* release ref from init */
|
||||||
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void tcmu_destroy_device(struct se_device *dev)
|
||||||
{
|
{
|
||||||
struct tcmu_dev *udev = TCMU_DEV(dev);
|
struct tcmu_dev *udev = TCMU_DEV(dev);
|
||||||
struct tcmu_cmd *cmd;
|
struct tcmu_cmd *cmd;
|
||||||
|
@ -1379,14 +1599,11 @@ static void tcmu_free_device(struct se_device *dev)
|
||||||
|
|
||||||
tcmu_blocks_release(udev);
|
tcmu_blocks_release(udev);
|
||||||
|
|
||||||
if (tcmu_dev_configured(udev)) {
|
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
|
||||||
tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
|
|
||||||
udev->uio_info.uio_dev->minor);
|
|
||||||
|
|
||||||
uio_unregister_device(&udev->uio_info);
|
uio_unregister_device(&udev->uio_info);
|
||||||
}
|
|
||||||
|
|
||||||
/* release ref from init */
|
/* release ref from configure */
|
||||||
kref_put(&udev->kref, tcmu_dev_kref_release);
|
kref_put(&udev->kref, tcmu_dev_kref_release);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1546,6 +1763,129 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
|
||||||
}
|
}
|
||||||
CONFIGFS_ATTR(tcmu_, cmd_time_out);
|
CONFIGFS_ATTR(tcmu_, cmd_time_out);
|
||||||
|
|
||||||
|
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
||||||
|
|
||||||
|
return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
|
||||||
|
size_t count)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
||||||
|
int ret, len;
|
||||||
|
|
||||||
|
len = strlen(page);
|
||||||
|
if (!len || len > TCMU_CONFIG_LEN - 1)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* Check if device has been configured before */
|
||||||
|
if (tcmu_dev_configured(udev)) {
|
||||||
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
||||||
|
TCMU_ATTR_DEV_CFG, page);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("Unable to reconfigure device\n");
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
|
||||||
|
|
||||||
|
ret = tcmu_update_uio_info(udev);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
|
||||||
|
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
CONFIGFS_ATTR(tcmu_, dev_config);
|
||||||
|
|
||||||
|
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
||||||
|
|
||||||
|
return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
|
||||||
|
size_t count)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
||||||
|
u64 val;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
ret = kstrtou64(page, 0, &val);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
/* Check if device has been configured before */
|
||||||
|
if (tcmu_dev_configured(udev)) {
|
||||||
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
||||||
|
TCMU_ATTR_DEV_SIZE, &val);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("Unable to reconfigure device\n");
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
udev->dev_size = val;
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
CONFIGFS_ATTR(tcmu_, dev_size);
|
||||||
|
|
||||||
|
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
|
||||||
|
char *page)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
|
||||||
|
return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
|
||||||
|
const char *page, size_t count)
|
||||||
|
{
|
||||||
|
struct se_dev_attrib *da = container_of(to_config_group(item),
|
||||||
|
struct se_dev_attrib, da_group);
|
||||||
|
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
|
||||||
|
u8 val;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
ret = kstrtou8(page, 0, &val);
|
||||||
|
if (ret < 0)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
/* Check if device has been configured before */
|
||||||
|
if (tcmu_dev_configured(udev)) {
|
||||||
|
ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
|
||||||
|
TCMU_ATTR_WRITECACHE, &val);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("Unable to reconfigure device\n");
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
da->emulate_write_cache = val;
|
||||||
|
return count;
|
||||||
|
}
|
||||||
|
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
|
||||||
|
|
||||||
|
static struct configfs_attribute *tcmu_attrib_attrs[] = {
|
||||||
|
&tcmu_attr_cmd_time_out,
|
||||||
|
&tcmu_attr_dev_config,
|
||||||
|
&tcmu_attr_dev_size,
|
||||||
|
&tcmu_attr_emulate_write_cache,
|
||||||
|
NULL,
|
||||||
|
};
|
||||||
|
|
||||||
static struct configfs_attribute **tcmu_attrs;
|
static struct configfs_attribute **tcmu_attrs;
|
||||||
|
|
||||||
static struct target_backend_ops tcmu_ops = {
|
static struct target_backend_ops tcmu_ops = {
|
||||||
|
@ -1556,6 +1896,7 @@ static struct target_backend_ops tcmu_ops = {
|
||||||
.detach_hba = tcmu_detach_hba,
|
.detach_hba = tcmu_detach_hba,
|
||||||
.alloc_device = tcmu_alloc_device,
|
.alloc_device = tcmu_alloc_device,
|
||||||
.configure_device = tcmu_configure_device,
|
.configure_device = tcmu_configure_device,
|
||||||
|
.destroy_device = tcmu_destroy_device,
|
||||||
.free_device = tcmu_free_device,
|
.free_device = tcmu_free_device,
|
||||||
.parse_cdb = tcmu_parse_cdb,
|
.parse_cdb = tcmu_parse_cdb,
|
||||||
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
|
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
|
||||||
|
@ -1573,7 +1914,7 @@ static int unmap_thread_fn(void *data)
|
||||||
struct page *page;
|
struct page *page;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
while (1) {
|
while (!kthread_should_stop()) {
|
||||||
DEFINE_WAIT(__wait);
|
DEFINE_WAIT(__wait);
|
||||||
|
|
||||||
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
|
prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
|
||||||
|
@ -1645,7 +1986,7 @@ static int unmap_thread_fn(void *data)
|
||||||
|
|
||||||
static int __init tcmu_module_init(void)
|
static int __init tcmu_module_init(void)
|
||||||
{
|
{
|
||||||
int ret, i, len = 0;
|
int ret, i, k, len = 0;
|
||||||
|
|
||||||
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
|
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
|
||||||
|
|
||||||
|
@ -1670,7 +2011,10 @@ static int __init tcmu_module_init(void)
|
||||||
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
||||||
len += sizeof(struct configfs_attribute *);
|
len += sizeof(struct configfs_attribute *);
|
||||||
}
|
}
|
||||||
len += sizeof(struct configfs_attribute *) * 2;
|
for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
|
||||||
|
len += sizeof(struct configfs_attribute *);
|
||||||
|
}
|
||||||
|
len += sizeof(struct configfs_attribute *);
|
||||||
|
|
||||||
tcmu_attrs = kzalloc(len, GFP_KERNEL);
|
tcmu_attrs = kzalloc(len, GFP_KERNEL);
|
||||||
if (!tcmu_attrs) {
|
if (!tcmu_attrs) {
|
||||||
|
@ -1681,7 +2025,10 @@ static int __init tcmu_module_init(void)
|
||||||
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
|
||||||
tcmu_attrs[i] = passthrough_attrib_attrs[i];
|
tcmu_attrs[i] = passthrough_attrib_attrs[i];
|
||||||
}
|
}
|
||||||
tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
|
for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
|
||||||
|
tcmu_attrs[i] = tcmu_attrib_attrs[k];
|
||||||
|
i++;
|
||||||
|
}
|
||||||
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
|
tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
|
||||||
|
|
||||||
ret = transport_backend_register(&tcmu_ops);
|
ret = transport_backend_register(&tcmu_ops);
|
||||||
|
|
|
@@ -40,6 +40,8 @@
 
 static struct workqueue_struct *xcopy_wq = NULL;
 
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
+
 static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
 {
 	int off = 0;
@@ -53,48 +55,60 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
 	return 0;
 }
 
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
-					struct se_device **found_dev)
-{
-	struct se_device *se_dev;
-	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
-	int rc;
-
-	mutex_lock(&g_device_mutex);
-	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
-
-		if (!se_dev->dev_attrib.emulate_3pc)
-			continue;
-
-		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
-		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
-
-		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
-		if (rc != 0)
-			continue;
-
-		*found_dev = se_dev;
-		pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
-		rc = target_depend_item(&se_dev->dev_group.cg_item);
-		if (rc != 0) {
-			pr_err("configfs_depend_item attempt failed:"
-				" %d for se_dev: %p\n", rc, se_dev);
-			mutex_unlock(&g_device_mutex);
-			return rc;
-		}
-
-		pr_debug("Called configfs_depend_item for se_dev: %p"
-			" se_dev->se_dev_group: %p\n", se_dev,
-			&se_dev->dev_group);
-
-		mutex_unlock(&g_device_mutex);
-		return 0;
-	}
-	mutex_unlock(&g_device_mutex);
-
-	pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
-	return -EINVAL;
-}
+struct xcopy_dev_search_info {
+	const unsigned char *dev_wwn;
+	struct se_device *found_dev;
+};
+
+static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
+					      void *data)
+{
+	struct xcopy_dev_search_info *info = data;
+	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+	int rc;
+
+	if (!se_dev->dev_attrib.emulate_3pc)
+		return 0;
+
+	memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+	target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
+
+	rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+	if (rc != 0)
+		return 0;
+
+	info->found_dev = se_dev;
+	pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+
+	rc = target_depend_item(&se_dev->dev_group.cg_item);
+	if (rc != 0) {
+		pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
+		       rc, se_dev);
+		return rc;
+	}
+
+	pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
+		 se_dev, &se_dev->dev_group);
+	return 1;
+}
+
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+					struct se_device **found_dev)
+{
+	struct xcopy_dev_search_info info;
+	int ret;
+
+	memset(&info, 0, sizeof(info));
+	info.dev_wwn = dev_wwn;
+
+	ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
+	if (ret == 1) {
+		*found_dev = info.found_dev;
+		return 0;
+	} else {
+		pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+		return -EINVAL;
+	}
+}
 
 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
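The rewrite above replaces an open-coded walk of the global device list with a callback invoked by an iterator, passing a small search-info struct as an opaque cookie and using the callback's return value to stop the walk early. A minimal userspace sketch of that callback-plus-cookie idiom (the device table, field names, and for_each_device() helper are made up for illustration, not the kernel API):

#include <stdio.h>
#include <string.h>

struct device {
	const char *wwn;
	int usable;
};

/* Stand-in for a global device table the iterator walks on our behalf. */
static struct device devices[] = {
	{ "naa.600140510", 0 },
	{ "naa.600140511", 1 },
};

/* Visit every device; stop as soon as the callback returns non-zero and
 * propagate that value to the caller. */
static int for_each_device(int (*fn)(struct device *, void *), void *data)
{
	size_t i;
	int ret = 0;

	for (i = 0; i < sizeof(devices) / sizeof(devices[0]); i++) {
		ret = fn(&devices[i], data);
		if (ret)
			break;
	}
	return ret;
}

struct dev_search_info {
	const char *wwn;
	struct device *found;
};

static int locate_iter(struct device *dev, void *data)
{
	struct dev_search_info *info = data;

	if (!dev->usable)
		return 0;			/* keep iterating */
	if (strcmp(dev->wwn, info->wwn) != 0)
		return 0;			/* not a match, keep going */

	info->found = dev;
	return 1;				/* found it: stop the walk */
}

int main(void)
{
	struct dev_search_info info = { .wwn = "naa.600140511" };

	if (for_each_device(locate_iter, &info) == 1)
		printf("found %s\n", info.found->wwn);
	return 0;
}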
@@ -311,9 +325,7 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 		(unsigned long long)xop->dst_lba);
 
 	if (dc != 0) {
-		xop->dbl = (desc[29] & 0xff) << 16;
-		xop->dbl |= (desc[30] & 0xff) << 8;
-		xop->dbl |= desc[31] & 0xff;
+		xop->dbl = get_unaligned_be24(&desc[29]);
 
 		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
 	}
@@ -781,13 +793,24 @@ static int target_xcopy_write_destination(
 static void target_xcopy_do_work(struct work_struct *work)
 {
 	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
-	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
 	struct se_cmd *ec_cmd = xop->xop_se_cmd;
-	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
+	struct se_device *src_dev, *dst_dev;
+	sector_t src_lba, dst_lba, end_lba;
 	unsigned int max_sectors;
-	int rc;
-	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;
+	int rc = 0;
+	unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
+
+	if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
+		goto err_free;
 
+	if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
+		goto err_free;
+
+	src_dev = xop->src_dev;
+	dst_dev = xop->dst_dev;
+	src_lba = xop->src_lba;
+	dst_lba = xop->dst_lba;
+	nolb = xop->nolb;
 	end_lba = src_lba + nolb;
 	/*
 	 * Break up XCOPY I/O into hw_max_sectors sized I/O based on the
@@ -855,6 +878,8 @@ static void target_xcopy_do_work(struct work_struct *work)
 
 out:
 	xcopy_pt_undepend_remotedev(xop);
+
+err_free:
 	kfree(xop);
 	/*
 	 * Don't override an error scsi status if it has already been set
@@ -867,48 +892,22 @@ static void target_xcopy_do_work(struct work_struct *work)
 	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
 }
 
-sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+/*
+ * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
+ * fails.
+ */
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
 {
-	struct se_device *dev = se_cmd->se_dev;
-	struct xcopy_op *xop = NULL;
+	struct se_cmd *se_cmd = xop->xop_se_cmd;
 	unsigned char *p = NULL, *seg_desc;
-	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
+	unsigned int list_id, list_id_usage, sdll, inline_dl;
 	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
 	int rc;
 	unsigned short tdll;
 
-	if (!dev->dev_attrib.emulate_3pc) {
-		pr_err("EXTENDED_COPY operation explicitly disabled\n");
-		return TCM_UNSUPPORTED_SCSI_OPCODE;
-	}
-
-	sa = se_cmd->t_task_cdb[1] & 0x1f;
-	if (sa != 0x00) {
-		pr_err("EXTENDED_COPY(LID4) not supported\n");
-		return TCM_UNSUPPORTED_SCSI_OPCODE;
-	}
-
-	if (se_cmd->data_length == 0) {
-		target_complete_cmd(se_cmd, SAM_STAT_GOOD);
-		return TCM_NO_SENSE;
-	}
-	if (se_cmd->data_length < XCOPY_HDR_LEN) {
-		pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
-				se_cmd->data_length, XCOPY_HDR_LEN);
-		return TCM_PARAMETER_LIST_LENGTH_ERROR;
-	}
-
-	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
-	if (!xop) {
-		pr_err("Unable to allocate xcopy_op\n");
-		return TCM_OUT_OF_RESOURCES;
-	}
-	xop->xop_se_cmd = se_cmd;
-
 	p = transport_kmap_data_sg(se_cmd);
 	if (!p) {
 		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
-		kfree(xop);
 		return TCM_OUT_OF_RESOURCES;
 	}
 
@@ -977,18 +976,57 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
 				rc * XCOPY_TARGET_DESC_LEN);
 	transport_kunmap_data_sg(se_cmd);
 
-	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
-	queue_work(xcopy_wq, &xop->xop_work);
 	return TCM_NO_SENSE;
 
 out:
 	if (p)
 		transport_kunmap_data_sg(se_cmd);
-	kfree(xop);
 	return ret;
 }
 
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+	struct se_device *dev = se_cmd->se_dev;
+	struct xcopy_op *xop;
+	unsigned int sa;
+
+	if (!dev->dev_attrib.emulate_3pc) {
+		pr_err("EXTENDED_COPY operation explicitly disabled\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	sa = se_cmd->t_task_cdb[1] & 0x1f;
+	if (sa != 0x00) {
+		pr_err("EXTENDED_COPY(LID4) not supported\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (se_cmd->data_length == 0) {
+		target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+		return TCM_NO_SENSE;
+	}
+	if (se_cmd->data_length < XCOPY_HDR_LEN) {
+		pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+				se_cmd->data_length, XCOPY_HDR_LEN);
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+	}
+
+	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+	if (!xop)
+		goto err;
+	xop->xop_se_cmd = se_cmd;
+	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+	if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
+		goto free;
+	return TCM_NO_SENSE;
+
+free:
+	kfree(xop);
+
+err:
+	return TCM_OUT_OF_RESOURCES;
+}
+
 static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
 {
 	unsigned char *p;
@@ -496,14 +496,12 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
 	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
 					vs_event_work);
 	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-	struct vhost_scsi_evt *evt;
+	struct vhost_scsi_evt *evt, *t;
 	struct llist_node *llnode;
 
 	mutex_lock(&vq->mutex);
 	llnode = llist_del_all(&vs->vs_event_list);
-	while (llnode) {
-		evt = llist_entry(llnode, struct vhost_scsi_evt, list);
-		llnode = llist_next(llnode);
+	llist_for_each_entry_safe(evt, t, llnode, list) {
 		vhost_scsi_do_evt_work(vs, evt);
 		vhost_scsi_free_evt(vs, evt);
 	}
@@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 
 	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
 	llnode = llist_del_all(&vs->vs_completion_list);
-	while (llnode) {
-		cmd = llist_entry(llnode, struct vhost_scsi_cmd,
-				  tvc_completion_list);
-		llnode = llist_next(llnode);
+	llist_for_each_entry(cmd, llnode, tvc_completion_list) {
 		se_cmd = &cmd->tvc_se_cmd;
 
 		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
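Both vhost hunks above switch from hand-rolled llist_entry()/llist_next() walking to the llist_for_each_entry*() helpers; the _safe variant matters in the event path because each node is freed while the list is still being traversed. A plain userspace analogue of that safe-while-freeing traversal (an ordinary singly linked list, not the kernel's lock-free llist API):

#include <stdio.h>
#include <stdlib.h>

struct evt {
	int id;
	struct evt *next;
};

int main(void)
{
	struct evt *head = NULL, *e, *next;
	int i;

	/* Build a small list, newest first, mimicking a producer that
	 * pushes completion events onto a shared list. */
	for (i = 0; i < 3; i++) {
		e = malloc(sizeof(*e));
		if (!e)
			return 1;
		e->id = i;
		e->next = head;
		head = e;
	}

	/* "Safe" traversal: capture the next pointer before handling the
	 * current node, because handling it ends in free(). */
	for (e = head; e != NULL; e = next) {
		next = e->next;
		printf("handling event %d\n", e->id);
		free(e);
	}
	return 0;
}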
@@ -134,11 +134,8 @@ struct vscsibk_pend {
 	struct page *pages[VSCSI_MAX_GRANTS];
 
 	struct se_cmd se_cmd;
-};
 
-struct scsiback_tmr {
-	atomic_t tmr_complete;
-	wait_queue_head_t tmr_wait;
+	struct completion tmr_done;
 };
 
 #define VSCSI_DEFAULT_SESSION_TAGS	128
@@ -599,36 +596,28 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
 	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
 	struct scsiback_nexus *nexus = tpg->tpg_nexus;
 	struct se_cmd *se_cmd = &pending_req->se_cmd;
-	struct scsiback_tmr *tmr;
 	u64 unpacked_lun = pending_req->v2p->lun;
 	int rc, err = FAILED;
 
-	tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
-	if (!tmr) {
-		target_put_sess_cmd(se_cmd);
-		goto err;
-	}
-
-	init_waitqueue_head(&tmr->tmr_wait);
+	init_completion(&pending_req->tmr_done);
 
 	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
 			       &pending_req->sense_buffer[0],
-			       unpacked_lun, tmr, act, GFP_KERNEL,
+			       unpacked_lun, NULL, act, GFP_KERNEL,
 			       tag, TARGET_SCF_ACK_KREF);
 	if (rc)
 		goto err;
 
-	wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+	wait_for_completion(&pending_req->tmr_done);
 
 	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
 		SUCCESS : FAILED;
 
 	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
-	transport_generic_free_cmd(&pending_req->se_cmd, 1);
+	transport_generic_free_cmd(&pending_req->se_cmd, 0);
 	return;
 
 err:
-	if (tmr)
-		kfree(tmr);
 	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
 }
@@ -1389,12 +1378,6 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
 static void scsiback_release_cmd(struct se_cmd *se_cmd)
 {
 	struct se_session *se_sess = se_cmd->se_sess;
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-
-	if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
-		struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
-		kfree(tmr);
-	}
 
 	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
@@ -1455,11 +1438,10 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
 
 static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+	struct vscsibk_pend *pending_req = container_of(se_cmd,
+				struct vscsibk_pend, se_cmd);
 
-	atomic_set(&tmr->tmr_complete, 1);
-	wake_up(&tmr->tmr_wait);
+	complete(&pending_req->tmr_done);
 }
 
 static void scsiback_aborted_task(struct se_cmd *se_cmd)
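The xen-scsiback hunks above replace a separately allocated scsiback_tmr (an atomic flag plus a waitqueue) with a struct completion embedded directly in the pending request, which also removes the kfree() bookkeeping in the release path. A rough userspace analogue of the same one-shot wait/signal pairing built on pthreads (all names below are illustrative; this is not the kernel's implementation):

#include <pthread.h>
#include <stdio.h>

/* A one-shot "completion": a flag protected by a mutex plus a condition
 * variable, which is roughly what struct completion packages up. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion tmr_done;

static void *tmr_response(void *arg)
{
	(void)arg;
	complete(&tmr_done);		/* plays the scsiback_queue_tm_rsp() role */
	return NULL;
}

int main(void)
{
	pthread_t t;

	init_completion(&tmr_done);
	pthread_create(&t, NULL, tmr_response, NULL);
	wait_for_completion(&tmr_done);	/* plays the TMR wait role */
	pthread_join(t, NULL);
	puts("TMR completed");
	return 0;
}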
@@ -158,6 +158,7 @@
 #define READ_32		      0x09
 #define VERIFY_32	      0x0a
 #define WRITE_32	      0x0b
+#define WRITE_VERIFY_32	      0x0c
 #define WRITE_SAME_32	      0x0d
 #define ATA_32		      0x1ff0
 
@@ -66,6 +66,8 @@ struct sock;
 #define TA_DEFAULT_FABRIC_PROT_TYPE	0
 /* TPG status needs to be enabled to return sendtargets discovery endpoint info */
 #define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with optional to respond state bit,
+ * as a workaround for non RFC compliant initiators,that do not propose,
+ * nor respond to specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
@@ -560,7 +568,6 @@ struct iscsi_conn {
 #define LOGIN_FLAGS_INITIAL_PDU		8
 	unsigned long		login_flags;
 	struct delayed_work	login_work;
-	struct delayed_work	login_cleanup_work;
 	struct iscsi_login	*login;
 	struct timer_list	nopin_timer;
 	struct timer_list	nopin_response_timer;
@@ -769,6 +776,7 @@ struct iscsi_tpg_attrib {
 	u8			t10_pi;
 	u32			fabric_prot_type;
 	u32			tpg_enabled_sendtargets;
+	u32			login_keys_workaround;
 	struct iscsi_portal_group *tpg;
 };
 
@@ -2,6 +2,7 @@
 #define TARGET_CORE_BACKEND_H
 
 #include <linux/types.h>
+#include <asm/unaligned.h>
 #include <target/target_core_base.h>
 
 #define TRANSPORT_FLAG_PASSTHROUGH		0x1
@@ -29,16 +30,13 @@ struct target_backend_ops {
 
 	struct se_device *(*alloc_device)(struct se_hba *, const char *);
 	int (*configure_device)(struct se_device *);
+	void (*destroy_device)(struct se_device *);
 	void (*free_device)(struct se_device *device);
 
 	ssize_t (*set_configfs_dev_params)(struct se_device *,
 					   const char *, ssize_t);
 	ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
 
-	void (*transport_complete)(struct se_cmd *cmd,
-				   struct scatterlist *,
-				   unsigned char *);
-
 	sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
@@ -71,6 +69,8 @@ void target_backend_unregister(const struct target_backend_ops *);
 void	target_complete_cmd(struct se_cmd *, u8);
 void	target_complete_cmd_with_length(struct se_cmd *, u8, int);
 
+void	transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);
+
 sense_reason_t	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
 sense_reason_t	spc_emulate_report_luns(struct se_cmd *cmd);
 sense_reason_t	spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
@@ -104,9 +104,18 @@ bool target_lun_is_rdonly(struct se_cmd *);
 sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
 	sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
+struct se_device *target_find_device(int id, bool do_depend);
+
 bool target_sense_desc_format(struct se_device *dev);
 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
 				       struct request_queue *q);
 
+
+/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
+static inline uint32_t get_unaligned_be24(const uint8_t *const p)
+{
+	return get_unaligned_be32(p - 1) & 0xffffffU;
+}
+
 #endif /* TARGET_CORE_BACKEND_H */
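The new get_unaligned_be24() helper above loads the 32-bit big-endian word that starts one byte before the field and masks off the top byte, which is why the comment restricts it to callers for whom reading p - 1 is legal (as in the earlier XCOPY segment-descriptor hunk, where desc[28] precedes the 24-bit field). A standalone sketch comparing that masking trick with explicit byte assembly (plain C, no kernel helpers):

#include <stdio.h>
#include <stdint.h>

/* Explicit assembly of a 24-bit big-endian field, byte by byte. */
static uint32_t be24_bytewise(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

/* The masking trick: read the 32-bit big-endian word that begins one byte
 * earlier and keep the low 24 bits. Only legal when p - 1 is inside the
 * buffer, exactly the caveat the kernel comment spells out. */
static uint32_t be24_via_be32(const uint8_t *p)
{
	const uint8_t *q = p - 1;
	uint32_t be32 = ((uint32_t)q[0] << 24) | ((uint32_t)q[1] << 16) |
			((uint32_t)q[2] << 8) | q[3];

	return be32 & 0xffffffU;
}

int main(void)
{
	/* A 24-bit field preceded by at least one readable byte. */
	const uint8_t desc[] = { 0xaa, 0x00, 0x12, 0x34 };

	printf("bytewise: 0x%06x\n", (unsigned)be24_bytewise(&desc[1]));
	printf("masked:   0x%06x\n", (unsigned)be24_via_be32(&desc[1]));
	return 0;
}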
@@ -189,6 +189,7 @@ enum target_sc_flags_table {
 	TARGET_SCF_ACK_KREF		= 0x02,
 	TARGET_SCF_UNKNOWN_SIZE		= 0x04,
 	TARGET_SCF_USE_CPUID		= 0x08,
+	TARGET_SCF_LOOKUP_LUN_FROM_TAG	= 0x10,
 };
 
 /* fabric independent task management function values */
@@ -218,7 +219,6 @@ enum tcm_tmrsp_table {
  */
 typedef enum {
 	SCSI_INST_INDEX,
-	SCSI_DEVICE_INDEX,
 	SCSI_AUTH_INTR_INDEX,
 	SCSI_INDEX_TYPE_MAX
 } scsi_index_t;
@@ -701,8 +701,6 @@ struct scsi_port_stats {
 
 struct se_lun {
 	u64			unpacked_lun;
-#define SE_LUN_LINK_MAGIC			0xffff7771
-	u32			lun_link_magic;
 	bool			lun_shutdown;
 	bool			lun_access_ro;
 	u32			lun_index;
@@ -746,8 +744,6 @@ struct se_dev_stat_grps {
 };
 
 struct se_device {
-#define SE_DEV_LINK_MAGIC			0xfeeddeef
-	u32			dev_link_magic;
 	/* RELATIVE TARGET PORT IDENTIFER Counter */
 	u16			dev_rpti_counter;
 	/* Used for SAM Task Attribute ordering */
@@ -800,7 +796,6 @@ struct se_device {
 	struct list_head	delayed_cmd_list;
 	struct list_head	state_list;
 	struct list_head	qf_cmd_list;
-	struct list_head	g_dev_node;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
 	/* T10 Inquiry and VPD WWN Information */
@@ -819,8 +814,6 @@ struct se_device {
 	unsigned char		udev_path[SE_UDEV_PATH_LEN];
 	/* Pointer to template of function pointers for transport */
 	const struct target_backend_ops *transport;
-	/* Linked list for struct se_hba struct se_device list */
-	struct list_head	dev_list;
 	struct se_lun		xcopy_lun;
 	/* Protection Information */
 	int			prot_length;
@@ -160,6 +160,7 @@ int target_get_sess_cmd(struct se_cmd *, bool);
 int	target_put_sess_cmd(struct se_cmd *);
 void	target_sess_cmd_list_set_waiting(struct se_session *);
 void	target_wait_for_sess_cmds(struct se_session *);
+void	target_show_cmd(const char *pfx, struct se_cmd *cmd);
 
 int	core_alua_check_nonop_delay(struct se_cmd *);
 
@@ -130,6 +130,11 @@ enum tcmu_genl_cmd {
 	TCMU_CMD_UNSPEC,
 	TCMU_CMD_ADDED_DEVICE,
 	TCMU_CMD_REMOVED_DEVICE,
+	TCMU_CMD_RECONFIG_DEVICE,
+	TCMU_CMD_ADDED_DEVICE_DONE,
+	TCMU_CMD_REMOVED_DEVICE_DONE,
+	TCMU_CMD_RECONFIG_DEVICE_DONE,
+	TCMU_CMD_SET_FEATURES,
 	__TCMU_CMD_MAX,
 };
 #define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
@@ -138,6 +143,13 @@ enum tcmu_genl_attr {
 	TCMU_ATTR_UNSPEC,
 	TCMU_ATTR_DEVICE,
 	TCMU_ATTR_MINOR,
+	TCMU_ATTR_PAD,
+	TCMU_ATTR_DEV_CFG,
+	TCMU_ATTR_DEV_SIZE,
+	TCMU_ATTR_WRITECACHE,
+	TCMU_ATTR_CMD_STATUS,
+	TCMU_ATTR_DEVICE_ID,
+	TCMU_ATTR_SUPP_KERN_CMD_REPLY,
 	__TCMU_ATTR_MAX,
 };
 #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
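The uapi hunks above append the new netlink commands and attributes immediately before the __TCMU_CMD_MAX / __TCMU_ATTR_MAX sentinels, so existing values keep their numbers and the *_MAX macros keep tracking the table. A tiny sketch of that sentinel idiom (enum names below are invented, not the TCMU uapi):

#include <stdio.h>

/* Appending before the sentinel keeps old values stable for existing
 * userspace while the MAX macro follows the table automatically. */
enum demo_genl_cmd {
	DEMO_CMD_UNSPEC,
	DEMO_CMD_ADDED_DEVICE,
	DEMO_CMD_REMOVED_DEVICE,
	/* new commands go here, never in the middle */
	DEMO_CMD_RECONFIG_DEVICE,
	__DEMO_CMD_MAX,
};
#define DEMO_CMD_MAX (__DEMO_CMD_MAX - 1)

int main(void)
{
	printf("DEMO_CMD_REMOVED_DEVICE = %d\n", DEMO_CMD_REMOVED_DEVICE);
	printf("DEMO_CMD_MAX            = %d\n", DEMO_CMD_MAX);
	return 0;
}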