Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull scsi-target changes from Nicholas Bellinger:
 "There has been lots of work in existing code in a number of areas this
  past cycle.  The major highlights have been:

   * Removal of transport_do_task_sg_chain() from core + fabrics
     (Roland)
   * target-core: Removal of se_task abstraction from target-core and
     enforce hw_max_sectors for pSCSI backends (hch)
   * Re-factoring of iscsi-target tx immediate/response queues (agrover)
   * Conversion of iscsi-target back to using target core memory
     allocation logic (agrover)

  We've had one last-minute iscsi-target patch go into for-next to
  address a nasty regression bug related to agrover's target core
  allocation logic conversion; it is not included in Friday's
  linux-next build, but it is included in this series.

  On the new fabric module code front for-3.5, here is a brief status
  update for the three currently in flight this round:

   * usb-gadget target driver:

  Sebastian Siewior's driver for supporting usb-gadget target mode
  operation.  This will be going out as a separate PULL request from
  target-pending/usb-target-merge with subsystem maintainer ACKs.  One
  minor target-core patch in this series is required for it to function.

   * sbp ieee-1394/firewire target driver:

  Chris Boot's driver for supporting the Serial Bus Protocol (SBP)
  across IEEE-1394 Firewire hardware.  This will be going out as a
  separate PULL request from target-pending/sbp-target-merge with two
  additional drivers/firewire/ patches w/ subsystem maintainer ACKs.

   * qla2xxx LLD target mode infrastructure changes + tcm_qla2xxx:

  The Qlogic >= 24xx series HW target mode LLD infrastructure patch-set
  and tcm_qla2xxx fabric driver.  Support for FC target mode using
  qla2xxx LLD code has been officially submitted by Qlogic to James
  (see below), and is currently outstanding but not yet merged into
  scsi.git/for-next.

    [PATCH 00/22] qla2xxx: Updates for scsi "misc" branch
    http://www.spinics.net/lists/linux-scsi/msg59350.html

  Note there are *zero* direct dependencies upon this for-next series
  for the qla2xxx LLD target + tcm_qla2xxx patches submitted above, and
  over the last few days the target mode team has been tracking down a
  tcm_qla2xxx-specific active I/O shutdown bug that now appears to be
  almost squashed for 3.5-rc-fixes."

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (47 commits)
  iscsi-target: Fix iov_count calculation bug in iscsit_allocate_iovecs
  iscsi-target: remove dead code in iscsi_check_valuelist_for_support
  target: Handle ATA_16 passthrough for pSCSI backend devices
  target: Add MI_REPORT_TARGET_PGS ext. header + implict_trans_secs attribute
  target: Fix MAINTENANCE_IN service action CDB checks to use lower 5 bits
  target: add support for the WRITE_VERIFY command
  target: make target_put_session void
  target: cleanup transport_execute_tasks()
  target: Remove max_sectors device attribute for modern se_task less code
  target: lock => unlock typo in transport_lun_wait_for_tasks
  target: Enforce hw_max_sectors for SCF_SCSI_DATA_SG_IO_CDB
  target: remove the t_se_count field in struct se_cmd
  target: remove the t_task_cdbs_ex_left field in struct se_cmd
  target: remove the t_task_cdbs_left field in struct se_cmd
  target: remove struct se_task
  target: move the state and execute lists to the command
  target: simplify command to task linkage
  target: always allocate a single task
  target: replace ->execute_task with ->execute_cmd
  target: remove the task_sectors field in struct se_task
  ...
Linus Torvalds 2012-05-21 17:37:09 -07:00
commit c9bfa7d75b
41 changed files with 1333 additions and 2322 deletions
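
Before the per-file hunks, a compressed view of the headline change (hch's se_task removal): command emulation entry points that used to take a struct se_task now take the struct se_cmd itself, and completion goes through target_complete_cmd() instead of setting task_scsi_status and calling transport_complete_task(). A minimal before/after sketch, distilled from the target_emulate_noop() hunk further down this page:

/* before: the task is the unit of execution and completion */
int target_emulate_noop(struct se_task *task)
{
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
        return 0;
}

/* after: the command itself is executed and completed */
int target_emulate_noop(struct se_cmd *cmd)
{
        target_complete_cmd(cmd, GOOD);
        return 0;
}

The same mechanical conversion shows up in every target_emulate_*() and target_report_luns() hunk below.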


@ -1099,9 +1099,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
dir = cmd->data_direction;
BUG_ON(dir == DMA_NONE);
transport_do_task_sg_chain(cmd);
ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
ioctx->sg = sg = sg_orig = cmd->t_data_sg;
ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
opposite_dma_dir(dir));
@ -1769,7 +1768,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
goto send_sense;
}
ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
if (ret < 0) {
kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
@ -4004,9 +4003,6 @@ static int __init srpt_init_module(void)
srpt_target->tf_ops = srpt_template;
/* Enable SG chaining */
srpt_target->tf_ops.task_sg_chaining = true;
/*
* Set up default attribute lists.
*/
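
For fabric drivers like ib_srpt, the payoff is the hunk above: with a single per-command scatterlist there is nothing left to chain, so transport_do_task_sg_chain() and the t_tasks_sg_chained* fields go away and the driver maps cmd->t_data_sg directly. A sketch of the resulting mapping path (the helper name srpt_map_cmd_data is made up for illustration; ib_srpt does this inline in srpt_map_sg_to_ib_sge()):

/* one SGL per command now; no chaining step before DMA mapping */
static int srpt_map_cmd_data(struct ib_device *ibdev, struct se_cmd *cmd,
                             enum dma_data_direction dir)
{
        return ib_dma_map_sg(ibdev, cmd->t_data_sg, cmd->t_data_nents, dir);
}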

File diff suppressed because it is too large.


@ -18,8 +18,7 @@ extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
extern int iscsit_send_r2t(struct iscsi_cmd *, struct iscsi_conn *);
extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, int);
extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery);
extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);


@ -1538,7 +1538,7 @@ static int lio_write_pending(struct se_cmd *se_cmd)
struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
if (!cmd->immediate_data && !cmd->unsolicited_data)
return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 1);
return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
return 0;
}


@ -296,12 +296,11 @@ struct iscsi_datain_req {
u32 runlength;
u32 data_length;
u32 data_offset;
u32 data_offset_end;
u32 data_sn;
u32 next_burst_len;
u32 read_data_done;
u32 seq_send_order;
struct list_head dr_list;
struct list_head cmd_datain_node;
} ____cacheline_aligned;
struct iscsi_ooo_cmdsn {
@ -381,8 +380,6 @@ struct iscsi_cmd {
u32 buf_ptr_size;
/* Used to store DataDigest */
u32 data_crc;
/* Total size in bytes associated with command */
u32 data_length;
/* Counter for MaxOutstandingR2T */
u32 outstanding_r2ts;
/* Next R2T Offset when DataSequenceInOrder=Yes */
@ -464,16 +461,13 @@ struct iscsi_cmd {
/* Session the command is part of, used for connection recovery */
struct iscsi_session *sess;
/* list_head for connection list */
struct list_head i_list;
struct list_head i_conn_node;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd se_cmd;
/* Sense buffer that will be mapped into outgoing status */
#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
struct scatterlist *t_mem_sg;
u32 t_mem_sg_nents;
u32 padding;
u8 pad_bytes[4];
@ -500,8 +494,6 @@ struct iscsi_conn {
u8 network_transport;
enum iscsi_timer_flags_table nopin_timer_flags;
enum iscsi_timer_flags_table nopin_response_timer_flags;
u8 tx_immediate_queue;
u8 tx_response_queue;
/* Used to know what thread encountered a transport failure */
u8 which_thread;
/* connection id assigned by the Initiator */
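
Two of the struct changes above are worth decoding. First, iscsi_cmd no longer carries its own data_length; callers read it from the embedded core descriptor as cmd->se_cmd.data_length, which is what all the repetitive hunks below do. Second, the comment about container_of() names the idiom used to go the other way, from the se_cmd that target core hands back to the iscsi_cmd embedding it (see the lio_write_pending() hunk above). A minimal sketch of that idiom (the wrapper name is hypothetical):

static inline struct iscsi_cmd *iscsi_cmd_of(struct se_cmd *se_cmd)
{
        /* valid because se_cmd is always embedded in an iscsi_cmd here */
        return container_of(se_cmd, struct iscsi_cmd, se_cmd);
}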


@ -37,7 +37,7 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
" struct iscsi_datain_req\n");
return NULL;
}
INIT_LIST_HEAD(&dr->dr_list);
INIT_LIST_HEAD(&dr->cmd_datain_node);
return dr;
}
@ -45,14 +45,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_add_tail(&dr->dr_list, &cmd->datain_list);
list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
spin_unlock(&cmd->datain_lock);
}
void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_del(&dr->dr_list);
list_del(&dr->cmd_datain_node);
spin_unlock(&cmd->datain_lock);
kmem_cache_free(lio_dr_cache, dr);
@ -63,8 +63,8 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
struct iscsi_datain_req *dr, *dr_tmp;
spin_lock(&cmd->datain_lock);
list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, dr_list) {
list_del(&dr->dr_list);
list_for_each_entry_safe(dr, dr_tmp, &cmd->datain_list, cmd_datain_node) {
list_del(&dr->cmd_datain_node);
kmem_cache_free(lio_dr_cache, dr);
}
spin_unlock(&cmd->datain_lock);
@ -72,17 +72,14 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
{
struct iscsi_datain_req *dr;
if (list_empty(&cmd->datain_list)) {
pr_err("cmd->datain_list is empty for ITT:"
" 0x%08x\n", cmd->init_task_tag);
return NULL;
}
list_for_each_entry(dr, &cmd->datain_list, dr_list)
break;
return dr;
return list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
cmd_datain_node);
}
/*
@ -113,7 +110,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->data_length - read_data_done);
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@ -212,7 +209,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->data_length - read_data_done);
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@ -231,8 +228,8 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
offset = (seq->offset + seq->next_burst_len);
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->data_length) {
datain->length = (cmd->data_length - offset);
cmd->se_cmd.data_length) {
datain->length = (cmd->se_cmd.data_length - offset);
datain->offset = offset;
datain->flags |= ISCSI_FLAG_CMD_FINAL;
@ -264,7 +261,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
}
}
if ((read_data_done + datain->length) == cmd->data_length)
if ((read_data_done + datain->length) == cmd->se_cmd.data_length)
datain->flags |= ISCSI_FLAG_DATA_STATUS;
datain->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
@ -333,7 +330,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
read_data_done = (!dr->recovery) ?
cmd->read_data_done : dr->read_data_done;
read_data_left = (cmd->data_length - read_data_done);
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@ -344,7 +341,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
if (!pdu)
return dr;
if ((read_data_done + pdu->length) == cmd->data_length) {
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length) {
pdu->flags |= (ISCSI_FLAG_CMD_FINAL | ISCSI_FLAG_DATA_STATUS);
if (conn->sess->sess_ops->ErrorRecoveryLevel > 0)
pdu->flags |= ISCSI_FLAG_DATA_ACK;
@ -433,7 +430,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
seq_send_order = (!dr->recovery) ?
cmd->seq_send_order : dr->seq_send_order;
read_data_left = (cmd->data_length - read_data_done);
read_data_left = (cmd->se_cmd.data_length - read_data_done);
if (!read_data_left) {
pr_err("ITT: 0x%08x read_data_left is zero!\n",
cmd->init_task_tag);
@ -463,7 +460,7 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
} else
seq->next_burst_len += pdu->length;
if ((read_data_done + pdu->length) == cmd->data_length)
if ((read_data_done + pdu->length) == cmd->se_cmd.data_length)
pdu->flags |= ISCSI_FLAG_DATA_STATUS;
pdu->data_sn = (!dr->recovery) ? cmd->data_sn++ : dr->data_sn++;
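
One cleanup in this file stands on its own: the open-coded "iterate once and break" trick for fetching the head of a list becomes list_first_entry(), which wraps the same container_of() arithmetic and reads as what it is. Side by side:

/* old: a loop that never loops */
list_for_each_entry(dr, &cmd->datain_list, cmd_datain_node)
        break;

/* new: explicit head fetch; only valid on a non-empty list, which the
 * list_empty() check above still guarantees */
dr = list_first_entry(&cmd->datain_list, struct iscsi_datain_req,
                      cmd_datain_node);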


@ -48,9 +48,9 @@ void iscsit_set_dataout_sequence_values(
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
cmd->seq_end_offset = (cmd->write_data_done +
(cmd->data_length >
(cmd->se_cmd.data_length >
conn->sess->sess_ops->FirstBurstLength) ?
conn->sess->sess_ops->FirstBurstLength : cmd->data_length);
conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length);
return;
}
@ -59,15 +59,15 @@ void iscsit_set_dataout_sequence_values(
if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
cmd->seq_start_offset = cmd->write_data_done;
cmd->seq_end_offset = (cmd->data_length >
cmd->seq_end_offset = (cmd->se_cmd.data_length >
conn->sess->sess_ops->MaxBurstLength) ?
(cmd->write_data_done +
conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
conn->sess->sess_ops->MaxBurstLength) : cmd->se_cmd.data_length;
} else {
cmd->seq_start_offset = cmd->seq_end_offset;
cmd->seq_end_offset = ((cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength) >=
cmd->data_length) ? cmd->data_length :
cmd->se_cmd.data_length) ? cmd->se_cmd.data_length :
(cmd->seq_end_offset +
conn->sess->sess_ops->MaxBurstLength);
}
@ -182,13 +182,13 @@ static int iscsit_dataout_check_unsolicited_sequence(
if (!conn->sess->sess_ops->DataPDUInOrder)
goto out;
if ((first_burst_len != cmd->data_length) &&
if ((first_burst_len != cmd->se_cmd.data_length) &&
(first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data"
" received %u does not equal FirstBurstLength: %u, and"
" does not equal ExpXferLen %u.\n", first_burst_len,
conn->sess->sess_ops->FirstBurstLength,
cmd->data_length);
cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(&cmd->se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return DATAOUT_CANNOT_RECOVER;
@ -201,10 +201,10 @@ static int iscsit_dataout_check_unsolicited_sequence(
conn->sess->sess_ops->FirstBurstLength);
return DATAOUT_CANNOT_RECOVER;
}
if (first_burst_len == cmd->data_length) {
if (first_burst_len == cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
" error.\n", cmd->init_task_tag, cmd->data_length);
" error.\n", cmd->init_task_tag, cmd->se_cmd.data_length);
return DATAOUT_CANNOT_RECOVER;
}
}
@ -294,7 +294,7 @@ static int iscsit_dataout_check_sequence(
if ((next_burst_len <
conn->sess->sess_ops->MaxBurstLength) &&
((cmd->write_data_done + payload_length) <
cmd->data_length)) {
cmd->se_cmd.data_length)) {
pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
" before end of DataOUT sequence, protocol"
" error.\n", cmd->init_task_tag);
@ -319,7 +319,7 @@ static int iscsit_dataout_check_sequence(
return DATAOUT_CANNOT_RECOVER;
}
if ((cmd->write_data_done + payload_length) ==
cmd->data_length) {
cmd->se_cmd.data_length) {
pr_err("Command ITT: 0x%08x reached"
" last DataOUT PDU in sequence but ISCSI_FLAG_"
"CMD_FINAL is not set, protocol error.\n",
@ -640,9 +640,12 @@ static int iscsit_dataout_post_crc_passed(
cmd->write_data_done += payload_length;
return (cmd->write_data_done == cmd->data_length) ?
DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
DATAOUT_SEND_R2T : DATAOUT_NORMAL;
if (cmd->write_data_done == cmd->se_cmd.data_length)
return DATAOUT_SEND_TO_TRANSPORT;
else if (send_r2t)
return DATAOUT_SEND_R2T;
else
return DATAOUT_NORMAL;
}
static int iscsit_dataout_post_crc_failed(


@ -279,11 +279,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
* seq->first_datasn and seq->last_datasn have not been set.
*/
if (!seq->sent) {
#if 0
pr_err("Ignoring non-sent sequence 0x%08x ->"
" 0x%08x\n\n", seq->first_datasn,
seq->last_datasn);
#endif
continue;
}
@ -294,11 +292,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn < begrun) &&
(seq->last_datasn < begrun)) {
#if 0
pr_err("Pre BegRun sequence 0x%08x ->"
" 0x%08x\n", seq->first_datasn,
seq->last_datasn);
#endif
read_data_done += cmd->seq_list[i].xfer_len;
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
@ -309,11 +306,10 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn <= begrun) &&
(seq->last_datasn >= begrun)) {
#if 0
pr_err("Found sequence begrun: 0x%08x in"
" 0x%08x -> 0x%08x\n", begrun,
seq->first_datasn, seq->last_datasn);
#endif
seq_send_order = seq->seq_send_order;
data_sn = seq->first_datasn;
seq->next_burst_len = seq->pdu_send_order = 0;
@ -369,10 +365,9 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_no(
*/
if ((seq->first_datasn > begrun) ||
(seq->last_datasn > begrun)) {
#if 0
pr_err("Post BegRun sequence 0x%08x -> 0x%08x\n",
seq->first_datasn, seq->last_datasn);
#endif
seq->next_burst_len = seq->pdu_send_order = 0;
continue;
}
@ -526,7 +521,7 @@ int iscsit_handle_status_snack(
found_cmd = 0;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->stat_sn == begrun) {
found_cmd = 1;
break;
@ -987,7 +982,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
return 0;
iscsit_set_dataout_sequence_values(cmd);
iscsit_build_r2ts_for_cmd(cmd, cmd->conn, 0);
iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
}
return 0;
}
@ -1121,8 +1116,8 @@ static int iscsit_set_dataout_timeout_values(
if (cmd->unsolicited_data) {
*offset = 0;
*length = (conn->sess->sess_ops->FirstBurstLength >
cmd->data_length) ?
cmd->data_length :
cmd->se_cmd.data_length) ?
cmd->se_cmd.data_length :
conn->sess->sess_ops->FirstBurstLength;
return 0;
}
@ -1193,8 +1188,8 @@ static void iscsit_handle_dataout_timeout(unsigned long data)
if (conn->sess->sess_ops->DataPDUInOrder) {
pdu_offset = cmd->write_data_done;
if ((pdu_offset + (conn->sess->sess_ops->MaxBurstLength -
cmd->next_burst_len)) > cmd->data_length)
pdu_length = (cmd->data_length -
cmd->next_burst_len)) > cmd->se_cmd.data_length)
pdu_length = (cmd->se_cmd.data_length -
cmd->write_data_done);
else
pdu_length = (conn->sess->sess_ops->MaxBurstLength -
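
The recurring change in this file is the removal of #if 0 guards around debug printouts; in the resulting mainline code these become pr_debug() calls, which stay compiled in but are silent unless dynamic debug enables them (note the extraction above dropped the diff's +/- markers, so only one side of each hunk is visible). The pattern, sketched:

/* old: dead code unless someone edits the source and rebuilds */
#if 0
pr_err("Pre BegRun sequence 0x%08x -> 0x%08x\n",
       seq->first_datasn, seq->last_datasn);
#endif

/* new: off by default, switchable at runtime with CONFIG_DYNAMIC_DEBUG */
pr_debug("Pre BegRun sequence 0x%08x -> 0x%08x\n",
         seq->first_datasn, seq->last_datasn);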


@ -138,9 +138,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_list) {
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd);
@ -160,9 +160,9 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_list) {
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd);
@ -220,7 +220,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
return --cr->cmd_count;
}
@ -234,7 +234,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_list) {
&cr->conn_recovery_cmd_list, i_conn_node) {
if (((cmd->deferred_i_state != ISTATE_SENT_STATUS) &&
(cmd->deferred_i_state != ISTATE_REMOVE)) ||
@ -297,11 +297,11 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
mutex_unlock(&sess->cmdsn_mutex);
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
@ -339,14 +339,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
* list_del(&cmd->i_list); to release the command to the
* list_del(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
* sending the TMR response.
*/
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) {
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
(cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd);
spin_lock_bh(&conn->cmd_lock);
@ -397,7 +397,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
list_del(&cmd->i_list);
list_del(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
@ -407,7 +407,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
* Add the struct iscsi_cmd to the connection recovery cmd list
*/
spin_lock(&cr->conn_recovery_cmd_lock);
list_add_tail(&cmd->i_list, &cr->conn_recovery_cmd_list);
list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_lock_bh(&conn->cmd_lock);
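
A pattern worth noting in these recovery paths: every walk that unlinks commands uses list_for_each_entry_safe(), whose lookahead cursor (cmd_tmp) survives list_del() on the current node, and cmd_lock is dropped around iscsit_free_cmd() (presumably because the free path can sleep or take other locks) and retaken before continuing. Condensed from the hunks above:

spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
        list_del(&cmd->i_conn_node);
        spin_unlock_bh(&conn->cmd_lock);
        iscsit_free_cmd(cmd);   /* called with cmd_lock dropped, as above */
        spin_lock_bh(&conn->cmd_lock);
}
spin_unlock_bh(&conn->cmd_lock);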


@ -803,14 +803,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
value = simple_strtoul(value_ptr, &tmpptr, 0);
/* #warning FIXME: Fix this */
#if 0
if (strspn(endptr, WHITE_SPACE) != strlen(endptr)) {
pr_err("Illegal value \"%s\" for \"%s\".\n",
value, param->name);
return -1;
}
#endif
if (IS_TYPERANGE_0_TO_2(param)) {
if ((value < 0) || (value > 2)) {
pr_err("Illegal value for \"%s\", must be"
@ -1045,13 +1037,6 @@ static char *iscsi_check_valuelist_for_support(
tmp2 = strchr(acceptor_values, ',');
if (tmp2)
*tmp2 = '\0';
if (!acceptor_values || !proposer_values) {
if (tmp1)
*tmp1 = ',';
if (tmp2)
*tmp2 = ',';
return NULL;
}
if (!strcmp(acceptor_values, proposer_values)) {
if (tmp2)
*tmp2 = ',';
@ -1061,8 +1046,6 @@ static char *iscsi_check_valuelist_for_support(
*tmp2++ = ',';
acceptor_values = tmp2;
if (!acceptor_values)
break;
} while (acceptor_values);
if (tmp1)
*tmp1++ = ',';


@ -24,11 +24,13 @@
#include "iscsi_target_core.h"
#include "iscsi_target_util.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
#define OFFLOAD_BUF_SIZE 32768
void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_seq *seq;
@ -46,7 +48,7 @@ void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
}
}
void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
{
int i;
struct iscsi_pdu *pdu;
@ -61,6 +63,10 @@ void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
pdu->length, pdu->pdu_send_order, pdu->seq_no);
}
}
#else
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
#endif
static void iscsit_ordered_seq_lists(
struct iscsi_cmd *cmd,
@ -135,11 +141,11 @@ static int iscsit_randomize_pdu_lists(
seq_count++;
continue;
}
array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory"
" for random array.\n");
return -1;
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@ -155,11 +161,11 @@ static int iscsit_randomize_pdu_lists(
}
if (seq_count) {
array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for"
" random array.\n");
return -1;
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@ -187,10 +193,10 @@ static int iscsit_randomize_seq_lists(
if (!seq_count)
return 0;
array = kzalloc(seq_count * sizeof(u32), GFP_KERNEL);
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array) {
pr_err("Unable to allocate memory for random array.\n");
return -1;
return -ENOMEM;
}
iscsit_create_random_array(array, seq_count);
@ -221,11 +227,10 @@ static void iscsit_determine_counts_for_list(
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = (cmd->data_length >
conn->sess->sess_ops->FirstBurstLength) ?
conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->data_length) {
while (offset < cmd->se_cmd.data_length) {
*pdu_count += 1;
if (check_immediate) {
@ -239,10 +244,10 @@ static void iscsit_determine_counts_for_list(
}
if (unsolicited_data_length > 0) {
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
>= cmd->data_length) {
>= cmd->se_cmd.data_length) {
unsolicited_data_length -=
(cmd->data_length - offset);
offset += (cmd->data_length - offset);
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength)
@ -263,8 +268,8 @@ static void iscsit_determine_counts_for_list(
continue;
}
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->data_length) {
offset += (cmd->data_length - offset);
cmd->se_cmd.data_length) {
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@ -284,9 +289,9 @@ static void iscsit_determine_counts_for_list(
/*
* Builds PDU and/or Sequence list, called while DataSequenceInOrder=No
* and DataPDUInOrder=No.
* or DataPDUInOrder=No.
*/
static int iscsit_build_pdu_and_seq_list(
static int iscsit_do_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
struct iscsi_build_list *bl)
{
@ -306,11 +311,10 @@ static int iscsit_build_pdu_and_seq_list(
if ((bl->type == PDULIST_UNSOLICITED) ||
(bl->type == PDULIST_IMMEDIATE_AND_UNSOLICITED))
unsolicited_data_length = (cmd->data_length >
conn->sess->sess_ops->FirstBurstLength) ?
conn->sess->sess_ops->FirstBurstLength : cmd->data_length;
unsolicited_data_length = min(cmd->se_cmd.data_length,
conn->sess->sess_ops->FirstBurstLength);
while (offset < cmd->data_length) {
while (offset < cmd->se_cmd.data_length) {
pdu_count++;
if (!datapduinorder) {
pdu[i].offset = offset;
@ -346,21 +350,21 @@ static int iscsit_build_pdu_and_seq_list(
if (unsolicited_data_length > 0) {
if ((offset +
conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->data_length) {
cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_UNSOLICITED;
pdu[i].length =
(cmd->data_length - offset);
(cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_UNSOLICITED;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->data_length - offset));
(cmd->se_cmd.data_length - offset));
}
unsolicited_data_length -=
(cmd->data_length - offset);
offset += (cmd->data_length - offset);
(cmd->se_cmd.data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((offset +
@ -402,18 +406,18 @@ static int iscsit_build_pdu_and_seq_list(
continue;
}
if ((offset + conn->conn_ops->MaxRecvDataSegmentLength) >=
cmd->data_length) {
cmd->se_cmd.data_length) {
if (!datapduinorder) {
pdu[i].type = PDUTYPE_NORMAL;
pdu[i].length = (cmd->data_length - offset);
pdu[i].length = (cmd->se_cmd.data_length - offset);
}
if (!datasequenceinorder) {
seq[seq_no].type = SEQTYPE_NORMAL;
seq[seq_no].pdu_count = pdu_count;
seq[seq_no].xfer_len = (burstlength +
(cmd->data_length - offset));
(cmd->se_cmd.data_length - offset));
}
offset += (cmd->data_length - offset);
offset += (cmd->se_cmd.data_length - offset);
continue;
}
if ((burstlength + conn->conn_ops->MaxRecvDataSegmentLength) >=
@ -464,9 +468,8 @@ static int iscsit_build_pdu_and_seq_list(
} else
iscsit_ordered_seq_lists(cmd, bl->type);
}
#if 0
iscsit_dump_seq_list(cmd);
#endif
}
if (!datapduinorder) {
if (bl->data_direction & ISCSI_PDU_WRITE) {
@ -484,50 +487,86 @@ static int iscsit_build_pdu_and_seq_list(
} else
iscsit_ordered_pdu_lists(cmd, bl->type);
}
#if 0
iscsit_dump_pdu_list(cmd);
#endif
}
return 0;
}
/*
* Only called while DataSequenceInOrder=No or DataPDUInOrder=No.
*/
int iscsit_do_build_list(
int iscsit_build_pdu_and_seq_lists(
struct iscsi_cmd *cmd,
struct iscsi_build_list *bl)
u32 immediate_data_length)
{
struct iscsi_build_list bl;
u32 pdu_count = 0, seq_count = 1;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
iscsit_determine_counts_for_list(cmd, bl, &seq_count, &pdu_count);
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na;
/*
* Do nothing if no OOO shenanigans
*/
if (sess->sess_ops->DataSequenceInOrder &&
sess->sess_ops->DataPDUInOrder)
return 0;
if (cmd->data_direction == DMA_NONE)
return 0;
na = iscsit_tpg_get_node_attrib(sess);
memset(&bl, 0, sizeof(struct iscsi_build_list));
if (cmd->data_direction == DMA_FROM_DEVICE) {
bl.data_direction = ISCSI_PDU_READ;
bl.type = PDULIST_NORMAL;
if (na->random_datain_pdu_offsets)
bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
if (na->random_datain_seq_offsets)
bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
} else {
bl.data_direction = ISCSI_PDU_WRITE;
bl.immediate_data_length = immediate_data_length;
if (na->random_r2t_offsets)
bl.randomize |= RANDOM_R2T_OFFSETS;
if (!cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_NORMAL;
else if (cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE;
else if (!cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_UNSOLICITED;
else if (cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
}
iscsit_determine_counts_for_list(cmd, &bl, &seq_count, &pdu_count);
if (!conn->sess->sess_ops->DataSequenceInOrder) {
seq = kzalloc(seq_count * sizeof(struct iscsi_seq), GFP_ATOMIC);
seq = kcalloc(seq_count, sizeof(struct iscsi_seq), GFP_ATOMIC);
if (!seq) {
pr_err("Unable to allocate struct iscsi_seq list\n");
return -1;
return -ENOMEM;
}
cmd->seq_list = seq;
cmd->seq_count = seq_count;
}
if (!conn->sess->sess_ops->DataPDUInOrder) {
pdu = kzalloc(pdu_count * sizeof(struct iscsi_pdu), GFP_ATOMIC);
pdu = kcalloc(pdu_count, sizeof(struct iscsi_pdu), GFP_ATOMIC);
if (!pdu) {
pr_err("Unable to allocate struct iscsi_pdu list.\n");
kfree(seq);
return -1;
return -ENOMEM;
}
cmd->pdu_list = pdu;
cmd->pdu_count = pdu_count;
}
return iscsit_build_pdu_and_seq_list(cmd, bl);
return iscsit_do_build_pdu_and_seq_lists(cmd, &bl);
}
struct iscsi_pdu *iscsit_get_pdu_holder(
@ -572,13 +611,12 @@ struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
pdu = &cmd->pdu_list[cmd->pdu_start];
for (i = 0; pdu[i].seq_no != cmd->seq_no; i++) {
#if 0
pr_debug("pdu[i].seq_no: %d, pdu[i].pdu"
"_send_order: %d, pdu[i].offset: %d,"
" pdu[i].length: %d\n", pdu[i].seq_no,
pdu[i].pdu_send_order, pdu[i].offset,
pdu[i].length);
#endif
if (pdu[i].pdu_send_order == cmd->pdu_send_order) {
cmd->pdu_send_order++;
return &pdu[i];
@ -601,11 +639,11 @@ struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
pr_err("struct iscsi_seq is NULL!\n");
return NULL;
}
#if 0
pr_debug("seq->pdu_start: %d, seq->pdu_count: %d,"
" seq->seq_no: %d\n", seq->pdu_start, seq->pdu_count,
seq->seq_no);
#endif
pdu = &cmd->pdu_list[seq->pdu_start];
if (seq->pdu_send_order == seq->pdu_count) {
@ -645,12 +683,11 @@ struct iscsi_seq *iscsit_get_seq_holder(
}
for (i = 0; i < cmd->seq_count; i++) {
#if 0
pr_debug("seq_list[i].orig_offset: %d, seq_list[i]."
"xfer_len: %d, seq_list[i].seq_no %u\n",
cmd->seq_list[i].orig_offset, cmd->seq_list[i].xfer_len,
cmd->seq_list[i].seq_no);
#endif
if ((cmd->seq_list[i].orig_offset +
cmd->seq_list[i].xfer_len) >=
(offset + length))
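
Two mechanical improvements recur throughout this file. kzalloc(n * size, ...) becomes kcalloc(n, size, ...), which performs the same zeroed allocation but fails cleanly if the multiplication would overflow; and the bare -1 error returns become real errnos. Minimal sketch:

u32 *array;

/* overflow-checked, zeroing array allocation */
array = kcalloc(seq_count, sizeof(u32), GFP_KERNEL);
if (!array)
        return -ENOMEM; /* a proper errno instead of -1 */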


@ -78,7 +78,7 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
extern int iscsit_do_build_list(struct iscsi_cmd *, struct iscsi_build_list *);
extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);


@ -78,10 +78,7 @@ int iscsit_tmr_task_warm_reset(
{
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
#if 0
struct iscsi_init_task_mgt_cmnd *hdr =
(struct iscsi_init_task_mgt_cmnd *) buf;
#endif
if (!na->tmr_warm_reset) {
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
@ -216,7 +213,7 @@ static int iscsit_task_reassign_complete_nop_out(
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
cmd->i_state = ISTATE_SEND_NOPIN;
@ -272,9 +269,9 @@ static int iscsit_task_reassign_complete_write(
offset = cmd->next_burst_len = cmd->write_data_done;
if ((conn->sess->sess_ops->FirstBurstLength - offset) >=
cmd->data_length) {
cmd->se_cmd.data_length) {
no_build_r2ts = 1;
length = (cmd->data_length - offset);
length = (cmd->se_cmd.data_length - offset);
} else
length = (conn->sess->sess_ops->FirstBurstLength - offset);
@ -292,7 +289,7 @@ static int iscsit_task_reassign_complete_write(
/*
* iscsit_build_r2ts_for_cmd() can handle the rest from here.
*/
return iscsit_build_r2ts_for_cmd(cmd, conn, 2);
return iscsit_build_r2ts_for_cmd(cmd, conn, true);
}
static int iscsit_task_reassign_complete_read(
@ -385,7 +382,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
iscsit_task_reassign_remove_cmd(cmd, cr, conn->sess);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {


@ -163,7 +163,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
}
cmd->conn = conn;
INIT_LIST_HEAD(&cmd->i_list);
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
init_completion(&cmd->reject_comp);
@ -176,174 +176,6 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
return cmd;
}
/*
* Called from iscsi_handle_scsi_cmd()
*/
struct iscsi_cmd *iscsit_allocate_se_cmd(
struct iscsi_conn *conn,
u32 data_length,
int data_direction,
int iscsi_task_attr)
{
struct iscsi_cmd *cmd;
struct se_cmd *se_cmd;
int sam_task_attr;
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
return NULL;
cmd->data_direction = data_direction;
cmd->data_length = data_length;
/*
* Figure out the SAM Task Attribute for the incoming SCSI CDB
*/
if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
(iscsi_task_attr == ISCSI_ATTR_SIMPLE))
sam_task_attr = MSG_SIMPLE_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
sam_task_attr = MSG_ORDERED_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
sam_task_attr = MSG_HEAD_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ACA)
sam_task_attr = MSG_ACA_TAG;
else {
pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
" MSG_SIMPLE_TAG\n", iscsi_task_attr);
sam_task_attr = MSG_SIMPLE_TAG;
}
se_cmd = &cmd->se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
conn->sess->se_sess, data_length, data_direction,
sam_task_attr, &cmd->sense_buffer[0]);
return cmd;
}
struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
struct iscsi_conn *conn,
u8 function)
{
struct iscsi_cmd *cmd;
struct se_cmd *se_cmd;
int rc;
u8 tcm_function;
cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
return NULL;
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
if (!cmd->tmr_req) {
pr_err("Unable to allocate memory for"
" Task Management command!\n");
goto out;
}
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function == ISCSI_TM_FUNC_TASK_REASSIGN)
return cmd;
se_cmd = &cmd->se_cmd;
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
transport_init_se_cmd(se_cmd, &lio_target_fabric_configfs->tf_ops,
conn->sess->se_sess, 0, DMA_NONE,
MSG_SIMPLE_TAG, &cmd->sense_buffer[0]);
switch (function) {
case ISCSI_TM_FUNC_ABORT_TASK:
tcm_function = TMR_ABORT_TASK;
break;
case ISCSI_TM_FUNC_ABORT_TASK_SET:
tcm_function = TMR_ABORT_TASK_SET;
break;
case ISCSI_TM_FUNC_CLEAR_ACA:
tcm_function = TMR_CLEAR_ACA;
break;
case ISCSI_TM_FUNC_CLEAR_TASK_SET:
tcm_function = TMR_CLEAR_TASK_SET;
break;
case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
tcm_function = TMR_LUN_RESET;
break;
case ISCSI_TM_FUNC_TARGET_WARM_RESET:
tcm_function = TMR_TARGET_WARM_RESET;
break;
case ISCSI_TM_FUNC_TARGET_COLD_RESET:
tcm_function = TMR_TARGET_COLD_RESET;
break;
default:
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
goto out;
}
rc = core_tmr_alloc_req(se_cmd, cmd->tmr_req, tcm_function, GFP_KERNEL);
if (rc < 0)
goto out;
cmd->tmr_req->se_tmr_req = se_cmd->se_tmr_req;
return cmd;
out:
iscsit_release_cmd(cmd);
return NULL;
}
int iscsit_decide_list_to_build(
struct iscsi_cmd *cmd,
u32 immediate_data_length)
{
struct iscsi_build_list bl;
struct iscsi_conn *conn = cmd->conn;
struct iscsi_session *sess = conn->sess;
struct iscsi_node_attrib *na;
if (sess->sess_ops->DataSequenceInOrder &&
sess->sess_ops->DataPDUInOrder)
return 0;
if (cmd->data_direction == DMA_NONE)
return 0;
na = iscsit_tpg_get_node_attrib(sess);
memset(&bl, 0, sizeof(struct iscsi_build_list));
if (cmd->data_direction == DMA_FROM_DEVICE) {
bl.data_direction = ISCSI_PDU_READ;
bl.type = PDULIST_NORMAL;
if (na->random_datain_pdu_offsets)
bl.randomize |= RANDOM_DATAIN_PDU_OFFSETS;
if (na->random_datain_seq_offsets)
bl.randomize |= RANDOM_DATAIN_SEQ_OFFSETS;
} else {
bl.data_direction = ISCSI_PDU_WRITE;
bl.immediate_data_length = immediate_data_length;
if (na->random_r2t_offsets)
bl.randomize |= RANDOM_R2T_OFFSETS;
if (!cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_NORMAL;
else if (cmd->immediate_data && !cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE;
else if (!cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_UNSOLICITED;
else if (cmd->immediate_data && cmd->unsolicited_data)
bl.type = PDULIST_IMMEDIATE_AND_UNSOLICITED;
}
return iscsit_do_build_list(cmd, &bl);
}
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
struct iscsi_cmd *cmd,
u32 seq_send_order)
@ -502,14 +334,14 @@ int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
return 0;
if (((cmd->first_burst_len + payload_length) != cmd->data_length) &&
if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
((cmd->first_burst_len + payload_length) !=
conn->sess->sess_ops->FirstBurstLength)) {
pr_err("Unsolicited non-immediate data received %u"
" does not equal FirstBurstLength: %u, and does"
" not equal ExpXferLen %u.\n",
(cmd->first_burst_len + payload_length),
conn->sess->sess_ops->FirstBurstLength, cmd->data_length);
conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
transport_send_check_condition_and_sense(se_cmd,
TCM_INCORRECT_AMOUNT_OF_DATA, 0);
return -1;
@ -524,7 +356,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@ -545,7 +377,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
struct iscsi_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@ -568,7 +400,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt(
struct iscsi_cmd *cmd = NULL;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_list) {
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
if (cmd->targ_xfer_tag == targ_xfer_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@ -596,7 +428,7 @@ int iscsit_find_cmd_for_recovery(
spin_lock(&sess->cr_i_lock);
list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_i_lock);
@ -616,7 +448,7 @@ int iscsit_find_cmd_for_recovery(
spin_lock(&sess->cr_a_lock);
list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_list) {
list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
if (cmd->init_task_tag == init_task_tag) {
spin_unlock(&cr->conn_recovery_cmd_lock);
spin_unlock(&sess->cr_a_lock);
@ -813,7 +645,6 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
void iscsit_release_cmd(struct iscsi_cmd *cmd)
{
struct iscsi_conn *conn = cmd->conn;
int i;
iscsit_free_r2ts_from_list(cmd);
iscsit_free_all_datain_reqs(cmd);
@ -824,11 +655,6 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
kfree(cmd->tmr_req);
kfree(cmd->iov_data);
for (i = 0; i < cmd->t_mem_sg_nents; i++)
__free_page(sg_page(&cmd->t_mem_sg[i]));
kfree(cmd->t_mem_sg);
if (conn) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
@ -1038,7 +864,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
spin_unlock_bh(&conn->sess->ttt_lock);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
if (want_response)
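
The two large deletions above, iscsit_allocate_se_cmd() and iscsit_allocate_se_cmd_for_tmr(), are the visible half of agrover's allocation conversion: their logic moves into the opcode handlers in the one file whose diff was suppressed above as too large (presumably iscsi_target.c). The surviving core of the removed helper, sketched here with the new call site assumed:

cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
if (!cmd)
        return -ENOMEM; /* the real reject path is elided in this sketch */

/* target core initializes the embedded se_cmd; this call was the heart
 * of the removed iscsit_allocate_se_cmd() helper */
transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
                      conn->sess->se_sess, data_length, data_direction,
                      sam_task_attr, &cmd->sense_buffer[0]);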


@ -9,9 +9,6 @@ extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
extern struct iscsi_cmd *iscsit_allocate_se_cmd(struct iscsi_conn *, u32, int, int);
extern struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(struct iscsi_conn *, u8);
extern int iscsit_decide_list_to_build(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);


@ -213,7 +213,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
* associated read buffers, go ahead and do that here for type
* SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
* guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
* by target core in transport_generic_allocate_tasks() ->
* by target core in target_setup_cmd_from_cdb() ->
* transport_generic_cmd_sequencer().
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
@ -227,7 +227,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
}
}
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);


@ -59,26 +59,31 @@ struct t10_alua_lu_gp *default_lu_gp;
*
* See spc4r17 section 6.27
*/
int target_emulate_report_target_port_groups(struct se_task *task)
int target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
/*
* Need at least 4 bytes of response data or else we can't
* even fit the return data length.
* Skip over RESERVED area to first Target port group descriptor
* depending on the PARAMETER DATA FORMAT type..
*/
if (cmd->data_length < 4) {
pr_warn("REPORT TARGET PORT GROUPS allocation length %u"
" too small\n", cmd->data_length);
if (ext_hdr != 0)
off = 8;
else
off = 4;
if (cmd->data_length < off) {
pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
" small for %s header\n", cmd->data_length,
(ext_hdr) ? "extended" : "normal");
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
buf = transport_kmap_data_sg(cmd);
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@ -159,15 +164,34 @@ int target_emulate_report_target_port_groups(struct se_task *task)
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
buf[0] = ((rd_len >> 24) & 0xff);
buf[1] = ((rd_len >> 16) & 0xff);
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
put_unaligned_be32(rd_len, &buf[0]);
/*
* Fill in the Extended header parameter data format if requested
*/
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
* Set the implict transition time (in seconds) for the application
* client to use as a base for it's transition timeout value.
*
* Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
* this CDB was received upon to determine this value individually
* for ALUA target port group.
*/
port = cmd->se_lun->lun_sep;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (tg_pt_gp_mem) {
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (tg_pt_gp)
buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
}
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
@ -176,9 +200,8 @@ int target_emulate_report_target_port_groups(struct se_task *task)
*
* See spc4r17 section 6.35
*/
int target_emulate_set_target_port_groups(struct se_task *task)
int target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_port *port, *l_port = cmd->se_lun->lun_sep;
@ -351,8 +374,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
out:
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
@ -391,7 +413,7 @@ static inline int core_alua_state_standby(
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
case MAINTENANCE_IN:
switch (cdb[1]) {
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
@ -433,7 +455,7 @@ static inline int core_alua_state_unavailable(
case INQUIRY:
case REPORT_LUNS:
case MAINTENANCE_IN:
switch (cdb[1]) {
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
@ -473,7 +495,7 @@ static inline int core_alua_state_transition(
case INQUIRY:
case REPORT_LUNS:
case MAINTENANCE_IN:
switch (cdb[1]) {
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
@ -1359,6 +1381,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
if (def_group) {
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
@ -1855,6 +1878,37 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}
ssize_t core_alua_show_implict_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
}
ssize_t core_alua_store_implict_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract implict_trans_secs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
pr_err("Passed implict_trans_secs: %lu, exceeds"
" ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
ALUA_MAX_IMPLICT_TRANS_SECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
return count;
}
ssize_t core_alua_show_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
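
The switch (cdb[1] & 0x1f) hunks above are the "lower 5 bits" fix from the shortlog. In a MAINTENANCE_IN CDB the SERVICE ACTION occupies only bits 4:0 of byte 1; with the new extended-header support an initiator may legitimately set bit 5 (the field checked as cdb[1] & 0x20 earlier in this file), so comparing the whole byte against MI_REPORT_TARGET_PGS would wrongly reject such a CDB in a non-optimized ALUA state. Sketch:

/* SERVICE ACTION is bits 4:0 of byte 1; upper bits carry other fields */
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:      /* 0x0a */
        return 0;       /* always permitted in this ALUA state */
default:
        break;
}

The same file also swaps open-coded big-endian packing of the return length for put_unaligned_be32(rd_len, &buf[0]).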


@ -51,6 +51,12 @@
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
* Used for the recommended application client implict transition timeout
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
*/
#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0
#define ALUA_MAX_IMPLICT_TRANS_SECS 255
/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
@ -66,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern int target_emulate_report_target_port_groups(struct se_task *);
extern int target_emulate_set_target_port_groups(struct se_task *);
extern int target_emulate_report_target_port_groups(struct se_cmd *);
extern int target_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
@ -107,6 +113,10 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,


@ -432,6 +432,7 @@ static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
/*
@ -456,7 +457,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
put_unaligned_be32(max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
@ -598,9 +601,8 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
int target_emulate_inquiry(struct se_task *task)
int target_emulate_inquiry(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *buf, *map_buf;
@ -664,16 +666,13 @@ int target_emulate_inquiry(struct se_task *task)
}
transport_kunmap_data_sg(cmd);
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
int target_emulate_readcapacity(struct se_task *task)
int target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
@ -697,14 +696,12 @@ int target_emulate_readcapacity(struct se_task *task)
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
int target_emulate_readcapacity_16(struct se_task *task)
int target_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
@ -732,8 +729,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
@ -872,9 +868,8 @@ target_modesense_dpofua(unsigned char *buf, int type)
}
}
int target_emulate_modesense(struct se_task *task)
int target_emulate_modesense(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
unsigned char *rbuf;
@ -947,14 +942,12 @@ int target_emulate_modesense(struct se_task *task)
memcpy(rbuf, buf, offset);
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
int target_emulate_request_sense(struct se_task *task)
int target_emulate_request_sense(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
@ -1008,8 +1001,7 @@ int target_emulate_request_sense(struct se_task *task)
end:
transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
@ -1017,9 +1009,8 @@ int target_emulate_request_sense(struct se_task *task)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
int target_emulate_unmap(struct se_task *task)
int target_emulate_unmap(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task_cdb[0];
@ -1066,10 +1057,8 @@ int target_emulate_unmap(struct se_task *task)
err:
transport_kunmap_data_sg(cmd);
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
@ -1077,9 +1066,8 @@ int target_emulate_unmap(struct se_task *task)
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
int target_emulate_write_same(struct se_task *task)
int target_emulate_write_same(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
sector_t range;
sector_t lba = cmd->t_task_lba;
@ -1118,79 +1106,25 @@ int target_emulate_write_same(struct se_task *task)
return ret;
}
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
int target_emulate_synchronize_cache(struct se_task *task)
int target_emulate_synchronize_cache(struct se_cmd *cmd)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
if (!cmd->se_dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
" for: %s\n", cmd->se_dev->transport->name);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
return -ENOSYS;
}
dev->transport->do_sync_cache(task);
cmd->se_dev->transport->do_sync_cache(cmd);
return 0;
}
int target_emulate_noop(struct se_task *task)
int target_emulate_noop(struct se_cmd *cmd)
{
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
target_complete_cmd(cmd, GOOD);
return 0;
}
/*
* Write a CDB into @cdb that is based on the one the intiator sent us,
* but updated to only cover the sectors that the current task handles.
*/
void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
{
struct se_cmd *cmd = task->task_se_cmd;
unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb);
memcpy(cdb, cmd->t_task_cdb, cdb_len);
if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
unsigned long long lba = task->task_lba;
u32 sectors = task->task_sectors;
switch (cdb_len) {
case 6:
/* 21-bit LBA and 8-bit sectors */
cdb[1] = (lba >> 16) & 0x1f;
cdb[2] = (lba >> 8) & 0xff;
cdb[3] = lba & 0xff;
cdb[4] = sectors & 0xff;
break;
case 10:
/* 32-bit LBA and 16-bit sectors */
put_unaligned_be32(lba, &cdb[2]);
put_unaligned_be16(sectors, &cdb[7]);
break;
case 12:
/* 32-bit LBA and 32-bit sectors */
put_unaligned_be32(lba, &cdb[2]);
put_unaligned_be32(sectors, &cdb[6]);
break;
case 16:
/* 64-bit LBA and 32-bit sectors */
put_unaligned_be64(lba, &cdb[2]);
put_unaligned_be32(sectors, &cdb[10]);
break;
case 32:
/* 64-bit LBA and 32-bit sectors, extended CDB */
put_unaligned_be64(lba, &cdb[12]);
put_unaligned_be32(sectors, &cdb[28]);
break;
default:
BUG();
}
}
}
EXPORT_SYMBOL(target_get_task_cdb);
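Taken together, these hunks convert the CDB emulation handlers from the old two-step per-task completion (set task->task_scsi_status, then call transport_complete_task()) to a single command-level target_complete_cmd() call. A minimal sketch of a handler in the new style, assuming only the calls already visible in this series; the handler name and payload step are placeholders:

static int example_emulate_handler(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return -ENOMEM;

	/* ... fill in the emulated payload for this opcode ... */

	transport_kunmap_data_sg(cmd);

	/* Replaces task->task_scsi_status = GOOD + transport_complete_task() */
	target_complete_cmd(cmd, GOOD);
	return 0;
}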


@ -683,9 +683,6 @@ SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);
DEF_DEV_ATTRIB(max_sectors);
SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(fabric_max_sectors);
SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
@ -727,7 +724,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
&target_core_dev_attrib_max_sectors.attr,
&target_core_dev_attrib_fabric_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
@ -2450,6 +2446,26 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
* implict_trans_secs
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_implict_trans_secs(tg_pt_gp, page);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
}
SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
/*
* preferred
*/
@ -2574,6 +2590,7 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
&target_core_alua_tg_pt_gp_implict_trans_secs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
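The new implict_trans_secs attribute (the misspelling matches the existing ALUA identifiers) reuses the show/store wrapper pattern of its neighbours, delegating to core_alua_{show,store}_implict_trans_secs(). Those helper bodies are not part of this diff; a plausible sketch of the store side, under the assumption that it parses input like the other tg_pt_gp attributes:

ssize_t core_alua_store_implict_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = strict_strtoul(page, 0, &tmp);
	if (ret < 0)
		return -EINVAL;

	/* Stored into the field added to struct t10_alua_tg_pt_gp below */
	tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
	return count;
}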


@ -643,9 +643,8 @@ void core_dev_unexport(
lun->lun_se_dev = NULL;
}
int target_report_luns(struct se_task *se_task)
int target_report_luns(struct se_cmd *se_cmd)
{
struct se_cmd *se_cmd = se_task->task_se_cmd;
struct se_dev_entry *deve;
struct se_session *se_sess = se_cmd->se_sess;
unsigned char *buf;
@ -696,8 +695,7 @@ int target_report_luns(struct se_task *se_task)
buf[3] = (lun_count & 0xff);
transport_kunmap_data_sg(se_cmd);
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
target_complete_cmd(se_cmd, GOOD);
return 0;
}
@ -878,15 +876,12 @@ void se_dev_set_default_attribs(
dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
/*
* max_sectors is based on subsystem plugin dependent requirements.
* Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
limits->logical_block_size);
dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
/*
* Set fabric_max_sectors, which is reported in block limits
* VPD page (B0h).
@ -1170,64 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
pr_err("dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
if (!max_sectors) {
pr_err("dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
return -EINVAL;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
pr_err("dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
return -EINVAL;
}
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
return -EINVAL;
}
} else {
if (!force && (max_sectors >
dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
return -EINVAL;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
pr_err("dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
return -EINVAL;
}
}
/*
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
*/
max_sectors = se_dev_align_max_sectors(max_sectors,
dev->se_sub_dev->se_dev_attrib.block_size);
dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
@ -1341,7 +1278,6 @@ struct se_lun *core_dev_add_lun(
u32 lun)
{
struct se_lun *lun_p;
u32 lun_access = 0;
int rc;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
@ -1354,12 +1290,8 @@ struct se_lun *core_dev_add_lun(
if (IS_ERR(lun_p))
return lun_p;
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
rc = core_tpg_post_addlun(tpg, lun_p,
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
return ERR_PTR(rc);
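With max_sectors gone, se_dev_align_max_sectors() is now applied to max_hw_sectors instead. Its body is not shown in this diff; a sketch of the alignment the comment describes (rounding the limit down so the transfer size is a whole number of pages), assuming the block size divides PAGE_SIZE:

static u32 example_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 sectors_per_page = PAGE_SIZE / block_size;

	/* e.g. 1029 sectors of 512 bytes with 4K pages -> 1024 */
	return rounddown(max_sectors, sectors_per_page);
}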


@ -133,15 +133,10 @@ static struct se_device *fd_create_virtdevice(
ret = PTR_ERR(dev_p);
goto fail;
}
#if 0
if (di->no_create_file)
flags = O_RDWR | O_LARGEFILE;
else
/* O_DIRECT too? */
flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/* flags |= O_DIRECT; */
/*
* If fd_buffered_io=1 has not been set explicitly (the default),
* use O_SYNC to force FILEIO writes to disk.
@ -249,53 +244,33 @@ static void fd_free_device(void *p)
kfree(fd_dev);
}
static inline struct fd_request *FILE_REQ(struct se_task *task)
static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents)
{
return container_of(task, struct fd_request, fd_task);
}
static struct se_task *
fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
if (!fd_req) {
pr_err("Unable to allocate struct fd_request\n");
return NULL;
}
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
loff_t pos = (cmd->t_task_lba *
se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
set_fs(old_fs);
kfree(iov);
@ -305,10 +280,10 @@ static int fd_do_readv(struct se_task *task)
* block_device.
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
(int)cmd->data_length);
return (ret < 0 ? ret : -EINVAL);
}
} else {
@ -322,38 +297,38 @@ static int fd_do_readv(struct se_task *task)
return 1;
}
static int fd_do_writev(struct se_task *task)
static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents)
{
struct fd_request *req = FILE_REQ(task);
struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev;
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = se_dev->dev_ptr;
struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct scatterlist *sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba *
loff_t pos = (cmd->t_task_lba *
se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
if (!iov) {
pr_err("Unable to allocate fd_do_writev iov[]\n");
return -ENOMEM;
}
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
for_each_sg(sgl, sg, sgl_nents, i) {
iov[i].iov_len = sg->length;
iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
if (ret < 0 || ret != cmd->data_length) {
pr_err("vfs_writev() returned %d\n", ret);
return (ret < 0 ? ret : -EINVAL);
}
@ -361,9 +336,8 @@ static int fd_do_writev(struct se_task *task)
return 1;
}
static void fd_emulate_sync_cache(struct se_task *task)
static void fd_emulate_sync_cache(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
@ -375,7 +349,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
* for this SYNCHRONIZE_CACHE op
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
target_complete_cmd(cmd, SAM_STAT_GOOD);
/*
* Determine if we will be flushing the entire device.
@ -395,33 +369,37 @@ static void fd_emulate_sync_cache(struct se_task *task)
if (ret != 0)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
if (immed)
return;
if (ret) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
}
/*
* WRITE Force Unit Access (FUA) emulation on a per struct se_task
* LBA range basis..
*/
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
static void fd_emulate_write_fua(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
loff_t start = cmd->t_task_lba *
dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + cmd->data_length;
int ret;
pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
cmd->t_task_lba, cmd->data_length);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
int ret = 0;
@ -429,10 +407,10 @@ static int fd_do_task(struct se_task *task)
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
if (task->task_data_direction == DMA_FROM_DEVICE) {
ret = fd_do_readv(task);
if (data_direction == DMA_FROM_DEVICE) {
ret = fd_do_readv(cmd, sgl, sgl_nents);
} else {
ret = fd_do_writev(task);
ret = fd_do_writev(cmd, sgl, sgl_nents);
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
@ -443,7 +421,7 @@ static int fd_do_task(struct se_task *task)
* and return some sense data to let the initiator
* know the FUA WRITE cache sync failed..?
*/
fd_emulate_write_fua(cmd, task);
fd_emulate_write_fua(cmd);
}
}
@ -452,24 +430,11 @@ static int fd_do_task(struct se_task *task)
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
}
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (ret)
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
*
*
*/
static void fd_free_task(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
kfree(req);
}
enum {
Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};
@ -632,10 +597,8 @@ static struct se_subsystem_api fileio_template = {
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
.alloc_task = fd_alloc_task,
.do_task = fd_do_task,
.execute_cmd = fd_execute_cmd,
.do_sync_cache = fd_emulate_sync_cache,
.free_task = fd_free_task,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
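fd_execute_cmd() illustrates the contract every backend now implements through .execute_cmd: the core hands over the command, its scatterlist, and the data direction; the backend either completes the command with target_complete_cmd() or sets a sense reason and returns a negative errno. A reduced sketch, where example_do_io() is a hypothetical stand-in for the real data path (fd_do_readv()/fd_do_writev() above):

static int example_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
	u32 sgl_nents, enum dma_data_direction data_direction)
{
	int ret;

	ret = example_do_io(cmd, sgl, sgl_nents, data_direction);
	if (ret < 0) {
		/* Failure: report a sense reason and a negative errno */
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return ret;
	}

	/* Success: complete directly; there is no per-task status anymore */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}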


@ -12,10 +12,6 @@
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
struct fd_request {
struct se_task fd_task;
};
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_USE_BUFFERED_IO 0x04


@ -189,26 +189,6 @@ static void iblock_free_device(void *p)
kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ib_req) {
pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
atomic_set(&ib_req->pending, 1);
return &ib_req->ib_task;
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
@ -295,8 +275,16 @@ static void iblock_end_io_flush(struct bio *bio, int err)
if (err)
pr_err("IBLOCK: cache flush failed: %d\n", err);
if (cmd)
transport_complete_sync_cache(cmd, err == 0);
if (cmd) {
if (err) {
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
} else {
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
}
bio_put(bio);
}
@ -304,9 +292,8 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
static void iblock_emulate_sync_cache(struct se_task *task)
static void iblock_emulate_sync_cache(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
struct bio *bio;
@ -316,7 +303,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
* for this SYNCHRONIZE_CACHE op.
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
target_complete_cmd(cmd, SAM_STAT_GOOD);
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
@ -335,11 +322,6 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
kfree(IBLOCK_REQ(task));
}
enum {
Opt_udev_path, Opt_force, Opt_err
};
@ -448,19 +430,35 @@ static ssize_t iblock_show_configfs_dev_params(
return bl;
}
static void iblock_complete_cmd(struct se_cmd *cmd)
{
struct iblock_req *ibr = cmd->priv;
u8 status;
if (!atomic_dec_and_test(&ibr->pending))
return;
if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
target_complete_cmd(cmd, status);
kfree(ibr);
}
static void iblock_bio_destructor(struct bio *bio)
{
struct se_task *task = bio->bi_private;
struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
struct se_cmd *cmd = bio->bi_private;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
struct bio *bio;
/*
@ -476,19 +474,11 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
return NULL;
}
pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = task;
bio->bi_private = cmd;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->pending);
pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
return bio;
}
@ -503,20 +493,21 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug);
}
static int iblock_do_task(struct se_task *task)
static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr = IBLOCK_REQ(task);
struct iblock_req *ibr;
struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 i, sg_num = task->task_sg_nents;
u32 sg_num = sgl_nents;
sector_t block_lba;
unsigned bio_cnt;
int rw;
int i;
if (task->task_data_direction == DMA_TO_DEVICE) {
if (data_direction == DMA_TO_DEVICE) {
/*
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
@ -532,17 +523,17 @@ static int iblock_do_task(struct se_task *task)
}
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
* Convert the blocksize advertised to the initiator to the 512 byte
* units unconditionally used by the Linux block layer.
*/
if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
block_lba = (cmd->t_task_lba << 3);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
block_lba = (cmd->t_task_lba << 2);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
block_lba = (cmd->t_task_lba << 1);
else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
block_lba = cmd->t_task_lba;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
@ -550,17 +541,22 @@ static int iblock_do_task(struct se_task *task)
return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
if (!bio) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
cmd->priv = ibr;
bio = iblock_get_bio(cmd, block_lba, sgl_nents);
if (!bio)
goto fail_free_ibr;
bio_list_init(&list);
bio_list_add(&list, bio);
atomic_set(&ibr->pending, 2);
bio_cnt = 1;
for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
* length of the S/G list entry this will cause an
@ -573,9 +569,11 @@ static int iblock_do_task(struct se_task *task)
bio_cnt = 0;
}
bio = iblock_get_bio(task, block_lba, sg_num);
bio = iblock_get_bio(cmd, block_lba, sg_num);
if (!bio)
goto fail;
goto fail_put_bios;
atomic_inc(&ibr->pending);
bio_list_add(&list, bio);
bio_cnt++;
}
@ -586,17 +584,16 @@ static int iblock_do_task(struct se_task *task)
}
iblock_submit_bios(&list, rw);
if (atomic_dec_and_test(&ibr->pending)) {
transport_complete_task(task,
!atomic_read(&ibr->ib_bio_err_cnt));
}
iblock_complete_cmd(cmd);
return 0;
fail:
fail_put_bios:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
fail_free_ibr:
kfree(ibr);
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
return -ENOMEM;
}
@ -621,8 +618,8 @@ static sector_t iblock_get_blocks(struct se_device *dev)
static void iblock_bio_done(struct bio *bio, int err)
{
struct se_task *task = bio->bi_private;
struct iblock_req *ibr = IBLOCK_REQ(task);
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
@ -642,14 +639,7 @@ static void iblock_bio_done(struct bio *bio, int err)
bio_put(bio);
if (!atomic_dec_and_test(&ibr->pending))
return;
pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba,
(unsigned long long)bio->bi_sector, err);
transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
iblock_complete_cmd(cmd);
}
static struct se_subsystem_api iblock_template = {
@ -663,11 +653,9 @@ static struct se_subsystem_api iblock_template = {
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
.alloc_task = iblock_alloc_task,
.do_task = iblock_do_task,
.execute_cmd = iblock_execute_cmd,
.do_discard = iblock_do_discard,
.do_sync_cache = iblock_emulate_sync_cache,
.free_task = iblock_free_task,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
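The iblock conversion replaces the per-task completion logic with a pending counter kept in cmd->priv: ibr->pending starts at 2 (the first bio plus the submitting context's own reference), every additional bio takes one more, and each bio completion as well as the final call in the submission path drops one; whichever path reaches zero finishes the command. The same shape, reduced to a sketch with hypothetical example_* helpers:

static void example_put(struct iblock_req *ibr, struct se_cmd *cmd)
{
	if (!atomic_dec_and_test(&ibr->pending))
		return;
	/* Last reference: report status and free the per-command state */
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	kfree(ibr);
}

static void example_submit(struct se_cmd *cmd, struct iblock_req *ibr,
	int nr_bios)
{
	int i;

	/* 2 = the first bio + the submitter's own reference */
	atomic_set(&ibr->pending, 2);
	for (i = 1; i < nr_bios; i++)
		atomic_inc(&ibr->pending);

	/* ... submit the bios; each completion calls example_put() ... */

	example_put(ibr, cmd);	/* drop the submitter's reference */
}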


@ -7,7 +7,6 @@
#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
struct se_task ib_task;
atomic_t pending;
atomic_t ib_bio_err_cnt;
} ____cacheline_aligned;


@ -5,15 +5,15 @@
extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_cdb.c */
int target_emulate_inquiry(struct se_task *task);
int target_emulate_readcapacity(struct se_task *task);
int target_emulate_readcapacity_16(struct se_task *task);
int target_emulate_modesense(struct se_task *task);
int target_emulate_request_sense(struct se_task *task);
int target_emulate_unmap(struct se_task *task);
int target_emulate_write_same(struct se_task *task);
int target_emulate_synchronize_cache(struct se_task *task);
int target_emulate_noop(struct se_task *task);
int target_emulate_inquiry(struct se_cmd *cmd);
int target_emulate_readcapacity(struct se_cmd *cmd);
int target_emulate_readcapacity_16(struct se_cmd *cmd);
int target_emulate_modesense(struct se_cmd *cmd);
int target_emulate_request_sense(struct se_cmd *cmd);
int target_emulate_unmap(struct se_cmd *cmd);
int target_emulate_write_same(struct se_cmd *cmd);
int target_emulate_synchronize_cache(struct se_cmd *cmd);
int target_emulate_noop(struct se_cmd *cmd);
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
@ -28,7 +28,7 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
int target_report_luns(struct se_task *);
int target_report_luns(struct se_cmd *);
void se_release_device_for_hba(struct se_device *);
void se_release_vpd_for_dev(struct se_device *);
int se_free_virtual_device(struct se_device *, struct se_hba *);
@ -104,8 +104,7 @@ void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
void __transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
void __target_remove_from_execute_list(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@ -114,7 +113,7 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_task(struct se_task *task, unsigned long *flags);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);


@ -193,9 +193,8 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
return 0;
}
int target_scsi2_reservation_release(struct se_task *task)
int target_scsi2_reservation_release(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
@ -237,16 +236,13 @@ int target_scsi2_reservation_release(struct se_task *task)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
int target_scsi2_reservation_reserve(struct se_task *task)
int target_scsi2_reservation_reserve(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
@ -307,10 +303,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
@ -503,11 +497,10 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
#if 0
pr_debug("Allowing explict CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
return ret;
}
/*
@ -535,14 +528,14 @@ static int core_scsi3_pr_seq_non_holder(
* as we expect registered non-reservation holding
* nexuses to issue CDBs.
*/
#if 0
if (!registered_nexus) {
pr_debug("Allowing implict CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
}
#endif
return 0;
}
} else if ((reg_only) || (all_reg)) {
@ -551,11 +544,11 @@ static int core_scsi3_pr_seq_non_holder(
* For PR_*_REG_ONLY and PR_*_ALL_REG reservations,
* allow commands from registered nexuses.
*/
#if 0
pr_debug("Allowing implict CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
return 0;
}
}
@ -1669,12 +1662,12 @@ static int core_scsi3_decode_spec_i_port(
ret = -EINVAL;
goto out;
}
#if 0
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
#endif
if (tid_len > tpdl) {
pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
" %u for Transport ID: %s\n", tid_len, ptr);
@ -1717,12 +1710,12 @@ static int core_scsi3_decode_spec_i_port(
ret = -EINVAL;
goto out;
}
#if 0
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %u\n",
dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
#endif
/*
* Skip any TransportIDs that already have a registration for
* this target port.
@ -3476,10 +3469,10 @@ static int core_scsi3_emulate_pro_register_and_move(
buf = transport_kmap_data_sg(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
#endif
if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
@ -3578,11 +3571,11 @@ static int core_scsi3_emulate_pro_register_and_move(
ret = -EINVAL;
goto out;
}
#if 0
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname);
#endif
/*
* Locate the struct se_dev_entry pointer for the matching RELATIVE TARGET
* PORT IDENTIFIER.
@ -3606,12 +3599,12 @@ static int core_scsi3_emulate_pro_register_and_move(
ret = -EINVAL;
goto out;
}
#if 0
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %u\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
#endif
/*
* A persistent reservation needs to already exist in order to
* successfully complete the REGISTER_AND_MOVE service action..
@ -3802,9 +3795,8 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
/*
* See spc4r17 section 6.14 Table 170
*/
int target_scsi3_emulate_pr_out(struct se_task *task)
int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
@ -3944,10 +3936,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
}
out:
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}
@ -4302,9 +4292,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return 0;
}
int target_scsi3_emulate_pr_in(struct se_task *task)
int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
struct se_cmd *cmd = task->task_se_cmd;
int ret;
/*
@ -4345,10 +4334,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
break;
}
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
if (!ret)
target_complete_cmd(cmd, GOOD);
return ret;
}


@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int target_scsi2_reservation_release(struct se_task *task);
extern int target_scsi2_reservation_reserve(struct se_task *task);
extern int target_scsi2_reservation_release(struct se_cmd *);
extern int target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int target_scsi3_emulate_pr_in(struct se_task *task);
extern int target_scsi3_emulate_pr_out(struct se_task *task);
extern int target_scsi3_emulate_pr_in(struct se_cmd *);
extern int target_scsi3_emulate_pr_out(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);
#endif /* TARGET_CORE_PR_H */


@ -663,22 +663,12 @@ static void pscsi_free_device(void *p)
kfree(pdv);
}
static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
{
return container_of(task, struct pscsi_plugin_task, pscsi_task);
}
/* pscsi_transport_complete():
*
*
*/
static int pscsi_transport_complete(struct se_task *task)
{
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_plugin_task *pt = cmd->priv;
unsigned char *cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
@ -688,12 +678,11 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (!task->task_se_cmd->se_deve)
if (!cmd->se_deve)
goto after_mode_sense;
if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = transport_kmap_data_sg(cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@ -703,7 +692,7 @@ static int pscsi_transport_complete(struct se_task *task)
buf[2] |= 0x80;
}
transport_kunmap_data_sg(task->task_se_cmd);
transport_kunmap_data_sg(cmd);
}
}
after_mode_sense:
@ -722,7 +711,6 @@ static int pscsi_transport_complete(struct se_task *task)
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
unsigned char *buf;
struct scatterlist *sg = task->task_sg;
u16 bdl;
u32 blocksize;
@ -757,35 +745,6 @@ static int pscsi_transport_complete(struct se_task *task)
return 0;
}
static struct se_task *
pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
/*
* Dynamically alloc cdb space, since it may be larger than
* TCM_MAX_COMMAND_SIZE
*/
pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
return &pt->pscsi_task;
}
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
/*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
*/
kfree(pt);
}
enum {
Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
Opt_scsi_lun_id, Opt_err
@ -958,26 +917,25 @@ static inline struct bio *pscsi_get_bio(int sg_num)
return bio;
}
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction,
struct bio **hbio)
{
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct bio *bio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
u32 data_len = cmd->data_length, i, len, bytes, off;
int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
int rw = (data_direction == DMA_TO_DEVICE);
*hbio = NULL;
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
for_each_sg(sgl, sg, sgl_nents, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
@ -1009,7 +967,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
* Set *hbio pointer to handle the case:
* nr_pages > BIO_MAX_PAGES, where additional
* bios need to be added to complete a given
* struct se_task
* command.
*/
if (!*hbio)
*hbio = tbio = bio;
@ -1049,7 +1007,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
}
}
return task->task_sg_nents;
return sgl_nents;
fail:
while (*hbio) {
bio = *hbio;
@ -1061,52 +1019,61 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt;
struct request *req;
struct bio *hbio;
int ret;
target_get_task_cdb(task, pt->pscsi_cdb);
/*
* Dynamically alloc cdb space, since it may be larger than
* TCM_MAX_COMMAND_SIZE
*/
pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
if (!pt) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
cmd->priv = pt;
if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
(data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENODEV;
goto fail;
}
} else {
BUG_ON(!task->task_size);
BUG_ON(!cmd->data_length);
/*
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
if (ret < 0) {
cmd->scsi_sense_reason =
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
goto fail;
}
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
if (IS_ERR(req)) {
pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
goto fail_free_bio;
}
}
req->cmd_type = REQ_TYPE_BLOCK_PC;
req->end_io = pscsi_req_done;
req->end_io_data = task;
req->end_io_data = cmd;
req->cmd_len = scsi_command_size(pt->pscsi_cdb);
req->cmd = &pt->pscsi_cdb[0];
req->sense = &pt->pscsi_sense[0];
@ -1118,12 +1085,12 @@ static int pscsi_do_task(struct se_task *task)
req->retries = PS_RETRY;
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
(cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
return 0;
fail:
fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
@ -1131,16 +1098,14 @@ static int pscsi_do_task(struct se_task *task)
bio_endio(bio, 0); /* XXX: should be error */
}
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
kfree(pt);
return -ENOMEM;
}
/* pscsi_get_sense_buffer():
*
*
*/
static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_plugin_task *pt = cmd->priv;
return pt->pscsi_sense;
}
@ -1180,48 +1145,36 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
return 0;
}
/* pscsi_handle_SAM_STATUS_failures():
*
*
*/
static inline void pscsi_process_SAM_status(
struct se_task *task,
struct pscsi_plugin_task *pt)
static void pscsi_req_done(struct request *req, int uptodate)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
struct se_cmd *cmd = req->end_io_data;
struct pscsi_plugin_task *pt = cmd->priv;
pt->pscsi_result = req->errors;
pt->pscsi_resid = req->resid_len;
cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
if (cmd->scsi_status) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
}
switch (host_byte(pt->pscsi_result)) {
case DID_OK:
transport_complete_task(task, (!task->task_scsi_status));
target_complete_cmd(cmd, cmd->scsi_status);
break;
default:
pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
" 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_se_cmd->scsi_sense_reason =
TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
}
static void pscsi_req_done(struct request *req, int uptodate)
{
struct se_task *task = req->end_io_data;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
pt->pscsi_result = req->errors;
pt->pscsi_resid = req->resid_len;
pscsi_process_SAM_status(task, pt);
__blk_put_request(req->q, req);
kfree(pt);
}
static struct se_subsystem_api pscsi_template = {
@ -1235,9 +1188,7 @@ static struct se_subsystem_api pscsi_template = {
.create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
.alloc_task = pscsi_alloc_task,
.do_task = pscsi_do_task,
.free_task = pscsi_free_task,
.execute_cmd = pscsi_execute_cmd,
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
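With struct se_task gone, pscsi allocates its per-command state at execute time and parks it in cmd->priv; the request completion handler recovers it from there and now also owns the kfree(). The round trip, sketched with a hypothetical example_priv standing in for pscsi_plugin_task:

struct example_priv {
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];
	int result;
};

static int example_execute(struct se_cmd *cmd)
{
	struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	cmd->priv = p;		/* the completion handler finds it here */
	/* ... build the request with req->end_io_data = cmd and fire it ... */
	return 0;
}

static void example_req_done(struct request *req, int uptodate)
{
	struct se_cmd *cmd = req->end_io_data;
	struct example_priv *p = cmd->priv;

	target_complete_cmd(cmd, status_byte(p->result) << 1);
	__blk_put_request(req->q, req);
	kfree(p);		/* lifetime now ends here, not in free_task() */
}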


@ -22,7 +22,6 @@
#include <linux/kobject.h>
struct pscsi_plugin_task {
struct se_task pscsi_task;
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;


@ -64,9 +64,6 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
@ -199,10 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
return 0;
}
static void *rd_allocate_virtdevice(
struct se_hba *hba,
const char *name,
int rd_direct)
static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
@ -214,25 +208,12 @@ static void *rd_allocate_virtdevice(
}
rd_dev->rd_host = rd_host;
rd_dev->rd_direct = rd_direct;
return rd_dev;
}
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
}
/* rd_create_virtdevice():
*
*
*/
static struct se_device *rd_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p,
int rd_direct)
static struct se_device *rd_create_virtdevice(struct se_hba *hba,
struct se_subsystem_dev *se_dev, void *p)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
@ -247,13 +228,12 @@ static struct se_device *rd_create_virtdevice(
if (ret < 0)
goto fail;
snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
RD_MCP_VERSION);
snprintf(prod, 16, "RAMDISK-MCP");
snprintf(rev, 4, "%s", RD_MCP_VERSION);
dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
dev_limits.limits.max_sectors = RD_MAX_SECTORS;
dev_limits.limits.max_hw_sectors = UINT_MAX;
dev_limits.limits.max_sectors = UINT_MAX;
dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
@ -264,12 +244,10 @@ static struct se_device *rd_create_virtdevice(
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
@ -280,18 +258,6 @@ static struct se_device *rd_create_virtdevice(
return ERR_PTR(ret);
}
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
return rd_create_virtdevice(hba, se_dev, p, 0);
}
/* rd_free_device(): (Part of se_subsystem_api_t template)
*
*
*/
static void rd_free_device(void *p)
{
struct rd_dev *rd_dev = p;
@ -300,29 +266,6 @@ static void rd_free_device(void *p)
kfree(rd_dev);
}
static inline struct rd_request *RD_REQ(struct se_task *task)
{
return container_of(task, struct rd_request, rd_task);
}
static struct se_task *
rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
return &rd_req->rd_task;
}
/* rd_get_sg_table():
*
*
*/
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
u32 i;
@ -341,31 +284,41 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *rd_sg;
struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
u32 rd_offset;
u32 rd_size;
u32 rd_page;
u32 src_len;
u64 tmp;
table = rd_get_sg_table(dev, req->rd_page);
tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
rd_offset = do_div(tmp, PAGE_SIZE);
rd_page = tmp;
rd_size = cmd->data_length;
table = rd_get_sg_table(dev, rd_page);
if (!table)
return -EINVAL;
rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
rd_sg = &table->sg_table[rd_page - table->page_start_offset];
pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
dev->rd_dev_id, read_rd ? "Read" : "Write",
task->task_lba, req->rd_size, req->rd_page,
rd_offset);
dev->rd_dev_id,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, task->task_sg, task->task_sg_nents,
read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (rd_size) {
u32 len;
void *rd_addr;
@ -375,13 +328,13 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
rd_addr = sg_virt(rd_sg) + rd_offset;
if (read_rd)
if (data_direction == DMA_FROM_DEVICE)
memcpy(m.addr, rd_addr, len);
else
memcpy(rd_addr, m.addr, len);
req->rd_size -= len;
if (!req->rd_size)
rd_size -= len;
if (!rd_size)
continue;
src_len -= len;
@ -391,15 +344,15 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
}
/* rd page completed, next one please */
req->rd_page++;
rd_page++;
rd_offset = 0;
src_len = PAGE_SIZE;
if (req->rd_page <= table->page_end_offset) {
if (rd_page <= table->page_end_offset) {
rd_sg++;
continue;
}
table = rd_get_sg_table(dev, req->rd_page);
table = rd_get_sg_table(dev, rd_page);
if (!table) {
sg_miter_stop(&m);
return -EINVAL;
@ -409,43 +362,11 @@ static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
rd_sg = table->sg_table;
}
sg_miter_stop(&m);
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
u64 tmp;
int ret;
tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_offset = do_div(tmp, PAGE_SIZE);
req->rd_page = tmp;
req->rd_size = task->task_size;
ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
*/
static void rd_free_task(struct se_task *task)
{
kfree(RD_REQ(task));
}
enum {
Opt_rd_pages, Opt_err
};
@ -512,9 +433,8 @@ static ssize_t rd_show_configfs_dev_params(
char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
"rd_direct" : "rd_mcp");
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
" SG_table_count: %u\n", rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count);
@ -545,12 +465,10 @@ static struct se_subsystem_api rd_mcp_template = {
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
.create_virtdevice = rd_MEMCPY_create_virtdevice,
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.free_device = rd_free_device,
.alloc_task = rd_alloc_task,
.do_task = rd_MEMCPY_do_task,
.free_task = rd_free_task,
.execute_cmd = rd_execute_cmd,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
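rd_execute_cmd() derives the starting ramdisk page and intra-page offset inline, replacing the rd_offset/rd_page fields of the removed struct rd_request. The arithmetic, isolated into a hypothetical helper with one worked example:

static void example_lba_to_page(unsigned long long lba, u32 block_size,
	u32 *page, u32 *offset)
{
	u64 tmp = lba * block_size;	/* absolute byte offset into the ramdisk */

	/* do_div() divides tmp by PAGE_SIZE in place and returns the
	 * remainder. E.g. lba 9, block_size 512, 4K pages:
	 * tmp = 4608 -> *page = 1, *offset = 512. */
	*offset = do_div(tmp, PAGE_SIZE);
	*page = tmp;
}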


@ -2,7 +2,6 @@
#define TARGET_CORE_RD_H
#define RD_HBA_VERSION "v4.0"
#define RD_DR_VERSION "4.0"
#define RD_MCP_VERSION "4.0"
/* Largest piece of memory kmalloc can allocate */
@ -10,28 +9,11 @@
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
#define RD_MAX_SECTORS 1024
/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
struct rd_request {
struct se_task rd_task;
/* Offset from start of page */
u32 rd_offset;
/* Starting page in Ramdisk for request */
u32 rd_page;
/* Total number of pages needed for request */
u32 rd_page_count;
/* Scatterlist count */
u32 rd_size;
} ____cacheline_aligned;
struct rd_dev_sg_table {
u32 page_start_offset;
u32 page_end_offset;
@ -42,7 +24,6 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev {
int rd_direct;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
@ -50,7 +31,6 @@ struct rd_dev {
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
u32 rd_queue_depth;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
/* Ramdisk HBA device is connected to */


@ -244,7 +244,7 @@ static void core_tmr_drain_tmr_list(
}
}
static void core_tmr_drain_task_list(
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
struct se_node_acl *tmr_nacl,
@ -252,12 +252,13 @@ static void core_tmr_drain_task_list(
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
struct se_cmd *cmd;
struct se_task *task, *task_tmp;
struct se_cmd *cmd, *next;
unsigned long flags;
int fe_count;
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* Complete outstanding commands with TASK_ABORTED SAM status.
*
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
@ -278,56 +279,43 @@ static void core_tmr_drain_task_list(
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
if (!task->task_se_cmd) {
pr_err("task->task_se_cmd is NULL!\n");
continue;
}
cmd = task->task_se_cmd;
list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
list_move_tail(&task->t_state_list, &drain_task_list);
task->t_state_active = false;
/*
* Remove from task execute list before processing drain_task_list
*/
if (!list_empty(&task->t_execute_list))
__transport_remove_task_from_execute_queue(task, dev);
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
if (!list_empty(&cmd->execute_list))
__target_remove_from_execute_list(cmd);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
while (!list_empty(&drain_task_list)) {
task = list_entry(drain_task_list.next, struct se_task, t_state_list);
list_del(&task->t_state_list);
cmd = task->task_se_cmd;
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
list_del(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p task: %p"
pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
"cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
(preempt_and_abort_list) ? "Preempt" : "", cmd,
cmd->se_tfo->get_task_tag(cmd), 0,
cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->t_task_cdb[0]);
pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- CMD_T_ACTIVE: %d"
" -- CMD_T_ACTIVE: %d"
" CMD_T_STOP: %d CMD_T_SENT: %d\n",
cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
cmd->t_task_list_num,
atomic_read(&cmd->t_task_cdbs_left),
atomic_read(&cmd->t_task_cdbs_sent),
(cmd->transport_state & CMD_T_ACTIVE) != 0,
(cmd->transport_state & CMD_T_STOP) != 0,
(cmd->transport_state & CMD_T_SENT) != 0);
@ -343,20 +331,13 @@ static void core_tmr_drain_task_list(
cancel_work_sync(&cmd->work);
spin_lock_irqsave(&cmd->t_state_lock, flags);
target_stop_task(task, &flags);
target_stop_cmd(cmd, &flags);
if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&cmd->t_task_cdbs_ex_left));
continue;
}
fe_count = atomic_read(&cmd->t_fe_count);
if (!(cmd->transport_state & CMD_T_ACTIVE)) {
pr_debug("LUN_RESET: got CMD_T_ACTIVE for"
" task: %p, t_fe_count: %d dev: %p\n", task,
" cdb: %p, t_fe_count: %d dev: %p\n", cmd,
fe_count, dev);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@ -364,8 +345,8 @@ static void core_tmr_drain_task_list(
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
continue;
}
pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
pr_debug("LUN_RESET: Got !CMD_T_ACTIVE for cdb: %p,"
" t_fe_count: %d dev: %p\n", cmd, fe_count, dev);
cmd->transport_state |= CMD_T_ABORTED;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@ -384,13 +365,11 @@ static void core_tmr_drain_cmd_list(
struct se_queue_obj *qobj = &dev->dev_queue_obj;
struct se_cmd *cmd, *tcmd;
unsigned long flags;
/*
* Release all commands remaining in the struct se_device cmd queue.
* Release all commands remaining in the per-device command queue.
*
* This follows the same logic as above for the struct se_device
* struct se_task state list, where commands are returned with
* TASK_ABORTED status, if there is an outstanding $FABRIC_MOD
* reference, otherwise the struct se_cmd is released.
* This follows the same logic as above for the state list.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
@ -466,7 +445,7 @@ int core_tmr_lun_reset(
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
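core_tmr_drain_state_list() keeps the familiar two-phase drain, now over commands instead of tasks: matching entries are moved onto a private list while execute_task_lock is held, and the abort work runs with the lock dropped. The bare shape, as a sketch:

static void example_drain(struct se_device *dev)
{
	LIST_HEAD(drain_list);
	struct se_cmd *cmd, *next;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list)
		list_move_tail(&cmd->state_list, &drain_list);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);

	while (!list_empty(&drain_list)) {
		cmd = list_first_entry(&drain_list, struct se_cmd, state_list);
		list_del_init(&cmd->state_list);
		/* ... abort or complete cmd without the device lock held ... */
	}
}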


@ -153,9 +153,6 @@ void core_tpg_add_node_to_devs(
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
/*

File diff suppressed because it is too large


@ -215,20 +215,10 @@ int ft_write_pending(struct se_cmd *se_cmd)
*/
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
/*
* cmd may have been broken up into multiple
* tasks. Link their sgs together so we can
* operate on them all at once.
*/
transport_do_task_sg_chain(se_cmd);
cmd->sg = se_cmd->t_tasks_sg_chained;
cmd->sg_cnt =
se_cmd->t_tasks_sg_chained_no;
}
if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
cmd->sg,
cmd->sg_cnt))
if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) &&
lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg,
se_cmd->t_data_nents))
cmd->was_ddp_setup = 1;
}
}


@ -576,9 +576,6 @@ int ft_register_configfs(void)
}
fabric->tf_ops = ft_fabric_ops;
/* Allowing support for task_sg_chaining */
fabric->tf_ops.task_sg_chaining = 1;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/


@@ -228,7 +228,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 			       "payload, Frame will be dropped if"
 			       "'Sequence Initiative' bit in f_ctl is"
 			       "not set\n", __func__, ep->xid, f_ctl,
-			       cmd->sg, cmd->sg_cnt);
+			       se_cmd->t_data_sg, se_cmd->t_data_nents);
 	/*
 	 * Invalidate HW DDP context if it was setup for respective
 	 * command. Invalidation of HW DDP context is requited in both

diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h

@@ -23,12 +23,11 @@ struct se_subsystem_api {
 	struct se_device *(*create_virtdevice)(struct se_hba *,
 		struct se_subsystem_dev *, void *);
 	void (*free_device)(void *);
-	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(unsigned char *cdb);
-	int (*do_task)(struct se_task *);
+	int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
+	int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
+		enum dma_data_direction);
 	int (*do_discard)(struct se_device *, sector_t, u32);
-	void (*do_sync_cache)(struct se_task *);
-	void (*free_task)(struct se_task *);
+	void (*do_sync_cache)(struct se_cmd *);
 	ssize_t (*check_configfs_dev_params)(struct se_hba *,
 		struct se_subsystem_dev *);
 	ssize_t (*set_configfs_dev_params)(struct se_hba *,
@@ -38,7 +37,7 @@ struct se_subsystem_api {
 	u32 (*get_device_rev)(struct se_device *);
 	u32 (*get_device_type)(struct se_device *);
 	sector_t (*get_blocks)(struct se_device *);
-	unsigned char *(*get_sense_buffer)(struct se_task *);
+	unsigned char *(*get_sense_buffer)(struct se_cmd *);
 };

 int	transport_subsystem_register(struct se_subsystem_api *);
@@ -48,10 +47,7 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 	struct se_subsystem_api *, struct se_subsystem_dev *, u32,
 	void *, struct se_dev_limits *, const char *, const char *);

 void	transport_complete_sync_cache(struct se_cmd *, int);
-void	transport_complete_task(struct se_task *, int);
-void	target_get_task_cdb(struct se_task *, unsigned char *);
+void	target_complete_cmd(struct se_cmd *, u8);

 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
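
To see the reshaped backend contract in one place, here is a minimal sketch of a hypothetical "foo" backend. Only the execute_cmd prototype and target_complete_cmd() come from the hunks above; the names and the trivial body are illustrative, not real kernel code:

	/* Sketch: with struct se_task gone, the backend gets the whole
	 * command plus its scatterlist in one call and reports status
	 * with target_complete_cmd(). */
	static int foo_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
			u32 sgl_nents, enum dma_data_direction data_direction)
	{
		/* ... issue the I/O described by cmd and sgl here ... */
		target_complete_cmd(cmd, SAM_STAT_GOOD);	/* u8 SCSI status */
		return 0;
	}

	static struct se_subsystem_api foo_template = {
		.execute_cmd	= foo_execute_cmd,
	};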

diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h

@@ -73,9 +73,8 @@
 /*
  * struct se_device->dev_flags
  */
-#define DF_READ_ONLY				0x00000001
-#define DF_SPC2_RESERVATIONS			0x00000002
-#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
+#define DF_SPC2_RESERVATIONS			0x00000001
+#define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000002

 /* struct se_dev_attrib sanity values */
 /* Default max_unmap_lba_count */
@@ -141,14 +140,6 @@ enum transport_tpg_type_table {
 	TRANSPORT_TPG_TYPE_DISCOVERY = 1,
 };

-/* struct se_task->task_flags */
-enum se_task_flags {
-	TF_ACTIVE		= (1 << 0),
-	TF_SENT			= (1 << 1),
-	TF_REQUEST_STOP		= (1 << 2),
-	TF_HAS_SENSE		= (1 << 3),
-};
-
 /* Special transport agnostic struct se_cmd->t_states */
 enum transport_state_table {
 	TRANSPORT_NO_STATE = 0,
@@ -234,6 +225,7 @@ enum tcm_sense_reason_table {
 enum target_sc_flags_table {
 	TARGET_SCF_BIDI_OP		= 0x01,
 	TARGET_SCF_ACK_KREF		= 0x02,
+	TARGET_SCF_UNKNOWN_SIZE		= 0x04,
 };

 /* fabric independent task management function values */
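
TARGET_SCF_UNKNOWN_SIZE pairs with the unknown_data_length bit added to struct se_cmd further down: a fabric whose wire protocol does not carry a trustworthy expected data length can ask the core to size the command from the CDB instead. A hedged call-site sketch; the wrapper and its variable names are illustrative, while the prototype matches the target_submit_cmd declaration in target_core_fabric.h below:

	/* Sketch: queue a WRITE whose length the fabric cannot verify. */
	static void queue_unknown_size_write(struct se_cmd *se_cmd,
			struct se_session *se_sess, unsigned char *cdb,
			unsigned char *sense, u32 unpacked_lun, u32 len)
	{
		target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
				  len, MSG_SIMPLE_TAG, DMA_TO_DEVICE,
				  TARGET_SCF_UNKNOWN_SIZE);
	}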
@@ -338,6 +330,7 @@ struct t10_alua_tg_pt_gp {
 	int	tg_pt_gp_alua_access_type;
 	int	tg_pt_gp_nonop_delay_msecs;
 	int	tg_pt_gp_trans_delay_msecs;
+	int	tg_pt_gp_implict_trans_secs;
 	int	tg_pt_gp_pref;
 	int	tg_pt_gp_write_metadata;
 	/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
@@ -485,23 +478,6 @@ struct se_queue_obj {
 	wait_queue_head_t	thread_wq;
 };

-struct se_task {
-	unsigned long long	task_lba;
-	u32			task_sectors;
-	u32			task_size;
-	struct se_cmd		*task_se_cmd;
-	struct scatterlist	*task_sg;
-	u32			task_sg_nents;
-	u16			task_flags;
-	u8			task_scsi_status;
-	enum dma_data_direction	task_data_direction;
-	struct list_head	t_list;
-	struct list_head	t_execute_list;
-	struct list_head	t_state_list;
-	bool			t_state_active;
-	struct completion	task_stop_comp;
-};
-
 struct se_tmr_req {
 	/* Task Management function to be performed */
 	u8			function;
@@ -538,6 +514,7 @@ struct se_cmd {
 	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
 	unsigned		check_release:1;
 	unsigned		cmd_wait_set:1;
+	unsigned		unknown_data_length:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
 	u32			se_ordered_id;
@@ -565,18 +542,13 @@ struct se_cmd {
 	struct completion	cmd_wait_comp;
 	struct kref		cmd_kref;
 	struct target_core_fabric_ops *se_tfo;
-	int (*execute_task)(struct se_task *);
+	int (*execute_cmd)(struct se_cmd *);
 	void (*transport_complete_callback)(struct se_cmd *);

 	unsigned char		*t_task_cdb;
 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
 	unsigned long long	t_task_lba;
-	u32			t_tasks_sg_chained_no;
 	atomic_t		t_fe_count;
-	atomic_t		t_se_count;
-	atomic_t		t_task_cdbs_left;
-	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_sent;
 	unsigned int		transport_state;
 #define CMD_T_ABORTED		(1 << 0)
 #define CMD_T_ACTIVE		(1 << 1)
@@ -588,11 +560,12 @@ struct se_cmd {
 #define CMD_T_LUN_STOP		(1 << 7)
 #define CMD_T_LUN_FE_STOP	(1 << 8)
 #define CMD_T_DEV_ACTIVE	(1 << 9)
+#define CMD_T_REQUEST_STOP	(1 << 10)
+#define CMD_T_BUSY		(1 << 11)
 	spinlock_t		t_state_lock;
 	struct completion	t_transport_stop_comp;
 	struct completion	transport_lun_fe_stop_comp;
 	struct completion	transport_lun_stop_comp;
-	struct scatterlist	*t_tasks_sg_chained;

 	struct work_struct	work;
@@ -602,10 +575,15 @@ struct se_cmd {
 	struct scatterlist	*t_bidi_data_sg;
 	unsigned int		t_bidi_data_nents;

-	/* Used for BIDI READ */
-	struct list_head	t_task_list;
-	u32			t_task_list_num;
+	struct list_head	execute_list;
+	struct list_head	state_list;
+	bool			state_active;
+
+	/* old task stop completion, consider merging with some of the above */
+	struct completion	task_stop_comp;
+
+	/* backend private data */
+	void			*priv;
 };

 struct se_ua {
@@ -731,7 +709,6 @@ struct se_dev_attrib {
 	u32		hw_block_size;
 	u32		block_size;
 	u32		hw_max_sectors;
-	u32		max_sectors;
 	u32		fabric_max_sectors;
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
@@ -829,8 +806,8 @@ struct se_device {
 	struct task_struct	*process_thread;
 	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
-	struct list_head	execute_task_list;
-	struct list_head	state_task_list;
+	struct list_head	execute_list;
+	struct list_head	state_list;
 	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba	*se_hba;
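
Since se_device now carries execute_list/state_list of commands rather than tasks, drain-style walks iterate se_cmd nodes directly. A sketch assuming only the fields declared above; the helper name and pr_debug body are illustrative, and locking is omitted for brevity:

	/* Sketch: device state tracking now iterates commands, not tasks. */
	static void walk_state_list(struct se_device *dev)
	{
		struct se_cmd *cmd, *next;

		list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
			if (!cmd->state_active)
				continue;
			pr_debug("active cmd: %p\n", cmd);	/* drain/abort here */
		}
	}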

diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h

@@ -3,12 +3,6 @@
 struct target_core_fabric_ops {
 	struct configfs_subsystem *tf_subsys;
-	/*
-	 * Optional to signal struct se_task->task_sg[] padding entries
-	 * for scatterlist chaining using transport_do_task_sg_link(),
-	 * disabled by default
-	 */
-	bool task_sg_chaining;
 	char *(*get_fabric_name)(void);
 	u8 (*get_fabric_proto_ident)(struct se_portal_group *);
 	char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -102,7 +96,7 @@ void __transport_register_session(struct se_portal_group *,
 void	transport_register_session(struct se_portal_group *,
 		struct se_node_acl *, struct se_session *, void *);
 void	target_get_session(struct se_session *);
-int	target_put_session(struct se_session *);
+void	target_put_session(struct se_session *);
 void	transport_free_session(struct se_session *);
 void	target_put_nacl(struct se_node_acl *);
 void	transport_deregister_session_configfs(struct se_session *);
@@ -112,7 +106,7 @@ void transport_deregister_session(struct se_session *);
 void	transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
 		struct se_session *, u32, int, int, unsigned char *);
 int	transport_lookup_cmd_lun(struct se_cmd *, u32);
-int	transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+int	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 void	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
@@ -124,7 +118,6 @@ int transport_generic_handle_cdb_map(struct se_cmd *);
 int	transport_generic_handle_data(struct se_cmd *);
 int	transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
 		struct scatterlist *, u32, struct scatterlist *, u32);
-void	transport_do_task_sg_chain(struct se_cmd *);
 int	transport_generic_new_cmd(struct se_cmd *);
 void	transport_generic_process_write(struct se_cmd *);
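
For fabric modules chasing these prototype changes, the caller-side migration is mechanical. A hedged sketch that simply juxtaposes the two changed calls; setup_cmd, ret, and cdb are illustrative, not part of this series:

	/* Sketch: rename the setup call 1:1 and stop checking the
	 * now-void return of target_put_session(). */
	static int setup_cmd(struct se_cmd *se_cmd, unsigned char *cdb,
			     struct se_session *se_sess)
	{
		int ret;

		ret = target_setup_cmd_from_cdb(se_cmd, cdb);
		if (ret < 0)			/* was transport_generic_allocate_tasks() */
			return ret;

		target_put_session(se_sess);	/* no longer returns an int */
		return 0;
	}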