mirror of https://gitee.com/openkylin/linux.git
SCSI misc on 20210428
This series consists of the usual driver updates (ufs, target, tcmu, smartpqi, lpfc, zfcp, qla2xxx, mpt3sas, pm80xx). The major core change is using a sbitmap instead of an atomic for queue tracking.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYInvqCYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishYh2AP0SgqqL
WYZRT2oiyBOKD28v+ceOSiXvgjPlqABwVMC0BAEAn29/wNCxyvzZ1k/b0iPJ4M+S
klkSxLzXKQLzJBgdK5w=
=p5B/
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This consists of the usual driver updates (ufs, target, tcmu, smartpqi,
  lpfc, zfcp, qla2xxx, mpt3sas, pm80xx).

  The major core change is using a sbitmap instead of an atomic for
  queue tracking"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (412 commits)
  scsi: target: tcm_fc: Fix a kernel-doc header
  scsi: target: Shorten ALUA error messages
  scsi: target: Fix two format specifiers
  scsi: target: Compare explicitly with SAM_STAT_GOOD
  scsi: sd: Introduce a new local variable in sd_check_events()
  scsi: dc395x: Open-code status_byte(u8) calls
  scsi: 53c700: Open-code status_byte(u8) calls
  scsi: smartpqi: Remove unused functions
  scsi: qla4xxx: Remove an unused function
  scsi: myrs: Remove unused functions
  scsi: myrb: Remove unused functions
  scsi: mpt3sas: Fix two kernel-doc headers
  scsi: fcoe: Suppress a compiler warning
  scsi: libfc: Fix a format specifier
  scsi: aacraid: Remove an unused function
  scsi: core: Introduce enum scsi_disposition
  scsi: core: Modify the scsi_send_eh_cmnd() return value for the SDEV_BLOCK case
  scsi: core: Rename scsi_softirq_done() into scsi_complete()
  scsi: core: Remove an incorrect comment
  scsi: core: Make the scsi_alloc_sgtables() documentation more accurate
  ...
commit d72cd4ad41
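The core change called out above replaces the old boolean dispatch-budget interface with an integer "budget token", so the SCSI midlayer can account per-device queue depth in an sbitmap instead of an atomic counter. The sketch below is illustrative only: the callback shapes match the int-returning interface introduced in the hunks that follow, but the sbitmap-backed device structure (example_dev, budget_map) is a hypothetical stand-in, not the mainline SCSI implementation.

    /* Hedged sketch: hand out an sbitmap bit as the dispatch-budget token. */
    #include <linux/blk-mq.h>
    #include <linux/sbitmap.h>

    struct example_dev {
            struct sbitmap budget_map;      /* one bit per in-flight command */
    };

    static int example_get_budget(struct request_queue *q)
    {
            struct example_dev *dev = q->queuedata;

            /* a free bit index (>= 0) on success, -1 when the device is saturated */
            return sbitmap_get(&dev->budget_map);
    }

    static void example_put_budget(struct request_queue *q, int budget_token)
    {
            struct example_dev *dev = q->queuedata;

            sbitmap_put(&dev->budget_map, budget_token);
    }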
@@ -14,6 +14,8 @@ Required properties:
                       "qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
                       "qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
                       "qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+                      "qcom,sm8250-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+                      "qcom,sm8350-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>
@@ -132,6 +132,7 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
 
     do {
         struct request *rq;
+        int budget_token;
 
         if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
             break;

@@ -141,12 +142,13 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
             break;
         }
 
-        if (!blk_mq_get_dispatch_budget(q))
+        budget_token = blk_mq_get_dispatch_budget(q);
+        if (budget_token < 0)
             break;
 
         rq = e->type->ops.dispatch_request(hctx);
         if (!rq) {
-            blk_mq_put_dispatch_budget(q);
+            blk_mq_put_dispatch_budget(q, budget_token);
             /*
              * We're releasing without dispatching. Holding the
              * budget could have blocked any "hctx"s with the

@@ -158,6 +160,8 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
             break;
         }
 
+        blk_mq_set_rq_budget_token(rq, budget_token);
+
         /*
          * Now this rq owns the budget which has to be released
          * if this rq won't be queued to driver via .queue_rq()

@@ -231,6 +235,8 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
     struct request *rq;
 
     do {
+        int budget_token;
+
         if (!list_empty_careful(&hctx->dispatch)) {
             ret = -EAGAIN;
             break;

@@ -239,12 +245,13 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
         if (!sbitmap_any_bit_set(&hctx->ctx_map))
             break;
 
-        if (!blk_mq_get_dispatch_budget(q))
+        budget_token = blk_mq_get_dispatch_budget(q);
+        if (budget_token < 0)
             break;
 
         rq = blk_mq_dequeue_from_ctx(hctx, ctx);
         if (!rq) {
-            blk_mq_put_dispatch_budget(q);
+            blk_mq_put_dispatch_budget(q, budget_token);
             /*
              * We're releasing without dispatching. Holding the
              * budget could have blocked any "hctx"s with the

@@ -256,6 +263,8 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
             break;
         }
 
+        blk_mq_set_rq_budget_token(rq, budget_token);
+
         /*
          * Now this rq owns the budget which has to be released
         * if this rq won't be queued to driver via .queue_rq()
@@ -1278,10 +1278,15 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
                                                   bool need_budget)
 {
     struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+    int budget_token = -1;
 
-    if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) {
-        blk_mq_put_driver_tag(rq);
-        return PREP_DISPATCH_NO_BUDGET;
+    if (need_budget) {
+        budget_token = blk_mq_get_dispatch_budget(rq->q);
+        if (budget_token < 0) {
+            blk_mq_put_driver_tag(rq);
+            return PREP_DISPATCH_NO_BUDGET;
+        }
+        blk_mq_set_rq_budget_token(rq, budget_token);
     }
 
     if (!blk_mq_get_driver_tag(rq)) {

@@ -1298,7 +1303,7 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
              * together during handling partial dispatch
              */
             if (need_budget)
-                blk_mq_put_dispatch_budget(rq->q);
+                blk_mq_put_dispatch_budget(rq->q, budget_token);
             return PREP_DISPATCH_NO_TAG;
         }
     }

@@ -1308,12 +1313,16 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
 
 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
 static void blk_mq_release_budgets(struct request_queue *q,
-        unsigned int nr_budgets)
+        struct list_head *list)
 {
-    int i;
+    struct request *rq;
 
-    for (i = 0; i < nr_budgets; i++)
-        blk_mq_put_dispatch_budget(q);
+    list_for_each_entry(rq, list, queuelist) {
+        int budget_token = blk_mq_get_rq_budget_token(rq);
+
+        if (budget_token >= 0)
+            blk_mq_put_dispatch_budget(q, budget_token);
+    }
 }
 
 /*

@@ -1411,7 +1420,8 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
             (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
         bool no_budget_avail = prep == PREP_DISPATCH_NO_BUDGET;
 
-        blk_mq_release_budgets(q, nr_budgets);
+        if (nr_budgets)
+            blk_mq_release_budgets(q, list);
 
         spin_lock(&hctx->lock);
         list_splice_tail_init(list, &hctx->dispatch);

@@ -2011,6 +2021,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
     struct request_queue *q = rq->q;
     bool run_queue = true;
+    int budget_token;
 
     /*
      * RCU or SRCU read lock is needed before checking quiesced flag.

@@ -2028,11 +2039,14 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
     if (q->elevator && !bypass_insert)
         goto insert;
 
-    if (!blk_mq_get_dispatch_budget(q))
+    budget_token = blk_mq_get_dispatch_budget(q);
+    if (budget_token < 0)
         goto insert;
 
+    blk_mq_set_rq_budget_token(rq, budget_token);
+
     if (!blk_mq_get_driver_tag(rq)) {
-        blk_mq_put_dispatch_budget(q);
+        blk_mq_put_dispatch_budget(q, budget_token);
         goto insert;
     }
 

@@ -2704,7 +2718,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
         goto free_cpumask;
 
     if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
-                gfp, node))
+                gfp, node, false, false))
         goto free_ctxs;
     hctx->nr_ctx = 0;
 
@@ -187,17 +187,34 @@ unsigned int blk_mq_in_flight(struct request_queue *q,
 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
         unsigned int inflight[2]);
 
-static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
+static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
+                                              int budget_token)
 {
     if (q->mq_ops->put_budget)
-        q->mq_ops->put_budget(q);
+        q->mq_ops->put_budget(q, budget_token);
 }
 
-static inline bool blk_mq_get_dispatch_budget(struct request_queue *q)
+static inline int blk_mq_get_dispatch_budget(struct request_queue *q)
 {
     if (q->mq_ops->get_budget)
         return q->mq_ops->get_budget(q);
-    return true;
+    return 0;
+}
+
+static inline void blk_mq_set_rq_budget_token(struct request *rq, int token)
+{
+    if (token < 0)
+        return;
+
+    if (rq->q->mq_ops->set_rq_budget_token)
+        rq->q->mq_ops->set_rq_budget_token(rq, token);
+}
+
+static inline int blk_mq_get_rq_budget_token(struct request *rq)
+{
+    if (rq->q->mq_ops->get_rq_budget_token)
+        return rq->q->mq_ops->get_rq_budget_token(rq);
+    return -1;
 }
 
 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
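Taken together, the helpers above define the calling convention used throughout the dispatch paths in this series: obtain a token, stash it in the request, and return the same token if the request is not issued. A condensed, hedged restatement of that pattern follows (these are block-layer-internal helpers from block/blk-mq.h, shown only to summarize the flow; the wrapper function name is hypothetical and error handling is trimmed):

    /* Condensed restatement of the dispatch-budget pattern in the hunks above. */
    static bool example_dispatch_one(struct request *rq)
    {
            struct request_queue *q = rq->q;
            int budget_token;

            budget_token = blk_mq_get_dispatch_budget(q);
            if (budget_token < 0)
                    return false;           /* no budget: device saturated */

            blk_mq_set_rq_budget_token(rq, budget_token);

            if (!blk_mq_get_driver_tag(rq)) {
                    /* not issued: the budget goes back with its token */
                    blk_mq_put_dispatch_budget(q, budget_token);
                    return false;
            }
            return true;
    }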
@@ -478,7 +478,8 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
     for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
         if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
-                      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
+                      ilog2(8), GFP_KERNEL, hctx->numa_node,
+                      false, false)) {
             while (--i >= 0)
                 sbitmap_free(&khd->kcq_map[i]);
             goto err_kcqs;
@@ -1599,7 +1599,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
     }
 
     if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-        int ret = scsi_check_sense(qc->scsicmd);
+        enum scsi_disposition ret = scsi_check_sense(qc->scsicmd);
         /*
          * SUCCESS here means that the sense code could be
          * evaluated and should be passed to the upper layers
@@ -1528,16 +1528,20 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
         goto busy;
     }
 
-    rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
-                   &send_ioctx->sense_data[0],
-                   scsilun_to_int(&srp_cmd->lun), data_len,
-                   TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
-                   sg, sg_cnt, NULL, 0, NULL, 0);
+    rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
+                 scsilun_to_int(&srp_cmd->lun), data_len,
+                 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
     if (rc != 0) {
         pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
              srp_cmd->tag);
         goto busy;
     }
+
+    if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
+                   GFP_KERNEL))
+        return;
+
+    target_submit(cmd);
     return;
 
 busy:
@@ -424,8 +424,8 @@ typedef struct _SGE_TRANSACTION32
     U8          ContextSize;
     U8          DetailsLength;
     U8          Flags;
-    U32         TransactionContext[1];
-    U32         TransactionDetails[1];
+    U32         TransactionContext;
+    U32         TransactionDetails[];
 } SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
   SGETransaction32_t, MPI_POINTER pSGETransaction32_t;

@@ -448,7 +448,7 @@ typedef struct _MSG_EVENT_NOTIFY_REPLY
     U32         IOCLogInfo;     /* 10h */
     U32         Event;          /* 14h */
    U32         EventContext;   /* 18h */
-    U32         Data[1];        /* 1Ch */
+    U32         Data[];         /* 1Ch */
 } MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
   EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
@@ -3084,7 +3084,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
     int         req_sz;
     int         reply_sz;
     int         sz;
-    u32         status, vv;
+    u32         vv;
     u8          shiftFactor=1;
 
     /* IOC *must* NOT be in RESET state! */

@@ -3142,7 +3142,6 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
         facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
         facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
         facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
-        status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
         /* CHECKME! IOCStatus, IOCLogInfo */
 
         facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);

@@ -4974,7 +4973,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
 
     if (hdr.PageLength > 0) {
         data_sz = hdr.PageLength * 4;
-        ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+        ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
         rc = -ENOMEM;
         if (ppage0_alloc) {
             memset((u8 *)ppage0_alloc, 0, data_sz);

@@ -5020,7 +5019,7 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
 
         data_sz = hdr.PageLength * 4;
         rc = -ENOMEM;
-        ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+        ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
         if (ppage1_alloc) {
             memset((u8 *)ppage1_alloc, 0, data_sz);
             cfg.physAddr = page1_dma;

@@ -5321,7 +5320,7 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
     /* Read the config page */
     data_sz = hdr.PageLength * 4;
     rc = -ENOMEM;
-    ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+    ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
     if (ppage_alloc) {
         memset((u8 *)ppage_alloc, 0, data_sz);
         cfg.physAddr = page_dma;
@@ -274,7 +274,7 @@ typedef union _MPT_FRAME_TRACKER {
     } linkage;
     /*
      * NOTE: When request frames are free, on the linkage structure
-     * contets are valid.  All other values are invalid.
+     * contents are valid.  All other values are invalid.
      * In particular, do NOT reply on offset [2]
      * (in words) being the * message context.
      * The message context must be reset (computed via base address
@@ -321,7 +321,6 @@ mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
     int         ii;
     int         retval;
     unsigned long   timeout;
-    unsigned long   time_count;
     u16         iocstatus;
 
 

@@ -383,7 +382,6 @@ mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
         ioc->name, tm_type, timeout));
 
     INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
-    time_count = jiffies;
     if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
         (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
         mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);

@@ -1369,7 +1367,6 @@ mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
     int         lun;
     int         maxWordsLeft;
     int         numBytes;
-    u8          port;
     struct scsi_device  *sdev;
 
     if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {

@@ -1381,13 +1378,8 @@ mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg)
 
     dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
         ioc->name));
-    /* Get the port number and set the maximum number of bytes
-     * in the returned structure.
-     * Ignore the port setting.
-     */
     numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
     maxWordsLeft = numBytes/sizeof(int);
-    port = karg.hdr.port;
 
     if (maxWordsLeft <= 0) {
         printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
@@ -67,12 +67,13 @@
 
 #ifdef CONFIG_FUSION_LOGGING
 #define MPT_CHECK_LOGGING(IOC, CMD, BITS)   \
-{                                           \
+do {                                        \
     if (IOC->debug_level & BITS)            \
         CMD;                                \
-}
+} while (0)
 #else
-#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)   \
+do { } while (0)
 #endif
 
@@ -72,9 +72,6 @@ MODULE_VERSION(my_VERSION);
 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
     (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
 
-#define MPT_LAN_TRANSACTION32_SIZE \
-    (sizeof(SGETransaction32_t) - sizeof(u32))
-
 /*
  *  Fusion MPT LAN private structures
  */

@@ -745,7 +742,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
     pTrans->ContextSize   = sizeof(u32);
     pTrans->DetailsLength = 2 * sizeof(u32);
     pTrans->Flags         = 0;
-    pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+    pTrans->TransactionContext = cpu_to_le32(ctx);
 
//  dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//          IOC_AND_NETDEV_NAMES_s_s(dev),

@@ -1159,7 +1156,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
             __func__, buckets, curr));
 
     max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
-            (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+            (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
 
     while (buckets) {
         mf = mpt_get_msg_frame(LanCtx, mpt_dev);

@@ -1234,7 +1231,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
         pTrans->ContextSize   = sizeof(u32);
         pTrans->DetailsLength = 0;
         pTrans->Flags         = 0;
-        pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+        pTrans->TransactionContext = cpu_to_le32(ctx);
 
         pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
 
@@ -780,13 +780,11 @@ static void
 mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
     struct scsi_target *starget)
 {
-    VirtTarget      *vtarget;
     struct sas_rphy     *rphy;
     struct mptsas_phyinfo   *phy_info = NULL;
     struct mptsas_enclosure enclosure_info;
 
     rphy = dev_to_rphy(starget->dev.parent);
-    vtarget = starget->hostdata;
     phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
             rphy->identify.sas_address);
     if (!phy_info)

@@ -3442,14 +3440,12 @@ mptsas_expander_event_add(MPT_ADAPTER *ioc,
     __le64 sas_address;
 
     port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
-    if (!port_info)
-        BUG();
+    BUG_ON(!port_info);
     port_info->num_phys = (expander_data->NumPhys) ?
         expander_data->NumPhys : 1;
     port_info->phy_info = kcalloc(port_info->num_phys,
         sizeof(struct mptsas_phyinfo), GFP_KERNEL);
-    if (!port_info->phy_info)
-        BUG();
+    BUG_ON(!port_info->phy_info);
     memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
     for (i = 0; i < port_info->num_phys; i++) {
         port_info->phy_info[i].portinfo = port_info;

@@ -3781,7 +3777,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
                     printk(MYIOC_s_DEBUG_FMT
                         "SDEV OUTSTANDING CMDS"
                         "%d\n", ioc->name,
-                        atomic_read(&sdev->device_busy)));
+                        scsi_device_busy(sdev)));
             }
 
         }
@@ -413,12 +413,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
     dev_set_drvdata(&ccw_device->dev, adapter);
 
-    if (sysfs_create_group(&ccw_device->dev.kobj,
-                   &zfcp_sysfs_adapter_attrs))
-        goto failed;
-
-    if (zfcp_diag_sysfs_setup(adapter))
-        goto failed;
+    if (device_add_groups(&ccw_device->dev, zfcp_sysfs_adapter_attr_groups))
+        goto err_sysfs;
 
     /* report size limit per scatter-gather segment */
     adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;

@@ -427,8 +423,23 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
     return adapter;
 
+err_sysfs:
 failed:
-    zfcp_adapter_unregister(adapter);
+    /* TODO: make this more fine-granular */
+    cancel_delayed_work_sync(&adapter->scan_work);
+    cancel_work_sync(&adapter->stat_work);
+    cancel_work_sync(&adapter->ns_up_work);
+    cancel_work_sync(&adapter->version_change_lost_work);
+    zfcp_destroy_adapter_work_queue(adapter);
+
+    zfcp_fc_wka_ports_force_offline(adapter->gs);
+    zfcp_scsi_adapter_unregister(adapter);
+
+    zfcp_erp_thread_kill(adapter);
+    zfcp_dbf_adapter_unregister(adapter);
+    zfcp_qdio_destroy(adapter->qdio);
+
+    zfcp_ccw_adapter_put(adapter); /* final put to release */
     return ERR_PTR(-ENOMEM);
 }
 

@@ -444,8 +455,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
 
     zfcp_fc_wka_ports_force_offline(adapter->gs);
     zfcp_scsi_adapter_unregister(adapter);
-    zfcp_diag_sysfs_destroy(adapter);
-    sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
+    device_remove_groups(&cdev->dev, zfcp_sysfs_adapter_attr_groups);
 
     zfcp_erp_thread_kill(adapter);
     zfcp_dbf_adapter_unregister(adapter);
@@ -156,7 +156,7 @@ struct zfcp_adapter {
     u32         fsf_lic_version;
     u32         adapter_features;   /* FCP channel features */
     u32         connection_features;    /* host connection features */
     u32         hardware_version;   /* of FCP channel */
     u32         fc_security_algorithms; /* of FCP channel */
     u32         fc_security_algorithms_old; /* of FCP channel */
     u16         timer_ticks;        /* time int for a tick */

@@ -180,7 +180,7 @@ struct zfcp_adapter {
     rwlock_t        erp_lock;
     wait_queue_head_t   erp_done_wqh;
     struct zfcp_erp_action  erp_action; /* pending error recovery */
     atomic_t        erp_counter;
     u32         erp_total_count;    /* total nr of enqueued erp
                            actions */
     u32         erp_low_mem_count;  /* nr of erp actions waiting

@@ -217,7 +217,7 @@ struct zfcp_port {
     u32            d_id;        /* D_ID */
     u32            handle;      /* handle assigned by FSF */
     struct zfcp_erp_action erp_action;  /* pending error recovery */
     atomic_t       erp_counter;
     u32            maxframe_size;
     u32            supported_classes;
     u32            connection_info;
@@ -10,8 +10,6 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/string.h>
-#include <linux/kernfs.h>
-#include <linux/sysfs.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 

@@ -79,46 +77,6 @@ void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter)
     adapter->diagnostics = NULL;
 }
 
-/**
- * zfcp_diag_sysfs_setup() - Setup the sysfs-group for adapter-diagnostics.
- * @adapter: target adapter to which the group should be added.
- *
- * Return: 0 on success; Something else otherwise (see sysfs_create_group()).
- */
-int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter)
-{
-    int rc = sysfs_create_group(&adapter->ccw_device->dev.kobj,
-                    &zfcp_sysfs_diag_attr_group);
-    if (rc == 0)
-        adapter->diagnostics->sysfs_established = 1;
-
-    return rc;
-}
-
-/**
- * zfcp_diag_sysfs_destroy() - Remove the sysfs-group for adapter-diagnostics.
- * @adapter: target adapter from which the group should be removed.
- */
-void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter)
-{
-    if (adapter->diagnostics == NULL ||
-        !adapter->diagnostics->sysfs_established)
-        return;
-
-    /*
-     * We need this state-handling so we can prevent warnings being printed
-     * on the kernel-console in case we have to abort a halfway done
-     * zfcp_adapter_enqueue(), in which the sysfs-group was not yet
-     * established. sysfs_remove_group() does this checking as well, but
-     * still prints a warning in case we try to remove a group that has not
-     * been established before
-     */
-    adapter->diagnostics->sysfs_established = 0;
-    sysfs_remove_group(&adapter->ccw_device->dev.kobj,
-               &zfcp_sysfs_diag_attr_group);
-}
-
 /**
  * zfcp_diag_update_xdata() - Update a diagnostics buffer.
  * @hdr: the meta data to update.
@@ -40,8 +40,6 @@ struct zfcp_diag_header {
 /**
  * struct zfcp_diag_adapter - central storage for all diagnostics concerning an
  *               adapter.
- * @sysfs_established: flag showing that the associated sysfs-group was created
- *             during run of zfcp_adapter_enqueue().
  * @max_age: maximum age of data in diagnostic buffers before they need to be
  *        refreshed (in ms).
  * @port_data: data retrieved using exchange port data.

@@ -52,8 +50,6 @@ struct zfcp_diag_header {
  * @config_data.data: cached QTCB Bottom of command exchange config data.
  */
 struct zfcp_diag_adapter {
-    u64 sysfs_established   :1;
-
     unsigned long   max_age;
 
     struct zfcp_diag_adapter_port_data {

@@ -69,9 +65,6 @@ struct zfcp_diag_adapter {
 int zfcp_diag_adapter_setup(struct zfcp_adapter *const adapter);
 void zfcp_diag_adapter_free(struct zfcp_adapter *const adapter);
 
-int zfcp_diag_sysfs_setup(struct zfcp_adapter *const adapter);
-void zfcp_diag_sysfs_destroy(struct zfcp_adapter *const adapter);
-
 void zfcp_diag_update_xdata(struct zfcp_diag_header *const hdr,
                 const void *const data, const bool incomplete);
 
@@ -11,6 +11,7 @@
 #define ZFCP_EXT_H
 
 #include <linux/types.h>
+#include <linux/sysfs.h>
 #include <scsi/fc/fc_els.h>
 #include "zfcp_def.h"
 #include "zfcp_fc.h"

@@ -179,13 +180,12 @@ extern void zfcp_scsi_shost_update_port_data(
     const struct fsf_qtcb_bottom_port *const bottom);
 
 /* zfcp_sysfs.c */
+extern const struct attribute_group *zfcp_sysfs_adapter_attr_groups[];
 extern const struct attribute_group *zfcp_unit_attr_groups[];
-extern struct attribute_group zfcp_sysfs_adapter_attrs;
 extern const struct attribute_group *zfcp_port_attr_groups[];
 extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
-extern const struct attribute_group zfcp_sysfs_diag_attr_group;
 bool zfcp_sysfs_port_is_removing(const struct zfcp_port *const port);
 
 /* zfcp_unit.c */
@@ -846,7 +846,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
     if (adapter->req_no == 0)
         adapter->req_no++;
 
-    INIT_LIST_HEAD(&req->list);
     timer_setup(&req->timer, NULL, 0);
     init_completion(&req->completion);
 
@@ -20,6 +20,9 @@ static bool enable_multibuffer = true;
 module_param_named(datarouter, enable_multibuffer, bool, 0400);
 MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
 
+#define ZFCP_QDIO_REQUEST_RESCAN_MSECS  (MSEC_PER_SEC * 10)
+#define ZFCP_QDIO_REQUEST_SCAN_MSECS    MSEC_PER_SEC
+
 static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
                     unsigned int qdio_err)
 {

@@ -70,15 +73,41 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
         zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
         return;
     }
+}
 
-    /* cleanup all SBALs being program-owned now */
-    zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
+static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
+{
+    struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
+    struct ccw_device *cdev = qdio->adapter->ccw_device;
+    unsigned int start, error;
+    int completed;
 
-    spin_lock_irq(&qdio->stat_lock);
-    zfcp_qdio_account(qdio);
-    spin_unlock_irq(&qdio->stat_lock);
-    atomic_add(count, &qdio->req_q_free);
-    wake_up(&qdio->req_q_wq);
+    completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
+    if (completed > 0) {
+        if (error) {
+            zfcp_qdio_handler_error(qdio, "qdreqt1", error);
+        } else {
+            /* cleanup all SBALs being program-owned now */
+            zfcp_qdio_zero_sbals(qdio->req_q, start, completed);
+
+            spin_lock_irq(&qdio->stat_lock);
+            zfcp_qdio_account(qdio);
+            spin_unlock_irq(&qdio->stat_lock);
+            atomic_add(completed, &qdio->req_q_free);
+            wake_up(&qdio->req_q_wq);
+        }
+    }
+
+    if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+        timer_reduce(&qdio->request_timer,
+                 jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
+}
+
+static void zfcp_qdio_request_timer(struct timer_list *timer)
+{
+    struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);
+
+    tasklet_schedule(&qdio->request_tasklet);
 }
 
 static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,

@@ -139,8 +168,11 @@ static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
     unsigned int start, error;
     int completed;
 
-    /* Check the Response Queue, and kick off the Request Queue tasklet: */
-    completed = qdio_get_next_buffers(cdev, 0, &start, &error);
+    if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
+        tasklet_schedule(&qdio->request_tasklet);
+
+    /* Check the Response Queue: */
+    completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
     if (completed < 0)
         return;
     if (completed > 0)

@@ -286,7 +318,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 
     /*
      * This should actually be a spin_lock_bh(stat_lock), to protect against
-     * zfcp_qdio_int_req() in tasklet context.
+     * Request Queue completion processing in tasklet context.
      * But we can't do so (and are safe), as we always get called with IRQs
      * disabled by spin_lock_irq[save](req_q_lock).
      */

@@ -308,6 +340,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
         return retval;
     }
 
+    if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
+        tasklet_schedule(&qdio->request_tasklet);
+    else
+        timer_reduce(&qdio->request_timer,
+                 jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));
+
     /* account for transferred buffers */
     qdio->req_q_idx += sbal_number;
     qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

@@ -368,6 +406,8 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
     wake_up(&qdio->req_q_wq);
 
     tasklet_disable(&qdio->irq_tasklet);
+    tasklet_disable(&qdio->request_tasklet);
+    del_timer_sync(&qdio->request_timer);
     qdio_stop_irq(adapter->ccw_device);
     qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
 

@@ -428,8 +468,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
     init_data.int_parm = (unsigned long) qdio;
     init_data.input_sbal_addr_array = input_sbals;
     init_data.output_sbal_addr_array = output_sbals;
-    init_data.scan_threshold =
-        QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
 
     if (qdio_establish(cdev, &init_data))
         goto failed_establish;

@@ -472,6 +510,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
     atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
     atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
+    /* Enable processing for Request Queue completions: */
+    tasklet_enable(&qdio->request_tasklet);
     /* Enable processing for QDIO interrupts: */
     tasklet_enable(&qdio->irq_tasklet);
     /* This results in a qdio_start_irq(): */

@@ -495,6 +535,7 @@ void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
         return;
 
     tasklet_kill(&qdio->irq_tasklet);
+    tasklet_kill(&qdio->request_tasklet);
 
     if (qdio->adapter->ccw_device)
         qdio_free(qdio->adapter->ccw_device);

@@ -521,8 +562,11 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
 
     spin_lock_init(&qdio->req_q_lock);
     spin_lock_init(&qdio->stat_lock);
+    timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
     tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
+    tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
     tasklet_disable(&qdio->irq_tasklet);
+    tasklet_disable(&qdio->request_tasklet);
 
     adapter->qdio = qdio;
     return 0;
@@ -30,6 +30,9 @@
  * @req_q_util: used for accounting
  * @req_q_full: queue full incidents
  * @req_q_wq: used to wait for SBAL availability
+ * @irq_tasklet: used for QDIO interrupt processing
+ * @request_tasklet: used for Request Queue completion processing
+ * @request_timer: used to trigger the Request Queue completion processing
  * @adapter: adapter used in conjunction with this qdio structure
  * @max_sbale_per_sbal: qdio limit per sbal
  * @max_sbale_per_req: qdio limit per request

@@ -46,6 +49,8 @@ struct zfcp_qdio {
     atomic_t        req_q_full;
     wait_queue_head_t   req_q_wq;
     struct tasklet_struct   irq_tasklet;
+    struct tasklet_struct   request_tasklet;
+    struct timer_list   request_timer;
     struct zfcp_adapter *adapter;
     u16         max_sbale_per_sbal;
     u16         max_sbale_per_req;
@@ -327,10 +327,10 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
     list_del(&port->list);
     write_unlock_irq(&adapter->port_list_lock);
 
-    put_device(&port->dev);
-
     zfcp_erp_port_shutdown(port, 0, "syprs_1");
     device_unregister(&port->dev);
+
+    put_device(&port->dev); /* undo zfcp_get_port_by_wwpn() */
 out:
     zfcp_ccw_adapter_put(adapter);
     return retval ? retval : (ssize_t) count;

@@ -435,7 +435,7 @@ static struct attribute *zfcp_adapter_attrs[] = {
     NULL
 };
 
-struct attribute_group zfcp_sysfs_adapter_attrs = {
+static const struct attribute_group zfcp_sysfs_adapter_attr_group = {
     .attrs = zfcp_adapter_attrs,
 };
 

@@ -906,7 +906,13 @@ static struct attribute *zfcp_sysfs_diag_attrs[] = {
     NULL,
 };
 
-const struct attribute_group zfcp_sysfs_diag_attr_group = {
+static const struct attribute_group zfcp_sysfs_diag_attr_group = {
     .name = "diagnostics",
     .attrs = zfcp_sysfs_diag_attrs,
 };
+
+const struct attribute_group *zfcp_sysfs_adapter_attr_groups[] = {
+    &zfcp_sysfs_adapter_attr_group,
+    &zfcp_sysfs_diag_attr_group,
+    NULL,
+};
@@ -255,9 +255,9 @@ int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
         scsi_device_put(sdev);
     }
 
-    put_device(&unit->dev);
-
     device_unregister(&unit->dev);
 
+    put_device(&unit->dev); /* undo _zfcp_unit_find() */
+
     return 0;
 }
@@ -939,13 +939,13 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 /* This function will empty the response queue */
 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 {
-    u32 status_reg_value, response_que_value;
+    u32 status_reg_value;
     int count = 0, retval = 1;
 
     status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 
     while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
-        response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+        readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
         status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
         count++;
     }

@@ -1698,9 +1698,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
 {
     int heads, sectors, cylinders;
-    TW_Device_Extension *tw_dev;
-
-    tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
     if (capacity >= 0x200000) {
         heads = 255;

@@ -1809,14 +1806,11 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
     u32 num_sectors = 0x0;
     int i, sg_count;
     struct scsi_cmnd *srb = NULL;
-    struct scatterlist *sglist = NULL, *sg;
+    struct scatterlist *sg;
     int retval = 1;
 
-    if (tw_dev->srb[request_id]) {
+    if (tw_dev->srb[request_id])
         srb = tw_dev->srb[request_id];
-        if (scsi_sglist(srb))
-            sglist = scsi_sglist(srb);
-    }
 
     /* Initialize command packet */
     full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -295,14 +295,11 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
     TW_Command_Apache *command_packet;
     int i, sg_count;
     struct scsi_cmnd *srb = NULL;
-    struct scatterlist *sglist = NULL, *sg;
+    struct scatterlist *sg;
     int retval = 1;
 
-    if (tw_dev->srb[request_id]) {
+    if (tw_dev->srb[request_id])
         srb = tw_dev->srb[request_id];
-        if (scsi_sglist(srb))
-            sglist = scsi_sglist(srb);
-    }
 
     /* Initialize command packet */
     full_command_packet = tw_dev->command_packet_virt[request_id];

@@ -863,7 +860,6 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
     TW_Command_Full *full_command_packet;
     unsigned short error;
     char *error_str;
-    int retval = 1;
 
     header = tw_dev->sense_buffer_virt[i];
     full_command_packet = tw_dev->command_packet_virt[request_id];

@@ -895,7 +891,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
         goto out;
     }
 out:
-    return retval;
+    return 1;
 } /* End twl_fill_sense() */
 
 /* This function will free up device extension resources */

@@ -1408,9 +1404,6 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
 static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
 {
     int heads, sectors;
-    TW_Device_Extension *tw_dev;
-
-    tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
     if (capacity >= 0x200000) {
         heads = 255;
@@ -460,12 +460,12 @@ static int tw_check_errors(TW_Device_Extension *tw_dev)
 /* This function will empty the response que */
 static void tw_empty_response_que(TW_Device_Extension *tw_dev)
 {
-    u32 status_reg_value, response_que_value;
+    u32 status_reg_value;
 
     status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
 
     while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
-        response_que_value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
+        inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
         status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
     }
 } /* End tw_empty_response_que() */

@@ -1342,10 +1342,8 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
                  sector_t capacity, int geom[])
 {
     int heads, sectors, cylinders;
-    TW_Device_Extension *tw_dev;
 
     dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
-    tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
 
     heads = 64;
     sectors = 32;
@@ -980,9 +980,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 NCR_700_set_tag_neg_state(SCp->device,
 NCR_700_FINISHED_TAG_NEGOTIATION);
 
-/* check for contingent allegiance contitions */
-if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
-status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
+/* check for contingent allegiance conditions */
+if (hostdata->status[0] >> 1 == CHECK_CONDITION ||
+hostdata->status[0] >> 1 == COMMAND_TERMINATED) {
 struct NCR_700_command_slot *slot =
 (struct NCR_700_command_slot *)SCp->host_scribble;
 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
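The 53c700 hunk open-codes the removed status_byte() helper: the legacy masked status constants are the SAM status bytes shifted right by one, so "status >> 1" compares directly against CHECK_CONDITION and COMMAND_TERMINATED. A small self-contained check of that relationship follows; the numeric values are copied from the historical SCSI definitions as I recall them, so treat them as an assumption rather than a quote from this tree.

#include <assert.h>

/* SAM status values as sent on the wire. */
#define SAM_STAT_GOOD               0x00
#define SAM_STAT_CHECK_CONDITION    0x02
#define SAM_STAT_COMMAND_TERMINATED 0x22

/* Legacy "masked" constants used with the removed status_byte() macro. */
#define CHECK_CONDITION    0x01
#define COMMAND_TERMINATED 0x11

int main(void)
{
	unsigned char status = SAM_STAT_CHECK_CONDITION;

	/* old: status_byte(status) == CHECK_CONDITION
	 * new: compare the shifted raw byte directly */
	assert(status >> 1 == CHECK_CONDITION);
	assert(SAM_STAT_COMMAND_TERMINATED >> 1 == COMMAND_TERMINATED);
	assert(SAM_STAT_GOOD >> 1 == 0);
	return 0;
}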
@@ -3426,7 +3426,7 @@ Target Requested Completed Requested Completed Requested Completed\n\
 /*
 blogic_msg prints Driver Messages.
 */
+__printf(2, 4)
 static void blogic_msg(enum blogic_msglevel msglevel, char *fmt,
 struct blogic_adapter *adapter, ...)
 {
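The __printf(2, 4) added above tells the compiler that blogic_msg() takes its format string as parameter 2 while the variadic arguments only begin at parameter 4, because the adapter pointer sits in between. A hedged user-space sketch of the same annotation; log_msg() and struct adapter are illustrative names, and __printf is expanded here to the GCC attribute it wraps in the kernel.

#include <stdarg.h>
#include <stdio.h>

#define __printf(fmt_idx, va_idx) \
	__attribute__((format(printf, fmt_idx, va_idx)))

struct adapter;			/* stand-in for struct blogic_adapter */

/* Format is arg 2; varargs start at arg 4 because of the fixed pointer. */
static void __printf(2, 4)
log_msg(int level, const char *fmt, struct adapter *adapter, ...)
{
	va_list args;

	(void)level;
	va_start(args, adapter);
	vprintf(fmt, args);	/* callers' arguments are now type-checked */
	va_end(args);
}

int main(void)
{
	log_msg(0, "unit %d online\n", (struct adapter *)0, 3);
	return 0;
}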
@@ -1615,7 +1615,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
 
 unsigned char thisCard;
 CALL_BK_FN callback;
-unsigned char TID;
 struct sccb *pSaveSCCB;
 struct sccb_mgr_tar_info *currTar_Info;
 
@@ -1652,9 +1651,6 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
 }
 
 else {
-
-TID = p_Sccb->TargID;
-
 if (p_Sccb->Sccb_tag) {
 MDISABLE_INT(ioport);
 if (((struct sccb_card *)pCurrCard)->
@@ -4534,7 +4530,7 @@ static void FPT_phaseBusFree(u32 port, unsigned char p_card)
 *
 * Function: Auto Load Default Map
 *
-* Description: Load the Automation RAM with the defualt map values.
+* Description: Load the Automation RAM with the default map values.
 *
 *---------------------------------------------------------------------*/
 static void FPT_autoLoadDefaultMap(u32 p_port)
@ -269,7 +269,7 @@ static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* orc_exec_sb - Queue an SCB with the HA
|
* orc_exec_scb - Queue an SCB with the HA
|
||||||
* @host: host adapter the SCB belongs to
|
* @host: host adapter the SCB belongs to
|
||||||
* @scb: SCB to queue for execution
|
* @scb: SCB to queue for execution
|
||||||
*/
|
*/
|
||||||
|
@ -586,7 +586,7 @@ static int orc_reset_scsi_bus(struct orc_host * host)
|
||||||
* orc_device_reset - device reset handler
|
* orc_device_reset - device reset handler
|
||||||
* @host: host to reset
|
* @host: host to reset
|
||||||
* @cmd: command causing the reset
|
* @cmd: command causing the reset
|
||||||
* @target; target device
|
* @target: target device
|
||||||
*
|
*
|
||||||
* Reset registers, reset a hanging bus and kill active and disconnected
|
* Reset registers, reset a hanging bus and kill active and disconnected
|
||||||
* commands for target w/o soft reset
|
* commands for target w/o soft reset
|
||||||
|
@ -727,7 +727,7 @@ static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
|
||||||
spin_unlock_irqrestore(&(host->allocation_lock), flags);
|
spin_unlock_irqrestore(&(host->allocation_lock), flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* orchid_abort_scb - abort a command
|
* orchid_abort_scb - abort a command
|
||||||
*
|
*
|
||||||
* Abort a queued command that has been passed to the firmware layer
|
* Abort a queued command that has been passed to the firmware layer
|
||||||
|
@ -902,7 +902,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* inia100_queue - queue command with host
|
* inia100_queue_lck - queue command with host
|
||||||
* @cmd: Command block
|
* @cmd: Command block
|
||||||
* @done: Completion function
|
* @done: Completion function
|
||||||
*
|
*
|
||||||
|
@ -1088,8 +1088,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
|
||||||
unsigned long port, bios;
|
unsigned long port, bios;
|
||||||
int error = -ENODEV;
|
int error = -ENODEV;
|
||||||
u32 sz;
|
u32 sz;
|
||||||
unsigned long biosaddr;
|
|
||||||
char *bios_phys;
|
|
||||||
|
|
||||||
if (pci_enable_device(pdev))
|
if (pci_enable_device(pdev))
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -1139,9 +1137,6 @@ static int inia100_probe_one(struct pci_dev *pdev,
|
||||||
goto out_free_scb_array;
|
goto out_free_scb_array;
|
||||||
}
|
}
|
||||||
|
|
||||||
biosaddr = host->BIOScfg;
|
|
||||||
biosaddr = (biosaddr << 4);
|
|
||||||
bios_phys = phys_to_virt(biosaddr);
|
|
||||||
if (init_orchid(host)) { /* Initialize orchid chip */
|
if (init_orchid(host)) { /* Initialize orchid chip */
|
||||||
printk("inia100: initial orchid fail!!\n");
|
printk("inia100: initial orchid fail!!\n");
|
||||||
goto out_free_escb_array;
|
goto out_free_escb_array;
|
||||||
|
|
|
@ -786,8 +786,8 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* aac_probe_container - query a logical volume
|
* aac_probe_container_callback1 - query a logical volume
|
||||||
* @scsicmd: the scsi command block
|
* @scsicmd: the scsi command block
|
||||||
*
|
*
|
||||||
* Queries the controller about the given volume. The volume information
|
* Queries the controller about the given volume. The volume information
|
||||||
* is updated in the struct fsa_dev_info structure rather than returned.
|
* is updated in the struct fsa_dev_info structure rather than returned.
|
||||||
|
@ -838,7 +838,7 @@ struct scsi_inq {
|
||||||
};
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* InqStrCopy - string merge
|
* inqstrcpy - string merge
|
||||||
* @a: string to copy from
|
* @a: string to copy from
|
||||||
* @b: string to copy to
|
* @b: string to copy to
|
||||||
*
|
*
|
||||||
|
@ -1804,7 +1804,7 @@ static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* aac_get_safw_ciss_luns() Process topology change
|
* aac_get_safw_ciss_luns() - Process topology change
|
||||||
* @dev: aac_dev structure
|
* @dev: aac_dev structure
|
||||||
*
|
*
|
||||||
* Execute a CISS REPORT PHYS LUNS and process the results into
|
* Execute a CISS REPORT PHYS LUNS and process the results into
|
||||||
|
@ -1881,11 +1881,6 @@ static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
|
||||||
return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
|
return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
|
|
||||||
{
|
|
||||||
return dev->safw_phys_luns->lun[lun].node_ident[8];
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
|
static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
|
||||||
int bus, int target)
|
int bus, int target)
|
||||||
{
|
{
|
||||||
|
|
|
@ -472,7 +472,7 @@ static int check_revision(struct aac_dev *dev, void __user *arg)
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* aac_send_raw_scb
|
* aac_send_raw_srb()
|
||||||
* @dev: adapter is being processed
|
* @dev: adapter is being processed
|
||||||
* @arg: arguments to the send call
|
* @arg: arguments to the send call
|
||||||
*/
|
*/
|
||||||
|
|
|
@ -323,7 +323,7 @@ void aac_fib_init(struct fib *fibptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* fib_deallocate - deallocate a fib
|
* fib_dealloc - deallocate a fib
|
||||||
* @fibptr: fib to deallocate
|
* @fibptr: fib to deallocate
|
||||||
*
|
*
|
||||||
* Will deallocate and return to the free pool the FIB pointed to by the
|
* Will deallocate and return to the free pool the FIB pointed to by the
|
||||||
|
@ -1950,7 +1950,7 @@ void aac_src_reinit_aif_worker(struct work_struct *work)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* aac_handle_sa_aif Handle a message from the firmware
|
* aac_handle_sa_aif - Handle a message from the firmware
|
||||||
* @dev: Which adapter this fib is from
|
* @dev: Which adapter this fib is from
|
||||||
* @fibptr: Pointer to fibptr from adapter
|
* @fibptr: Pointer to fibptr from adapter
|
||||||
*
|
*
|
||||||
|
|
|
@ -532,7 +532,7 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* aac_rx_init - initialize an i960 based AAC card
|
* _aac_rx_init - initialize an i960 based AAC card
|
||||||
* @dev: device to configure
|
* @dev: device to configure
|
||||||
*
|
*
|
||||||
* Allocate and set up resources for the i960 based AAC variants. The
|
* Allocate and set up resources for the i960 based AAC variants. The
|
||||||
|
|
|
@ -1799,7 +1799,7 @@ typedef struct adv_req {
|
||||||
* Field naming convention:
|
* Field naming convention:
|
||||||
*
|
*
|
||||||
* *_able indicates both whether a feature should be enabled or disabled
|
* *_able indicates both whether a feature should be enabled or disabled
|
||||||
* and whether a device isi capable of the feature. At initialization
|
* and whether a device is capable of the feature. At initialization
|
||||||
* this field may be set, but later if a device is found to be incapable
|
* this field may be set, but later if a device is found to be incapable
|
||||||
* of the feature, the field is cleared.
|
* of the feature, the field is cleared.
|
||||||
*/
|
*/
|
||||||
|
|
|
@@ -33,7 +33,7 @@
 #ifdef ASD_DEBUG
 #define ASD_DPRINTK asd_printk
 #else
-#define ASD_DPRINTK(fmt, ...)
+#define ASD_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
 #endif
 
 /* 2*ITNL timeout + 1 second */
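Pointing the disabled ASD_DPRINTK() at no_printk() keeps the compiler's printf checking even when debugging is compiled out, instead of silently swallowing format bugs. A user-space sketch of the idea with a local no_printk() stand-in; MY_DEBUG and MY_DPRINTK are illustrative names.

#include <stdio.h>

/* Stand-in for the kernel's no_printk(): checked like printf, prints nothing. */
static inline __attribute__((format(printf, 1, 2)))
int no_printk(const char *fmt, ...)
{
	(void)fmt;
	return 0;
}

#ifdef MY_DEBUG
#define MY_DPRINTK(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* The old empty "#define MY_DPRINTK(fmt, ...)" hid mismatched arguments. */
#define MY_DPRINTK(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif

int main(void)
{
	MY_DPRINTK("phy %d is up\n", 3);	/* format checked either way */
	return 0;
}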
@ -720,154 +720,8 @@ static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq)
|
||||||
PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
|
PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS);
|
||||||
}
|
}
|
||||||
|
|
||||||
#if 0
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* asd_dump_ddb_site -- dump a CSEQ DDB site
|
* asd_dump_seq_state -- dump CSEQ and LSEQ states
|
||||||
* @asd_ha: pointer to host adapter structure
|
|
||||||
* @site_no: site number of interest
|
|
||||||
*/
|
|
||||||
void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no)
|
|
||||||
{
|
|
||||||
if (site_no >= asd_ha->hw_prof.max_ddbs)
|
|
||||||
return;
|
|
||||||
|
|
||||||
#define DDB_FIELDB(__name) \
|
|
||||||
asd_ddbsite_read_byte(asd_ha, site_no, \
|
|
||||||
offsetof(struct asd_ddb_ssp_smp_target_port, __name))
|
|
||||||
#define DDB2_FIELDB(__name) \
|
|
||||||
asd_ddbsite_read_byte(asd_ha, site_no, \
|
|
||||||
offsetof(struct asd_ddb_stp_sata_target_port, __name))
|
|
||||||
#define DDB_FIELDW(__name) \
|
|
||||||
asd_ddbsite_read_word(asd_ha, site_no, \
|
|
||||||
offsetof(struct asd_ddb_ssp_smp_target_port, __name))
|
|
||||||
|
|
||||||
#define DDB_FIELDD(__name) \
|
|
||||||
asd_ddbsite_read_dword(asd_ha, site_no, \
|
|
||||||
offsetof(struct asd_ddb_ssp_smp_target_port, __name))
|
|
||||||
|
|
||||||
asd_printk("DDB: 0x%02x\n", site_no);
|
|
||||||
asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type));
|
|
||||||
asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate));
|
|
||||||
asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag)));
|
|
||||||
asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head)));
|
|
||||||
asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended));
|
|
||||||
asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type));
|
|
||||||
asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def));
|
|
||||||
asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features));
|
|
||||||
asd_printk("Pathway Blocked Count: 0x%02x\n",
|
|
||||||
DDB_FIELDB(pathway_blocked_count));
|
|
||||||
asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time));
|
|
||||||
asd_printk("more_compat_features: 0x%08x\n",
|
|
||||||
DDB_FIELDD(more_compat_features));
|
|
||||||
asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask));
|
|
||||||
asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags));
|
|
||||||
asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2));
|
|
||||||
asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail));
|
|
||||||
asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail));
|
|
||||||
asd_printk("Active Task Count: 0x%04x\n",
|
|
||||||
DDB_FIELDW(active_task_count));
|
|
||||||
asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason));
|
|
||||||
asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout));
|
|
||||||
asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp));
|
|
||||||
}
|
|
||||||
|
|
||||||
void asd_dump_ddb_0(struct asd_ha_struct *asd_ha)
|
|
||||||
{
|
|
||||||
#define DDB0_FIELDB(__name) \
|
|
||||||
asd_ddbsite_read_byte(asd_ha, 0, \
|
|
||||||
offsetof(struct asd_ddb_seq_shared, __name))
|
|
||||||
#define DDB0_FIELDW(__name) \
|
|
||||||
asd_ddbsite_read_word(asd_ha, 0, \
|
|
||||||
offsetof(struct asd_ddb_seq_shared, __name))
|
|
||||||
|
|
||||||
#define DDB0_FIELDD(__name) \
|
|
||||||
asd_ddbsite_read_dword(asd_ha,0 , \
|
|
||||||
offsetof(struct asd_ddb_seq_shared, __name))
|
|
||||||
|
|
||||||
#define DDB0_FIELDA(__name, _o) \
|
|
||||||
asd_ddbsite_read_byte(asd_ha, 0, \
|
|
||||||
offsetof(struct asd_ddb_seq_shared, __name)+_o)
|
|
||||||
|
|
||||||
|
|
||||||
asd_printk("DDB: 0\n");
|
|
||||||
asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head));
|
|
||||||
asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail));
|
|
||||||
asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt));
|
|
||||||
asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head));
|
|
||||||
asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail));
|
|
||||||
asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock));
|
|
||||||
asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag));
|
|
||||||
asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt));
|
|
||||||
asd_printk("est_nexus_buf_thresh:%04x\n",
|
|
||||||
DDB0_FIELDW(est_nexus_buf_thresh));
|
|
||||||
asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active));
|
|
||||||
asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up));
|
|
||||||
asd_printk("port_map_by_links:%02x %02x %02x %02x "
|
|
||||||
"%02x %02x %02x %02x\n",
|
|
||||||
DDB0_FIELDA(port_map_by_links, 0),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 1),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 2),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 3),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 4),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 5),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 6),
|
|
||||||
DDB0_FIELDA(port_map_by_links, 7));
|
|
||||||
}
|
|
||||||
|
|
||||||
static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no)
|
|
||||||
{
|
|
||||||
|
|
||||||
#define SCB_FIELDB(__name) \
|
|
||||||
asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \
|
|
||||||
+ offsetof(struct initiate_ssp_task, __name))
|
|
||||||
#define SCB_FIELDW(__name) \
|
|
||||||
asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \
|
|
||||||
+ offsetof(struct initiate_ssp_task, __name))
|
|
||||||
#define SCB_FIELDD(__name) \
|
|
||||||
asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \
|
|
||||||
+ offsetof(struct initiate_ssp_task, __name))
|
|
||||||
|
|
||||||
asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len));
|
|
||||||
asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type));
|
|
||||||
asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag));
|
|
||||||
asd_printk("Target Port Xfer Tag: 0x%04x.\n",
|
|
||||||
SCB_FIELDW(ssp_frame.tptt));
|
|
||||||
asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs));
|
|
||||||
asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* asd_dump_scb_sites -- dump currently used CSEQ SCB sites
|
|
||||||
* @asd_ha: pointer to host adapter struct
|
|
||||||
*/
|
|
||||||
void asd_dump_scb_sites(struct asd_ha_struct *asd_ha)
|
|
||||||
{
|
|
||||||
u16 site_no;
|
|
||||||
|
|
||||||
for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) {
|
|
||||||
u8 opcode;
|
|
||||||
|
|
||||||
if (!SCB_SITE_VALID(site_no))
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* We are only interested in SCB sites currently used.
|
|
||||||
*/
|
|
||||||
opcode = asd_scbsite_read_byte(asd_ha, site_no,
|
|
||||||
offsetof(struct scb_header,
|
|
||||||
opcode));
|
|
||||||
if (opcode == 0xFF)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
asd_printk("\nSCB: 0x%x\n", site_no);
|
|
||||||
asd_dump_scb_site(asd_ha, site_no);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* 0 */
|
|
||||||
|
|
||||||
/**
|
|
||||||
* ads_dump_seq_state -- dump CSEQ and LSEQ states
|
|
||||||
* @asd_ha: pointer to host adapter structure
|
* @asd_ha: pointer to host adapter structure
|
||||||
* @lseq_mask: mask of LSEQs of interest
|
* @lseq_mask: mask of LSEQs of interest
|
||||||
*/
|
*/
|
||||||
|
@ -908,42 +762,4 @@ void asd_dump_frame_rcvd(struct asd_phy *phy,
|
||||||
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
|
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
#if 0
|
|
||||||
|
|
||||||
static void asd_dump_scb(struct asd_ascb *ascb, int ind)
|
|
||||||
{
|
|
||||||
asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, "
|
|
||||||
"index:%d, opcode:0x%02x\n",
|
|
||||||
ind, ascb->dma_scb.vaddr,
|
|
||||||
(unsigned long long)ascb->dma_scb.dma_handle,
|
|
||||||
(unsigned long long)
|
|
||||||
le64_to_cpu(ascb->scb->header.next_scb),
|
|
||||||
le16_to_cpu(ascb->scb->header.index),
|
|
||||||
ascb->scb->header.opcode);
|
|
||||||
}
|
|
||||||
|
|
||||||
void asd_dump_scb_list(struct asd_ascb *ascb, int num)
|
|
||||||
{
|
|
||||||
int i = 0;
|
|
||||||
|
|
||||||
asd_printk("dumping %d scbs:\n", num);
|
|
||||||
|
|
||||||
asd_dump_scb(ascb, i++);
|
|
||||||
--num;
|
|
||||||
|
|
||||||
if (num > 0 && !list_empty(&ascb->list)) {
|
|
||||||
struct list_head *el;
|
|
||||||
|
|
||||||
list_for_each(el, &ascb->list) {
|
|
||||||
struct asd_ascb *s = list_entry(el, struct asd_ascb,
|
|
||||||
list);
|
|
||||||
asd_dump_scb(s, i++);
|
|
||||||
if (--num <= 0)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* 0 */
|
|
||||||
|
|
||||||
#endif /* ASD_DEBUG */
|
#endif /* ASD_DEBUG */
|
||||||
|
|
|
@ -903,7 +903,7 @@ static void asd_dch_sas_isr(struct asd_ha_struct *asd_ha)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ads_rbi_exsi_isr -- process external system interface interrupt (INITERR)
|
* asd_rbi_exsi_isr -- process external system interface interrupt (INITERR)
|
||||||
* @asd_ha: pointer to host adapter structure
|
* @asd_ha: pointer to host adapter structure
|
||||||
*/
|
*/
|
||||||
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
|
static void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha)
|
||||||
|
@ -1144,7 +1144,7 @@ static void asd_swap_head_scb(struct asd_ha_struct *asd_ha,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* asd_start_timers -- (add and) start timers of SCBs
|
* asd_start_scb_timers -- (add and) start timers of SCBs
|
||||||
* @list: pointer to struct list_head of the scbs
|
* @list: pointer to struct list_head of the scbs
|
||||||
*
|
*
|
||||||
* If an SCB in the @list has no timer function, assign the default
|
* If an SCB in the @list has no timer function, assign the default
|
||||||
|
|
|
@ -1244,7 +1244,7 @@ int asd_chk_write_status(struct asd_ha_struct *asd_ha,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* asd_hwi_erase_nv_sector - Erase the flash memory sectors.
|
* asd_erase_nv_sector - Erase the flash memory sectors.
|
||||||
* @asd_ha: pointer to the host adapter structure
|
* @asd_ha: pointer to the host adapter structure
|
||||||
* @flash_addr: pointer to offset from flash memory
|
* @flash_addr: pointer to offset from flash memory
|
||||||
* @size: total bytes to erase.
|
* @size: total bytes to erase.
|
||||||
|
|
|
@ -612,7 +612,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
|
||||||
return IRQ_HANDLED;
|
return IRQ_HANDLED;
|
||||||
}
|
}
|
||||||
/**
|
/**
|
||||||
* atp870u_queuecommand - Queue SCSI command
|
* atp870u_queuecommand_lck - Queue SCSI command
|
||||||
* @req_p: request block
|
* @req_p: request block
|
||||||
* @done: completion function
|
* @done: completion function
|
||||||
*
|
*
|
||||||
|
@ -711,16 +711,15 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
|
||||||
|
|
||||||
static DEF_SCSI_QCMD(atp870u_queuecommand)
|
static DEF_SCSI_QCMD(atp870u_queuecommand)
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* send_s870 - send a command to the controller
|
* send_s870 - send a command to the controller
|
||||||
* @host: host
|
|
||||||
*
|
*
|
||||||
* On entry there is work queued to be done. We move some of that work to the
|
* On entry there is work queued to be done. We move some of that work to the
|
||||||
* controller itself.
|
* controller itself.
|
||||||
*
|
*
|
||||||
* Caller holds the host lock.
|
* Caller holds the host lock.
|
||||||
*/
|
*/
|
||||||
static void send_s870(struct atp_unit *dev,unsigned char c)
|
static void send_s870(struct atp_unit *dev, unsigned char c)
|
||||||
{
|
{
|
||||||
struct scsi_cmnd *workreq = NULL;
|
struct scsi_cmnd *workreq = NULL;
|
||||||
unsigned int i;//,k;
|
unsigned int i;//,k;
|
||||||
|
|
|
@ -295,7 +295,7 @@ void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* beiscsi_set_vlan_tag()- Set the VLAN TAG
|
* beiscsi_iface_config_vlan()- Set the VLAN TAG
|
||||||
* @shost: Scsi Host for the driver instance
|
* @shost: Scsi Host for the driver instance
|
||||||
* @iface_param: Interface paramters
|
* @iface_param: Interface paramters
|
||||||
*
|
*
|
||||||
|
|
|
@ -4926,13 +4926,13 @@ void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
|
||||||
schedule_work(&phba->boot_work);
|
schedule_work(&phba->boot_work);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
|
||||||
|
/*
|
||||||
|
* beiscsi_show_boot_tgt_info()
|
||||||
* Boot flag info for iscsi-utilities
|
* Boot flag info for iscsi-utilities
|
||||||
* Bit 0 Block valid flag
|
* Bit 0 Block valid flag
|
||||||
* Bit 1 Firmware booting selected
|
* Bit 1 Firmware booting selected
|
||||||
*/
|
*/
|
||||||
#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
|
|
||||||
|
|
||||||
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
|
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
|
||||||
{
|
{
|
||||||
struct beiscsi_hba *phba = data;
|
struct beiscsi_hba *phba = data;
|
||||||
|
|
|
@ -1256,7 +1256,7 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* beiscsi_phys_port()- Display Physical Port Identifier
|
* beiscsi_phys_port_disp()- Display Physical Port Identifier
|
||||||
* @dev: ptr to device not used.
|
* @dev: ptr to device not used.
|
||||||
* @attr: device attribute, not used.
|
* @attr: device attribute, not used.
|
||||||
* @buf: contains formatted text port identifier
|
* @buf: contains formatted text port identifier
|
||||||
|
|
|
@ -1193,7 +1193,7 @@ enum {
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* defintions for CT reason code
|
* definitions for CT reason code
|
||||||
*/
|
*/
|
||||||
enum {
|
enum {
|
||||||
CT_RSN_INV_CMD = 0x01,
|
CT_RSN_INV_CMD = 0x01,
|
||||||
|
@ -1240,7 +1240,7 @@ enum {
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* defintions for the explanation code for all servers
|
* definitions for the explanation code for all servers
|
||||||
*/
|
*/
|
||||||
enum {
|
enum {
|
||||||
CT_EXP_AUTH_EXCEPTION = 0xF1,
|
CT_EXP_AUTH_EXCEPTION = 0xF1,
|
||||||
|
|
|
@ -217,9 +217,6 @@ struct bfa_vf_event_s {
|
||||||
u32 undefined;
|
u32 undefined;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bfa_fcs_s;
|
|
||||||
struct bfa_fcs_fabric_s;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* @todo : need to move to a global config file.
|
* @todo : need to move to a global config file.
|
||||||
* Maximum Rports supported per port (physical/logical).
|
* Maximum Rports supported per port (physical/logical).
|
||||||
|
|
|
@@ -1408,7 +1408,7 @@ static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
 u32 resid_len,
 struct fchs_s *rsp_fchs);
 static void bfa_fcs_lport_fdmi_timeout(void *arg);
-static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+static int bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
 u8 *pyld);
 static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
 u8 *pyld);
@@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
 (u8 *) ((struct ct_hdr_s *) pyld
 + 1));
+if (attr_len < 0)
+return;
 
 bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 FC_CLASS_3, (len + attr_len), &fchs,
@@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
 }
 
-static u16
+static int
 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 {
 struct bfa_fcs_lport_s *port = fdmi->ms->port;
-struct bfa_fcs_fdmi_hba_attr_s hba_attr;
-struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr;
 struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
 struct fdmi_attr_s *attr;
+int len;
 u8 *curr_ptr;
-u16 len, count;
-u16 templen;
+u16 templen, count;
+
+fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL);
+if (!fcs_hba_attr)
+return -ENOMEM;
 
 /*
 * get hba attributes
@@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 
 rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
 
+kfree(fcs_hba_attr);
+
 return len;
 }
 
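The rhba hunks above move a large FDMI attribute structure off the stack and let the payload builder report allocation failure, which is why its return type becomes int and the caller bails out on a negative length. A stripped-down sketch of that shape; struct big_attr and build_payload() are hypothetical, and only the kzalloc()/kfree()/-ENOMEM flow mirrors the change.

#include <linux/slab.h>
#include <linux/types.h>

struct big_attr {			/* illustrative oversized object */
	char model[256];
	char fw_version[256];
};

/* Returns the built payload length, or -ENOMEM on allocation failure. */
static int build_payload(u8 *pyld)
{
	struct big_attr *attr;
	int len = 0;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);	/* was a stack variable */
	if (!attr)
		return -ENOMEM;

	/* ... fill *attr and copy pieces into pyld, growing len ... */

	kfree(attr);
	return len;
}

A caller then checks the sign of the result before using it as a length, just as the send_rhba hunk does with "if (attr_len < 0) return;".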
@ -3409,7 +3409,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
|
||||||
|
|
||||||
drv_fcxp->port = fcs_port->bfad_port;
|
drv_fcxp->port = fcs_port->bfad_port;
|
||||||
|
|
||||||
if (drv_fcxp->port->bfad == 0)
|
if (!drv_fcxp->port->bfad)
|
||||||
drv_fcxp->port->bfad = bfad;
|
drv_fcxp->port->bfad = bfad;
|
||||||
|
|
||||||
/* Fetch the bfa_rport - if nexus needed */
|
/* Fetch the bfa_rport - if nexus needed */
|
||||||
|
|
|
@ -1796,7 +1796,7 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
|
||||||
/**
|
/**
|
||||||
* bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
|
* bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
|
||||||
*
|
*
|
||||||
* @handle: transport handle pointing to adapter struture
|
* @handle: transport handle pointing to adapter structure
|
||||||
*/
|
*/
|
||||||
static int bnx2fc_ulp_get_stats(void *handle)
|
static int bnx2fc_ulp_get_stats(void *handle)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1331,7 +1331,7 @@ static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bnx2fc_indicae_kcqe - process KCQE
|
* bnx2fc_indicate_kcqe() - process KCQE
|
||||||
*
|
*
|
||||||
* @context: adapter structure pointer
|
* @context: adapter structure pointer
|
||||||
* @kcq: kcqe pointer
|
* @kcq: kcqe pointer
|
||||||
|
|
|
@ -819,7 +819,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bnx2i_free_session_resc - free qp resources for the session
|
* bnx2fc_free_session_resc - free qp resources for the session
|
||||||
*
|
*
|
||||||
* @hba: adapter structure pointer
|
* @hba: adapter structure pointer
|
||||||
* @tgt: bnx2fc_rport structure pointer
|
* @tgt: bnx2fc_rport structure pointer
|
||||||
|
|
|
@@ -2206,10 +2206,8 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
 {
 struct bnx2i_conn *bnx2i_conn;
 u32 iscsi_cid;
-char warn_notice[] = "iscsi_warning";
-char error_notice[] = "iscsi_error";
-char additional_notice[64];
-char *message;
+const char *additional_notice = "";
+const char *message;
 int need_recovery;
 u64 err_mask64;
 
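Switching additional_notice from a 64-byte stack buffer filled by strcpy() to a const char * means each case in the following hunk simply points at a string literal in .rodata: no copying, no truncation risk, and less stack use. A compact sketch of the pattern; the status codes and strings here are placeholders, not the real iSCSI completion codes.

static const char *iscsi_err_to_str(unsigned int completion_status)
{
	const char *msg = "";	/* was: char buf[64]; strcpy(buf, "..."); */

	switch (completion_status) {
	case 1:
		msg = "hdr digest err";
		break;
	case 2:
		msg = "data digest err";
		break;
	default:
		msg = "unknown err";
		break;
	}
	return msg;
}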
@ -2224,133 +2222,132 @@ static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
|
||||||
|
|
||||||
if (err_mask64 & iscsi_error_mask) {
|
if (err_mask64 & iscsi_error_mask) {
|
||||||
need_recovery = 0;
|
need_recovery = 0;
|
||||||
message = warn_notice;
|
message = "iscsi_warning";
|
||||||
} else {
|
} else {
|
||||||
need_recovery = 1;
|
need_recovery = 1;
|
||||||
message = error_notice;
|
message = "iscsi_error";
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (iscsi_err->completion_status) {
|
switch (iscsi_err->completion_status) {
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
|
case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
|
||||||
strcpy(additional_notice, "hdr digest err");
|
additional_notice = "hdr digest err";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
|
case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
|
||||||
strcpy(additional_notice, "data digest err");
|
additional_notice = "data digest err";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
|
||||||
strcpy(additional_notice, "wrong opcode rcvd");
|
additional_notice = "wrong opcode rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
|
||||||
strcpy(additional_notice, "AHS len > 0 rcvd");
|
additional_notice = "AHS len > 0 rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
|
||||||
strcpy(additional_notice, "invalid ITT rcvd");
|
additional_notice = "invalid ITT rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
|
||||||
strcpy(additional_notice, "wrong StatSN rcvd");
|
additional_notice = "wrong StatSN rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
|
||||||
strcpy(additional_notice, "wrong DataSN rcvd");
|
additional_notice = "wrong DataSN rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
|
||||||
strcpy(additional_notice, "pend R2T violation");
|
additional_notice = "pend R2T violation";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
|
||||||
strcpy(additional_notice, "ERL0, UO");
|
additional_notice = "ERL0, UO";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
|
||||||
strcpy(additional_notice, "ERL0, U1");
|
additional_notice = "ERL0, U1";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
|
||||||
strcpy(additional_notice, "ERL0, U2");
|
additional_notice = "ERL0, U2";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
|
||||||
strcpy(additional_notice, "ERL0, U3");
|
additional_notice = "ERL0, U3";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
|
||||||
strcpy(additional_notice, "ERL0, U4");
|
additional_notice = "ERL0, U4";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
|
||||||
strcpy(additional_notice, "ERL0, U5");
|
additional_notice = "ERL0, U5";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
|
||||||
strcpy(additional_notice, "ERL0, U6");
|
additional_notice = "ERL0, U6";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
|
||||||
strcpy(additional_notice, "invalid resi len");
|
additional_notice = "invalid resi len";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
|
||||||
strcpy(additional_notice, "MRDSL violation");
|
additional_notice = "MRDSL violation";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
|
||||||
strcpy(additional_notice, "F-bit not set");
|
additional_notice = "F-bit not set";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
|
||||||
strcpy(additional_notice, "invalid TTT");
|
additional_notice = "invalid TTT";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
|
||||||
strcpy(additional_notice, "invalid DataSN");
|
additional_notice = "invalid DataSN";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
|
||||||
strcpy(additional_notice, "burst len violation");
|
additional_notice = "burst len violation";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
|
||||||
strcpy(additional_notice, "buf offset violation");
|
additional_notice = "buf offset violation";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
|
||||||
strcpy(additional_notice, "invalid LUN field");
|
additional_notice = "invalid LUN field";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
|
||||||
strcpy(additional_notice, "invalid R2TSN field");
|
additional_notice = "invalid R2TSN field";
|
||||||
break;
|
break;
|
||||||
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
|
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \
|
||||||
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
|
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
|
||||||
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
|
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
|
||||||
strcpy(additional_notice, "invalid cmd len1");
|
additional_notice = "invalid cmd len1";
|
||||||
break;
|
break;
|
||||||
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
|
#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \
|
||||||
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
|
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
|
||||||
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
|
case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
|
||||||
strcpy(additional_notice, "invalid cmd len2");
|
additional_notice = "invalid cmd len2";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
|
||||||
strcpy(additional_notice,
|
additional_notice = "pend r2t exceeds MaxOutstandingR2T value";
|
||||||
"pend r2t exceeds MaxOutstandingR2T value");
|
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
|
||||||
strcpy(additional_notice, "TTT is rsvd");
|
additional_notice = "TTT is rsvd";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
|
||||||
strcpy(additional_notice, "MBL violation");
|
additional_notice = "MBL violation";
|
||||||
break;
|
break;
|
||||||
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
|
#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \
|
||||||
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
|
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
|
||||||
case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
|
case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
|
||||||
strcpy(additional_notice, "data seg len != 0");
|
additional_notice = "data seg len != 0";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
|
||||||
strcpy(additional_notice, "reject pdu len error");
|
additional_notice = "reject pdu len error";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
|
||||||
strcpy(additional_notice, "async pdu len error");
|
additional_notice = "async pdu len error";
|
||||||
break;
|
break;
|
||||||
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
|
case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
|
||||||
strcpy(additional_notice, "nopin pdu len error");
|
additional_notice = "nopin pdu len error";
|
||||||
break;
|
break;
|
||||||
#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
|
#define BNX2_ERR_PEND_R2T_IN_CLEANUP \
|
||||||
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
|
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
|
||||||
case BNX2_ERR_PEND_R2T_IN_CLEANUP:
|
case BNX2_ERR_PEND_R2T_IN_CLEANUP:
|
||||||
strcpy(additional_notice, "pend r2t in cleanup");
|
additional_notice = "pend r2t in cleanup";
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
|
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
|
||||||
strcpy(additional_notice, "IP fragments rcvd");
|
additional_notice = "IP fragments rcvd";
|
||||||
break;
|
break;
|
||||||
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
|
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
|
||||||
strcpy(additional_notice, "IP options error");
|
additional_notice = "IP options error";
|
||||||
break;
|
break;
|
||||||
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
|
case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
|
||||||
strcpy(additional_notice, "urgent flag error");
|
additional_notice = "urgent flag error";
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
printk(KERN_ALERT "iscsi_err - unknown err %x\n",
|
printk(KERN_ALERT "iscsi_err - unknown err %x\n",
|
||||||
|
|
|
@ -104,7 +104,7 @@ static ssize_t bnx2i_show_ccell_info(struct device *dev,
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bnx2i_get_link_state - set command cell (HQ) size
|
* bnx2i_set_ccell_info - set command cell (HQ) size
|
||||||
* @dev: device pointer
|
* @dev: device pointer
|
||||||
* @attr: device attribute (unused)
|
* @attr: device attribute (unused)
|
||||||
* @buf: buffer to return current SQ size parameter
|
* @buf: buffer to return current SQ size parameter
|
||||||
|
|
|
@ -244,7 +244,7 @@ csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
|
||||||
*
|
*
|
||||||
* Reads/writes an [almost] arbitrary memory region in the firmware: the
|
* Reads/writes an [almost] arbitrary memory region in the firmware: the
|
||||||
* firmware memory address, length and host buffer must be aligned on
|
* firmware memory address, length and host buffer must be aligned on
|
||||||
* 32-bit boudaries. The memory is transferred as a raw byte sequence
|
* 32-bit boundaries. The memory is transferred as a raw byte sequence
|
||||||
* from/to the firmware's memory. If this memory contains data
|
* from/to the firmware's memory. If this memory contains data
|
||||||
* structures which contain multi-byte integers, it's the callers
|
* structures which contain multi-byte integers, it's the callers
|
||||||
* responsibility to perform appropriate byte order conversions.
|
* responsibility to perform appropriate byte order conversions.
|
||||||
|
|
|
@ -147,9 +147,9 @@ csio_scsi_itnexus_loss_error(uint16_t error)
|
||||||
case FW_ERR_RDEV_LOST:
|
case FW_ERR_RDEV_LOST:
|
||||||
case FW_ERR_RDEV_LOGO:
|
case FW_ERR_RDEV_LOGO:
|
||||||
case FW_ERR_RDEV_IMPL_LOGO:
|
case FW_ERR_RDEV_IMPL_LOGO:
|
||||||
return 1;
|
return true;
|
||||||
}
|
}
|
||||||
return 0;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -1177,7 +1177,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* cxgb3i_setup_conn_digest - setup conn. digest setting
|
* ddp_setup_conn_digest - setup conn. digest setting
|
||||||
* @csk: cxgb tcp socket
|
* @csk: cxgb tcp socket
|
||||||
* @tid: connection id
|
* @tid: connection id
|
||||||
* @hcrc: header digest enabled
|
* @hcrc: header digest enabled
|
||||||
|
|
|
@ -1357,7 +1357,7 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* process_hrrq() - process the read-response queue
|
* process_hrrq() - process the read-response queue
|
||||||
* @afu: AFU associated with the host.
|
* @hwq: HWQ associated with the host.
|
||||||
* @doneq: Queue of commands harvested from the RRQ.
|
* @doneq: Queue of commands harvested from the RRQ.
|
||||||
* @budget: Threshold of RRQ entries to process.
|
* @budget: Threshold of RRQ entries to process.
|
||||||
*
|
*
|
||||||
|
@ -1997,7 +1997,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
|
||||||
/**
|
/**
|
||||||
* init_mc() - create and register as the master context
|
* init_mc() - create and register as the master context
|
||||||
* @cfg: Internal structure associated with the host.
|
* @cfg: Internal structure associated with the host.
|
||||||
* index: HWQ Index of the master context.
|
* @index: HWQ Index of the master context.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, -errno on failure
|
* Return: 0 on success, -errno on failure
|
||||||
*/
|
*/
|
||||||
|
@ -3294,7 +3294,7 @@ static char *decode_hioctl(unsigned int cmd)
|
||||||
/**
|
/**
|
||||||
* cxlflash_lun_provision() - host LUN provisioning handler
|
* cxlflash_lun_provision() - host LUN provisioning handler
|
||||||
* @cfg: Internal structure associated with the host.
|
* @cfg: Internal structure associated with the host.
|
||||||
* @arg: Kernel copy of userspace ioctl data structure.
|
* @lunprov: Kernel copy of userspace ioctl data structure.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, -errno on failure
|
* Return: 0 on success, -errno on failure
|
||||||
*/
|
*/
|
||||||
|
@ -3385,7 +3385,7 @@ static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
|
||||||
/**
|
/**
|
||||||
* cxlflash_afu_debug() - host AFU debug handler
|
* cxlflash_afu_debug() - host AFU debug handler
|
||||||
* @cfg: Internal structure associated with the host.
|
* @cfg: Internal structure associated with the host.
|
||||||
* @arg: Kernel copy of userspace ioctl data structure.
|
* @afu_dbg: Kernel copy of userspace ioctl data structure.
|
||||||
*
|
*
|
||||||
* For debug requests requiring a data buffer, always provide an aligned
|
* For debug requests requiring a data buffer, always provide an aligned
|
||||||
* (cache line) buffer to the AFU to appease any alignment requirements.
|
* (cache line) buffer to the AFU to appease any alignment requirements.
|
||||||
|
|
|
@ -30,7 +30,7 @@ struct cxlflash_global global;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* marshal_rele_to_resize() - translate release to resize structure
|
* marshal_rele_to_resize() - translate release to resize structure
|
||||||
* @rele: Source structure from which to translate/copy.
|
* @release: Source structure from which to translate/copy.
|
||||||
* @resize: Destination structure for the translate/copy.
|
* @resize: Destination structure for the translate/copy.
|
||||||
*/
|
*/
|
||||||
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
|
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
|
||||||
|
@ -44,7 +44,7 @@ static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
|
||||||
/**
|
/**
|
||||||
* marshal_det_to_rele() - translate detach to release structure
|
* marshal_det_to_rele() - translate detach to release structure
|
||||||
* @detach: Destination structure for the translate/copy.
|
* @detach: Destination structure for the translate/copy.
|
||||||
* @rele: Source structure from which to translate/copy.
|
* @release: Source structure from which to translate/copy.
|
||||||
*/
|
*/
|
||||||
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
|
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
|
||||||
struct dk_cxlflash_release *release)
|
struct dk_cxlflash_release *release)
|
||||||
|
@ -517,7 +517,7 @@ void rhte_checkin(struct ctx_info *ctxi,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* rhte_format1() - populates a RHTE for format 1
|
* rht_format1() - populates a RHTE for format 1
|
||||||
* @rhte: RHTE to populate.
|
* @rhte: RHTE to populate.
|
||||||
* @lun_id: LUN ID of LUN associated with RHTE.
|
* @lun_id: LUN ID of LUN associated with RHTE.
|
||||||
* @perm: Desired permissions for RHTE.
|
* @perm: Desired permissions for RHTE.
|
||||||
|
|
|
@ -41,7 +41,7 @@ static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
|
||||||
/**
|
/**
|
||||||
* marshal_clone_to_rele() - translate clone to release structure
|
* marshal_clone_to_rele() - translate clone to release structure
|
||||||
* @clone: Source structure from which to translate/copy.
|
* @clone: Source structure from which to translate/copy.
|
||||||
* @rele: Destination structure for the translate/copy.
|
* @release: Destination structure for the translate/copy.
|
||||||
*/
|
*/
|
||||||
static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
|
static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
|
||||||
struct dk_cxlflash_release *release)
|
struct dk_cxlflash_release *release)
|
||||||
|
@ -229,7 +229,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* validate_alloc() - validates the specified block has been allocated
|
* validate_alloc() - validates the specified block has been allocated
|
||||||
* @ba_lun_info: LUN info owning the block allocator.
|
* @bali: LUN info owning the block allocator.
|
||||||
* @aun: Block to validate.
|
* @aun: Block to validate.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, -1 on failure
|
* Return: 0 on success, -1 on failure
|
||||||
|
@ -300,7 +300,7 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
|
||||||
/**
|
/**
|
||||||
* ba_clone() - Clone a chunk of the block allocation table
|
* ba_clone() - Clone a chunk of the block allocation table
|
||||||
* @ba_lun: Block allocator from which to allocate a block.
|
* @ba_lun: Block allocator from which to allocate a block.
|
||||||
* @to_free: Block to free.
|
* @to_clone: Block to clone.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, -1 on failure
|
* Return: 0 on success, -1 on failure
|
||||||
*/
|
*/
|
||||||
|
@ -361,7 +361,7 @@ void cxlflash_ba_terminate(struct ba_lun *ba_lun)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* init_vlun() - initializes a LUN for virtual use
|
* init_vlun() - initializes a LUN for virtual use
|
||||||
* @lun_info: LUN information structure that owns the block allocator.
|
* @lli: LUN information structure that owns the block allocator.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, -errno on failure
|
* Return: 0 on success, -errno on failure
|
||||||
*/
|
*/
|
||||||
|
|
|
@@ -958,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
 
 
 /**
-* dc395x_queue_command - queue scsi command passed from the mid
+* dc395x_queue_command_lck - queue scsi command passed from the mid
 * layer, invoke 'done' on completion
 *
 * @cmd: pointer to scsi command object
@@ -2918,7 +2918,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
 } else {
 if ((srb->state & (SRB_START_ + SRB_MSGOUT))
 || !(srb->
-state & (SRB_DISCONNECT + SRB_COMPLETED))) {
+state & (SRB_DISCONNECT | SRB_COMPLETED))) {
 /*
 * Selection time out
 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
@@ -3258,10 +3258,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 /*
 * target status..........................
 */
-if (status_byte(status) == CHECK_CONDITION) {
+if (status >> 1 == CHECK_CONDITION) {
 request_sense(acb, dcb, srb);
 return;
-} else if (status_byte(status) == QUEUE_FULL) {
+} else if (status >> 1 == QUEUE_FULL) {
 tempcnt = (u8)list_size(&dcb->srb_going_list);
 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
 dcb->target_id, dcb->target_lun, tempcnt);
@@ -4248,7 +4248,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
 
 
 /**
-* adapter_init_host - Initialize the scsi host instance based on
+* adapter_init_scsi_host - Initialize the scsi host instance based on
 * values that we have already stored in the adapter instance. There's
 * some mention that a lot of these are deprecated, so we won't use
 * them (we'll use the ones in the adapter instance) but we'll fill
@@ -4336,13 +4336,14 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)
 
 
 /**
-* init_adapter - Grab the resource for the card, setup the adapter
+* adapter_init - Grab the resource for the card, setup the adapter
 * information, set the card into a known state, create the various
 * tables etc etc. This basically gets all adapter information all up
 * to date, initialised and gets the chip in sync with it.
 *
-* @host: This hosts adapter structure
+* @acb: The adapter which we are to init.
 * @io_port: The base I/O port
+* @io_port_len: The I/O port size
 * @irq: IRQ
 *
 * Returns 0 if the initialization succeeds, any other value on
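The disconnect() hunk swaps '+' for '|' when building the SRB_DISCONNECT/SRB_COMPLETED test mask. For disjoint single-bit flags the arithmetic sum happens to equal the bitwise OR, but OR states the intent and cannot overflow into a neighbouring bit if a flag is ever repeated or redefined. A tiny check of that, with made-up flag values:

#include <assert.h>

#define SRB_DISCONNECT 0x04	/* illustrative values, not the driver's */
#define SRB_COMPLETED  0x08

int main(void)
{
	unsigned int state = SRB_DISCONNECT;

	/* Same mask here only because the bits are disjoint and used once. */
	assert((SRB_DISCONNECT | SRB_COMPLETED) ==
	       (SRB_DISCONNECT + SRB_COMPLETED));

	/* The intended test: is either flag set? */
	assert(state & (SRB_DISCONNECT | SRB_COMPLETED));
	return 0;
}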
@@ -405,8 +405,8 @@ static char print_alua_state(unsigned char state)
 }
 }
 
-static int alua_check_sense(struct scsi_device *sdev,
+static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
 struct scsi_sense_hdr *sense_hdr)
 {
 struct alua_dh_data *h = sdev->handler_data;
 struct alua_port_group *pg;
@@ -515,6 +515,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 struct scsi_sense_hdr sense_hdr;
 struct alua_port_group *tmp_pg;
 int len, k, off, bufflen = ALUA_RTPG_SIZE;
+int group_id_old, state_old, pref_old, valid_states_old;
 unsigned char *desc, *buff;
 unsigned err, retval;
 unsigned int tpg_desc_tbl_off;
@@ -522,6 +523,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 unsigned long flags;
 bool transitioning_sense = false;
 
+group_id_old = pg->group_id;
+state_old = pg->state;
+pref_old = pg->pref;
+valid_states_old = pg->valid_states;
+
 if (!pg->expiry) {
 unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
 
@@ -573,10 +579,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 * even though it shouldn't according to T10.
 * The retry without rtpg_ext_hdr_req set
 * handles this.
+* Note: some arrays return a sense key of ILLEGAL_REQUEST
+* with ASC 00h if they don't support the extended header.
 */
 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
-sense_hdr.sense_key == ILLEGAL_REQUEST &&
-sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+sense_hdr.sense_key == ILLEGAL_REQUEST) {
 pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
 goto retry;
 }
@@ -686,17 +693,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 if (transitioning_sense)
 pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
 
-sdev_printk(KERN_INFO, sdev,
-"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
-ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
-pg->pref ? "preferred" : "non-preferred",
-pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
-pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
-pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
-pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
-pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
-pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
-pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+if (group_id_old != pg->group_id || state_old != pg->state ||
+pref_old != pg->pref || valid_states_old != pg->valid_states)
+sdev_printk(KERN_INFO, sdev,
+"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
+ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+pg->pref ? "preferred" : "non-preferred",
+pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
 
 switch (pg->state) {
 case SCSI_ACCESS_STATE_TRANSITIONING:
@ -280,8 +280,8 @@ static int send_trespass_cmd(struct scsi_device *sdev,
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int clariion_check_sense(struct scsi_device *sdev,
|
static enum scsi_disposition clariion_check_sense(struct scsi_device *sdev,
|
||||||
struct scsi_sense_hdr *sense_hdr)
|
struct scsi_sense_hdr *sense_hdr)
|
||||||
{
|
{
|
||||||
switch (sense_hdr->sense_key) {
|
switch (sense_hdr->sense_key) {
|
||||||
case NOT_READY:
|
case NOT_READY:
|
||||||
|
|
|
@ -656,8 +656,8 @@ static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
|
||||||
return BLK_STS_OK;
|
return BLK_STS_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int rdac_check_sense(struct scsi_device *sdev,
|
static enum scsi_disposition rdac_check_sense(struct scsi_device *sdev,
|
||||||
struct scsi_sense_hdr *sense_hdr)
|
struct scsi_sense_hdr *sense_hdr)
|
||||||
{
|
{
|
||||||
struct rdac_dh_data *h = sdev->handler_data;
|
struct rdac_dh_data *h = sdev->handler_data;
|
||||||
|
|
||||||
|
|
|
@ -101,6 +101,11 @@ static const char *translate_esas2r_event_level_to_kernel(const long level)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#pragma GCC diagnostic push
|
||||||
|
#ifndef __clang__
|
||||||
|
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* the master logging function. this function will format the message as
|
* the master logging function. this function will format the message as
|
||||||
* outlined by the formatting string, the input device information and the
|
* outlined by the formatting string, the input device information and the
|
||||||
|
@ -170,6 +175,8 @@ static int esas2r_log_master(const long level,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#pragma GCC diagnostic pop
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* formats and logs a message to the system log.
|
* formats and logs a message to the system log.
|
||||||
*
|
*
|
||||||
|
|
|
@ -647,7 +647,7 @@ static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
|
||||||
ent->sense_ptr = NULL;
|
ent->sense_ptr = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* When a contingent allegiance conditon is created, we force feed a
|
/* When a contingent allegiance condition is created, we force feed a
|
||||||
* REQUEST_SENSE command to the device to fetch the sense data. I
|
* REQUEST_SENSE command to the device to fetch the sense data. I
|
||||||
* tried many other schemes, relying on the scsi error handling layer
|
* tried many other schemes, relying on the scsi error handling layer
|
||||||
* to send out the REQUEST_SENSE automatically, but this was difficult
|
* to send out the REQUEST_SENSE automatically, but this was difficult
|
||||||
|
@ -1341,7 +1341,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
|
||||||
bytes_sent -= esp->send_cmd_residual;
|
bytes_sent -= esp->send_cmd_residual;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The am53c974 has a DMA 'pecularity'. The doc states:
|
* The am53c974 has a DMA 'peculiarity'. The doc states:
|
||||||
* In some odd byte conditions, one residual byte will
|
* In some odd byte conditions, one residual byte will
|
||||||
* be left in the SCSI FIFO, and the FIFO Flags will
|
* be left in the SCSI FIFO, and the FIFO Flags will
|
||||||
* never count to '0 '. When this happens, the residual
|
* never count to '0 '. When this happens, the residual
|
||||||
|
|
|
@ -2771,7 +2771,7 @@ static int fcoe_vport_disable(struct fc_vport *vport, bool disable)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* fcoe_vport_set_symbolic_name() - append vport string to symbolic name
|
* fcoe_set_vport_symbolic_name() - append vport string to symbolic name
|
||||||
* @vport: fc_vport with a new symbolic name string
|
* @vport: fc_vport with a new symbolic name string
|
||||||
*
|
*
|
||||||
* After generating a new symbolic name string, a new RSPN_ID request is
|
* After generating a new symbolic name string, a new RSPN_ID request is
|
||||||
|
|
|
@ -1302,7 +1302,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* fcoe_ctlr_recv_els() - Handle an incoming link reset frame
|
* fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame
|
||||||
* @fip: The FCoE controller that received the frame
|
* @fip: The FCoE controller that received the frame
|
||||||
* @skb: The received FIP packet
|
* @skb: The received FIP packet
|
||||||
*
|
*
|
||||||
|
@ -2952,7 +2952,7 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
|
* fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
|
||||||
* @fip: The FCoE controller
|
* @fip: The FCoE controller
|
||||||
* @frport: The newly-parsed FCoE rport from the Discovery Request
|
* @frport: The newly-parsed FCoE rport from the Discovery Request
|
||||||
*
|
*
|
||||||
|
|
|
@ -863,7 +863,7 @@ static int fcoe_transport_create(const char *buffer,
 int rc = -ENODEV;
 struct net_device *netdev = NULL;
 struct fcoe_transport *ft = NULL;
-enum fip_mode fip_mode = (enum fip_mode)kp->arg;
+enum fip_mode fip_mode = (enum fip_mode)(uintptr_t)kp->arg;

 mutex_lock(&ft_mutex);

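A note on the cast above: kp->arg is a void *, and converting a pointer directly to a narrow enum can trip pointer-to-integer size warnings on 64-bit builds; routing the conversion through uintptr_t keeps it the same width. A minimal sketch with made-up names (assumes linux/moduleparam.h is included):

enum example_mode { EXAMPLE_MODE_OFF, EXAMPLE_MODE_ON };

static enum example_mode example_param_to_mode(const struct kernel_param *kp)
{
	/* kp->arg carries a small integer smuggled through a pointer. */
	return (enum example_mode)(uintptr_t)kp->arg;
}
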
@ -58,8 +58,7 @@ int fnic_debugfs_init(void)
 fnic_trace_debugfs_root);

 /* Allocate memory to structure */
-fc_trc_flag = (struct fc_trace_flag_type *)
-vmalloc(sizeof(struct fc_trace_flag_type));
+fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));

 if (fc_trc_flag) {
 fc_trc_flag->fc_row_file = 0;

@ -296,7 +296,7 @@ void fnic_handle_event(struct work_struct *work)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Check if the Received FIP FLOGI frame is rejected
|
* is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
|
||||||
* @fip: The FCoE controller that received the frame
|
* @fip: The FCoE controller that received the frame
|
||||||
* @skb: The received FIP frame
|
* @skb: The received FIP frame
|
||||||
*
|
*
|
||||||
|
@ -1343,9 +1343,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
|
||||||
if (list_empty(&fnic->vlans)) {
|
if (list_empty(&fnic->vlans)) {
|
||||||
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
|
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
|
||||||
/* no vlans available, try again */
|
/* no vlans available, try again */
|
||||||
if (printk_ratelimit())
|
if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
|
||||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
|
if (printk_ratelimit())
|
||||||
"Start VLAN Discovery\n");
|
shost_printk(KERN_DEBUG, fnic->lport->host,
|
||||||
|
"Start VLAN Discovery\n");
|
||||||
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
|
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -1363,9 +1364,10 @@ void fnic_handle_fip_timer(struct fnic *fnic)
|
||||||
case FIP_VLAN_FAILED:
|
case FIP_VLAN_FAILED:
|
||||||
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
|
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
|
||||||
/* if all vlans are in failed state, restart vlan disc */
|
/* if all vlans are in failed state, restart vlan disc */
|
||||||
if (printk_ratelimit())
|
if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
|
||||||
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
|
if (printk_ratelimit())
|
||||||
"Start VLAN Discovery\n");
|
shost_printk(KERN_DEBUG, fnic->lport->host,
|
||||||
|
"Start VLAN Discovery\n");
|
||||||
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
|
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
|
||||||
break;
|
break;
|
||||||
case FIP_VLAN_SENT:
|
case FIP_VLAN_SENT:
|
||||||
|
|
|
@ -1100,9 +1100,6 @@ static int __init fnic_init_module(void)
|
||||||
goto err_create_fnic_workq;
|
goto err_create_fnic_workq;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_init(&fnic_list_lock);
|
|
||||||
INIT_LIST_HEAD(&fnic_list);
|
|
||||||
|
|
||||||
fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
|
fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
|
||||||
if (!fnic_fip_queue) {
|
if (!fnic_fip_queue) {
|
||||||
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
|
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
|
||||||
|
|
|
@ -173,7 +173,7 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* __fnic_set_state_flags
|
* __fnic_set_state_flags
|
||||||
* Sets/Clears bits in fnic's state_flags
|
* Sets/Clears bits in fnic's state_flags
|
||||||
**/
|
**/
|
||||||
|
@ -2287,7 +2287,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* fnic_scsi_host_start_tag
|
* fnic_scsi_host_start_tag
|
||||||
* Allocates tagid from host's tag list
|
* Allocates tagid from host's tag list
|
||||||
**/
|
**/
|
||||||
|
@ -2307,7 +2307,7 @@ fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
|
||||||
return dummy->tag;
|
return dummy->tag;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* fnic_scsi_host_end_tag
|
* fnic_scsi_host_end_tag
|
||||||
* frees tag allocated by fnic_scsi_host_start_tag.
|
* frees tag allocated by fnic_scsi_host_start_tag.
|
||||||
**/
|
**/
|
||||||
|
|
|
@ -153,7 +153,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
|
||||||
if (rd_idx > (fnic_max_trace_entries-1))
|
if (rd_idx > (fnic_max_trace_entries-1))
|
||||||
rd_idx = 0;
|
rd_idx = 0;
|
||||||
/*
|
/*
|
||||||
* Continure dumpping trace buffer entries into
|
* Continue dumping trace buffer entries into
|
||||||
* memory file till rd_idx reaches write index
|
* memory file till rd_idx reaches write index
|
||||||
*/
|
*/
|
||||||
if (rd_idx == wr_idx)
|
if (rd_idx == wr_idx)
|
||||||
|
@ -189,7 +189,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
|
||||||
tbp->data[3], tbp->data[4]);
|
tbp->data[3], tbp->data[4]);
|
||||||
rd_idx++;
|
rd_idx++;
|
||||||
/*
|
/*
|
||||||
* Continue dumpping trace buffer entries into
|
* Continue dumping trace buffer entries into
|
||||||
* memory file till rd_idx reaches write index
|
* memory file till rd_idx reaches write index
|
||||||
*/
|
*/
|
||||||
if (rd_idx == wr_idx)
|
if (rd_idx == wr_idx)
|
||||||
|
@ -632,7 +632,7 @@ void fnic_fc_trace_free(void)
|
||||||
* fnic_fc_ctlr_set_trace_data:
|
* fnic_fc_ctlr_set_trace_data:
|
||||||
* Maintain rd & wr idx accordingly and set data
|
* Maintain rd & wr idx accordingly and set data
|
||||||
* Passed parameters:
|
* Passed parameters:
|
||||||
* host_no: host number accociated with fnic
|
* host_no: host number associated with fnic
|
||||||
* frame_type: send_frame, rece_frame or link event
|
* frame_type: send_frame, rece_frame or link event
|
||||||
* fc_frame: pointer to fc_frame
|
* fc_frame: pointer to fc_frame
|
||||||
* frame_len: Length of the fc_frame
|
* frame_len: Length of the fc_frame
|
||||||
|
@ -715,13 +715,13 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
|
||||||
* fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
|
* fnic_fc_ctlr_get_trace_data: Copy trace buffer to a memory file
|
||||||
* Passed parameter:
|
* Passed parameter:
|
||||||
* @fnic_dbgfs_t: pointer to debugfs trace buffer
|
* @fnic_dbgfs_t: pointer to debugfs trace buffer
|
||||||
* rdata_flag: 1 => Unformated file
|
* rdata_flag: 1 => Unformatted file
|
||||||
* 0 => formated file
|
* 0 => formatted file
|
||||||
* Description:
|
* Description:
|
||||||
* This routine will copy the trace data to memory file with
|
* This routine will copy the trace data to memory file with
|
||||||
* proper formatting and also copy to another memory
|
* proper formatting and also copy to another memory
|
||||||
* file without formatting for further procesing.
|
* file without formatting for further processing.
|
||||||
* Retrun Value:
|
* Return Value:
|
||||||
* Number of bytes that were dumped into fnic_dbgfs_t
|
* Number of bytes that were dumped into fnic_dbgfs_t
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
@ -785,10 +785,10 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
|
||||||
* @fc_trace_hdr_t: pointer to trace data
|
* @fc_trace_hdr_t: pointer to trace data
|
||||||
* @fnic_dbgfs_t: pointer to debugfs trace buffer
|
* @fnic_dbgfs_t: pointer to debugfs trace buffer
|
||||||
* @orig_len: pointer to len
|
* @orig_len: pointer to len
|
||||||
* rdata_flag: 0 => Formated file, 1 => Unformated file
|
* rdata_flag: 0 => Formatted file, 1 => Unformatted file
|
||||||
* Description:
|
* Description:
|
||||||
* This routine will format and copy the passed trace data
|
* This routine will format and copy the passed trace data
|
||||||
* for formated file or unformated file accordingly.
|
* for formatted file or unformatted file accordingly.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
|
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
|
||||||
|
|
|
@ -346,8 +346,7 @@ struct hisi_sas_hw {
|
||||||
u8 reg_index, u8 reg_count, u8 *write_data);
|
u8 reg_index, u8 reg_count, u8 *write_data);
|
||||||
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
|
void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
|
||||||
int delay_ms, int timeout_ms);
|
int delay_ms, int timeout_ms);
|
||||||
void (*snapshot_prepare)(struct hisi_hba *hisi_hba);
|
void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
|
||||||
void (*snapshot_restore)(struct hisi_hba *hisi_hba);
|
|
||||||
int complete_hdr_size;
|
int complete_hdr_size;
|
||||||
struct scsi_host_template *sht;
|
struct scsi_host_template *sht;
|
||||||
};
|
};
|
||||||
|
|
|
@ -1341,10 +1341,12 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
|
||||||
rc = hisi_sas_exec_internal_tmf_task(device, fis,
|
rc = hisi_sas_exec_internal_tmf_task(device, fis,
|
||||||
s, NULL);
|
s, NULL);
|
||||||
if (rc != TMF_RESP_FUNC_COMPLETE)
|
if (rc != TMF_RESP_FUNC_COMPLETE)
|
||||||
dev_err(dev, "ata disk de-reset failed\n");
|
dev_err(dev, "ata disk %016llx de-reset failed\n",
|
||||||
|
SAS_ADDR(device->sas_addr));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
dev_err(dev, "ata disk reset failed\n");
|
dev_err(dev, "ata disk %016llx reset failed\n",
|
||||||
|
SAS_ADDR(device->sas_addr));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (rc == TMF_RESP_FUNC_COMPLETE)
|
if (rc == TMF_RESP_FUNC_COMPLETE)
|
||||||
|
@ -1568,21 +1570,26 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
|
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
|
||||||
|
|
||||||
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
|
static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
|
||||||
{
|
{
|
||||||
struct device *dev = hisi_hba->dev;
|
|
||||||
struct Scsi_Host *shost = hisi_hba->shost;
|
|
||||||
int rc;
|
|
||||||
|
|
||||||
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
|
|
||||||
queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
|
|
||||||
|
|
||||||
if (!hisi_hba->hw->soft_reset)
|
if (!hisi_hba->hw->soft_reset)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
|
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
|
||||||
|
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
|
||||||
|
{
|
||||||
|
struct device *dev = hisi_hba->dev;
|
||||||
|
struct Scsi_Host *shost = hisi_hba->shost;
|
||||||
|
int rc;
|
||||||
|
|
||||||
dev_info(dev, "controller resetting...\n");
|
dev_info(dev, "controller resetting...\n");
|
||||||
hisi_sas_controller_reset_prepare(hisi_hba);
|
hisi_sas_controller_reset_prepare(hisi_hba);
|
||||||
|
|
||||||
|
@ -2471,6 +2478,9 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
|
||||||
struct hisi_hba *hisi_hba =
|
struct hisi_hba *hisi_hba =
|
||||||
container_of(work, struct hisi_hba, rst_work);
|
container_of(work, struct hisi_hba, rst_work);
|
||||||
|
|
||||||
|
if (hisi_sas_controller_prereset(hisi_hba))
|
||||||
|
return;
|
||||||
|
|
||||||
hisi_sas_controller_reset(hisi_hba);
|
hisi_sas_controller_reset(hisi_hba);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
|
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
|
||||||
|
@ -2480,8 +2490,12 @@ void hisi_sas_sync_rst_work_handler(struct work_struct *work)
|
||||||
struct hisi_sas_rst *rst =
|
struct hisi_sas_rst *rst =
|
||||||
container_of(work, struct hisi_sas_rst, work);
|
container_of(work, struct hisi_sas_rst, work);
|
||||||
|
|
||||||
|
if (hisi_sas_controller_prereset(rst->hisi_hba))
|
||||||
|
goto rst_complete;
|
||||||
|
|
||||||
if (!hisi_sas_controller_reset(rst->hisi_hba))
|
if (!hisi_sas_controller_reset(rst->hisi_hba))
|
||||||
rst->done = true;
|
rst->done = true;
|
||||||
|
rst_complete:
|
||||||
complete(rst->completion);
|
complete(rst->completion);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
|
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
|
||||||
|
@ -2689,12 +2703,14 @@ int hisi_sas_probe(struct platform_device *pdev,
|
||||||
|
|
||||||
rc = hisi_hba->hw->hw_init(hisi_hba);
|
rc = hisi_hba->hw->hw_init(hisi_hba);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto err_out_register_ha;
|
goto err_out_hw_init;
|
||||||
|
|
||||||
scsi_scan_host(shost);
|
scsi_scan_host(shost);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
err_out_hw_init:
|
||||||
|
sas_unregister_ha(sha);
|
||||||
err_out_register_ha:
|
err_out_register_ha:
|
||||||
scsi_remove_host(shost);
|
scsi_remove_host(shost);
|
||||||
err_out_ha:
|
err_out_ha:
|
||||||
|
|
|
@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
 idx = i * HISI_SAS_PHY_INT_NR;
 for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
 irq = platform_get_irq(pdev, idx);
-if (!irq) {
+if (irq < 0) {
 dev_err(dev, "irq init: fail map phy interrupt %d\n",
 idx);
 return -ENOENT;
@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
 idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
 for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
 irq = platform_get_irq(pdev, idx);
-if (!irq) {
+if (irq < 0) {
 dev_err(dev, "irq init: could not map cq interrupt %d\n",
 idx);
 return -ENOENT;
@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
 idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
 for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
 irq = platform_get_irq(pdev, idx);
-if (!irq) {
+if (irq < 0) {
 dev_err(dev, "irq init: could not map fatal interrupt %d\n",
 idx);
 return -ENOENT;

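The three hunks above fix the same bug: platform_get_irq() reports failure with a negative errno (and is not expected to return 0), so checking "!irq" never catches the error path. A minimal sketch of the intended pattern, with hypothetical names:

static int example_init_irq(struct platform_device *pdev,
		irq_handler_t handler, void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate the errno from the core */

	return devm_request_irq(&pdev->dev, irq, handler, 0, "example", data);
}
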
@ -531,6 +531,7 @@ module_param(prot_mask, int, 0);
|
||||||
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
|
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
|
||||||
|
|
||||||
static void debugfs_work_handler_v3_hw(struct work_struct *work);
|
static void debugfs_work_handler_v3_hw(struct work_struct *work);
|
||||||
|
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
|
||||||
|
|
||||||
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
|
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
|
||||||
{
|
{
|
||||||
|
@ -1717,8 +1718,11 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
irq_value &= ~irq_msk;
|
irq_value &= ~irq_msk;
|
||||||
if (!irq_value)
|
if (!irq_value) {
|
||||||
|
dev_warn(dev, "phy%d channel int 1 received with status bits cleared\n",
|
||||||
|
phy_no);
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
|
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
|
||||||
const struct hisi_sas_hw_error *error = &port_axi_error[i];
|
const struct hisi_sas_hw_error *error = &port_axi_error[i];
|
||||||
|
@ -1779,8 +1783,11 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||||
BIT(CHL_INT2_RX_INVLD_DW_OFF);
|
BIT(CHL_INT2_RX_INVLD_DW_OFF);
|
||||||
|
|
||||||
irq_value &= ~irq_msk;
|
irq_value &= ~irq_msk;
|
||||||
if (!irq_value)
|
if (!irq_value) {
|
||||||
|
dev_warn(dev, "phy%d channel int 2 received with status bits cleared\n",
|
||||||
|
phy_no);
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
|
if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
|
||||||
dev_warn(dev, "phy%d identify timeout\n", phy_no);
|
dev_warn(dev, "phy%d identify timeout\n", phy_no);
|
||||||
|
@ -2252,8 +2259,9 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
|
||||||
|
|
||||||
slot_err_v3_hw(hisi_hba, task, slot);
|
slot_err_v3_hw(hisi_hba, task, slot);
|
||||||
if (ts->stat != SAS_DATA_UNDERRUN)
|
if (ts->stat != SAS_DATA_UNDERRUN)
|
||||||
dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
|
dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d addr=%016llx CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
|
||||||
slot->idx, task, sas_dev->device_id,
|
slot->idx, task, sas_dev->device_id,
|
||||||
|
SAS_ADDR(device->sas_addr),
|
||||||
dw0, dw1, complete_hdr->act, dw3,
|
dw0, dw1, complete_hdr->act, dw3,
|
||||||
error_info[0], error_info[1],
|
error_info[0], error_info[1],
|
||||||
error_info[2], error_info[3]);
|
error_info[2], error_info[3]);
|
||||||
|
@ -3181,6 +3189,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
|
||||||
.get_events = phy_get_events_v3_hw,
|
.get_events = phy_get_events_v3_hw,
|
||||||
.write_gpio = write_gpio_v3_hw,
|
.write_gpio = write_gpio_v3_hw,
|
||||||
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
|
.wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
|
||||||
|
.debugfs_snapshot_regs = debugfs_snapshot_regs_v3_hw,
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct Scsi_Host *
|
static struct Scsi_Host *
|
||||||
|
@ -3665,6 +3674,19 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
|
||||||
|
|
||||||
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
|
static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
|
||||||
{
|
{
|
||||||
|
int debugfs_dump_index = hisi_hba->debugfs_dump_index;
|
||||||
|
struct device *dev = hisi_hba->dev;
|
||||||
|
u64 timestamp = local_clock();
|
||||||
|
|
||||||
|
if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
|
||||||
|
dev_warn(dev, "dump count exceeded!\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
do_div(timestamp, NSEC_PER_MSEC);
|
||||||
|
hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
|
||||||
|
hisi_hba->debugfs_dump_index++;
|
||||||
|
|
||||||
debugfs_snapshot_prepare_v3_hw(hisi_hba);
|
debugfs_snapshot_prepare_v3_hw(hisi_hba);
|
||||||
|
|
||||||
debugfs_snapshot_global_reg_v3_hw(hisi_hba);
|
debugfs_snapshot_global_reg_v3_hw(hisi_hba);
|
||||||
|
@ -4407,20 +4429,8 @@ static void debugfs_work_handler_v3_hw(struct work_struct *work)
|
||||||
{
|
{
|
||||||
struct hisi_hba *hisi_hba =
|
struct hisi_hba *hisi_hba =
|
||||||
container_of(work, struct hisi_hba, debugfs_work);
|
container_of(work, struct hisi_hba, debugfs_work);
|
||||||
int debugfs_dump_index = hisi_hba->debugfs_dump_index;
|
|
||||||
struct device *dev = hisi_hba->dev;
|
|
||||||
u64 timestamp = local_clock();
|
|
||||||
|
|
||||||
if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
|
|
||||||
dev_warn(dev, "dump count exceeded!\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
do_div(timestamp, NSEC_PER_MSEC);
|
|
||||||
hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
|
|
||||||
|
|
||||||
debugfs_snapshot_regs_v3_hw(hisi_hba);
|
debugfs_snapshot_regs_v3_hw(hisi_hba);
|
||||||
hisi_hba->debugfs_dump_index++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
|
static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
|
||||||
|
@ -4760,7 +4770,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
|
|
||||||
rc = hisi_sas_v3_init(hisi_hba);
|
rc = hisi_sas_v3_init(hisi_hba);
|
||||||
if (rc)
|
if (rc)
|
||||||
goto err_out_register_ha;
|
goto err_out_hw_init;
|
||||||
|
|
||||||
scsi_scan_host(shost);
|
scsi_scan_host(shost);
|
||||||
|
|
||||||
|
@ -4777,6 +4787,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
err_out_hw_init:
|
||||||
|
sas_unregister_ha(sha);
|
||||||
err_out_register_ha:
|
err_out_register_ha:
|
||||||
scsi_remove_host(shost);
|
scsi_remove_host(shost);
|
||||||
err_out_free_irq_vectors:
|
err_out_free_irq_vectors:
|
||||||
|
|
|
@ -326,6 +326,7 @@ static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
|
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
|
||||||
|
* @vhost: ibmvfc host struct
|
||||||
* @vfc_cmd: ibmvfc command struct
|
* @vfc_cmd: ibmvfc command struct
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
|
@ -603,8 +604,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
|
||||||
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
|
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
|
||||||
vhost->action = action;
|
vhost->action = action;
|
||||||
break;
|
break;
|
||||||
|
case IBMVFC_HOST_ACTION_REENABLE:
|
||||||
|
case IBMVFC_HOST_ACTION_RESET:
|
||||||
|
vhost->action = action;
|
||||||
|
break;
|
||||||
case IBMVFC_HOST_ACTION_INIT:
|
case IBMVFC_HOST_ACTION_INIT:
|
||||||
case IBMVFC_HOST_ACTION_TGT_DEL:
|
case IBMVFC_HOST_ACTION_TGT_DEL:
|
||||||
|
case IBMVFC_HOST_ACTION_LOGO:
|
||||||
|
case IBMVFC_HOST_ACTION_QUERY_TGTS:
|
||||||
|
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
|
||||||
|
case IBMVFC_HOST_ACTION_NONE:
|
||||||
|
default:
|
||||||
switch (vhost->action) {
|
switch (vhost->action) {
|
||||||
case IBMVFC_HOST_ACTION_RESET:
|
case IBMVFC_HOST_ACTION_RESET:
|
||||||
case IBMVFC_HOST_ACTION_REENABLE:
|
case IBMVFC_HOST_ACTION_REENABLE:
|
||||||
|
@ -614,15 +624,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case IBMVFC_HOST_ACTION_LOGO:
|
|
||||||
case IBMVFC_HOST_ACTION_QUERY_TGTS:
|
|
||||||
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
|
|
||||||
case IBMVFC_HOST_ACTION_NONE:
|
|
||||||
case IBMVFC_HOST_ACTION_RESET:
|
|
||||||
case IBMVFC_HOST_ACTION_REENABLE:
|
|
||||||
default:
|
|
||||||
vhost->action = action;
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -650,8 +651,6 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
|
||||||
/**
|
/**
|
||||||
* ibmvfc_del_tgt - Schedule cleanup and removal of the target
|
* ibmvfc_del_tgt - Schedule cleanup and removal of the target
|
||||||
* @tgt: ibmvfc target struct
|
* @tgt: ibmvfc target struct
|
||||||
* @job_step: job step to perform
|
|
||||||
*
|
|
||||||
**/
|
**/
|
||||||
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
|
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
|
||||||
{
|
{
|
||||||
|
@ -768,6 +767,8 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
|
||||||
/**
|
/**
|
||||||
* ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
|
* ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
|
||||||
* @vhost: ibmvfc host who owns the event pool
|
* @vhost: ibmvfc host who owns the event pool
|
||||||
|
* @queue: ibmvfc queue struct
|
||||||
|
* @size: pool size
|
||||||
*
|
*
|
||||||
* Returns zero on success.
|
* Returns zero on success.
|
||||||
**/
|
**/
|
||||||
|
@ -820,6 +821,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
|
||||||
/**
|
/**
|
||||||
* ibmvfc_free_event_pool - Frees memory of the event pool of a host
|
* ibmvfc_free_event_pool - Frees memory of the event pool of a host
|
||||||
* @vhost: ibmvfc host who owns the event pool
|
* @vhost: ibmvfc host who owns the event pool
|
||||||
|
* @queue: ibmvfc queue struct
|
||||||
*
|
*
|
||||||
**/
|
**/
|
||||||
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
|
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
|
||||||
|
@ -1414,6 +1416,7 @@ static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_gather_partition_info - Gather info about the LPAR
|
* ibmvfc_gather_partition_info - Gather info about the LPAR
|
||||||
|
* @vhost: ibmvfc host struct
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
* none
|
* none
|
||||||
|
@ -1484,7 +1487,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_get_event - Gets the next free event in pool
|
* ibmvfc_get_event - Gets the next free event in pool
|
||||||
* @vhost: ibmvfc host struct
|
* @queue: ibmvfc queue struct
|
||||||
*
|
*
|
||||||
* Returns a free event from the pool.
|
* Returns a free event from the pool.
|
||||||
**/
|
**/
|
||||||
|
@ -1631,7 +1634,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_timeout - Internal command timeout handler
|
* ibmvfc_timeout - Internal command timeout handler
|
||||||
* @evt: struct ibmvfc_event that timed out
|
* @t: struct ibmvfc_event that timed out
|
||||||
*
|
*
|
||||||
* Called when an internally generated command times out
|
* Called when an internally generated command times out
|
||||||
**/
|
**/
|
||||||
|
@ -1892,8 +1895,8 @@ static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct s
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_queuecommand - The queuecommand function of the scsi template
|
* ibmvfc_queuecommand - The queuecommand function of the scsi template
|
||||||
|
* @shost: scsi host struct
|
||||||
* @cmnd: struct scsi_cmnd to be executed
|
* @cmnd: struct scsi_cmnd to be executed
|
||||||
* @done: Callback function to be called when cmnd is completed
|
|
||||||
*
|
*
|
||||||
* Returns:
|
* Returns:
|
||||||
* 0 on success / other on failure
|
* 0 on success / other on failure
|
||||||
|
@ -2324,7 +2327,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
|
||||||
/**
|
/**
|
||||||
* ibmvfc_match_rport - Match function for specified remote port
|
* ibmvfc_match_rport - Match function for specified remote port
|
||||||
* @evt: ibmvfc event struct
|
* @evt: ibmvfc event struct
|
||||||
* @device: device to match (rport)
|
* @rport: device to match
|
||||||
*
|
*
|
||||||
* Returns:
|
* Returns:
|
||||||
* 1 if event matches rport / 0 if event does not match rport
|
* 1 if event matches rport / 0 if event does not match rport
|
||||||
|
@ -3176,8 +3179,9 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
|
||||||
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
|
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
|
||||||
* @crq: Command/Response queue
|
* @crq: Command/Response queue
|
||||||
* @vhost: ibmvfc host struct
|
* @vhost: ibmvfc host struct
|
||||||
|
* @evt_doneq: Event done queue
|
||||||
*
|
*
|
||||||
**/
|
**/
|
||||||
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
|
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
|
||||||
struct list_head *evt_doneq)
|
struct list_head *evt_doneq)
|
||||||
{
|
{
|
||||||
|
@ -3358,7 +3362,6 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
|
||||||
* ibmvfc_change_queue_depth - Change the device's queue depth
|
* ibmvfc_change_queue_depth - Change the device's queue depth
|
||||||
* @sdev: scsi device struct
|
* @sdev: scsi device struct
|
||||||
* @qdepth: depth to set
|
* @qdepth: depth to set
|
||||||
* @reason: calling context
|
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
* actual depth set
|
* actual depth set
|
||||||
|
@ -3430,6 +3433,7 @@ static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
|
||||||
/**
|
/**
|
||||||
* ibmvfc_show_log_level - Show the adapter's error logging level
|
* ibmvfc_show_log_level - Show the adapter's error logging level
|
||||||
* @dev: class device struct
|
* @dev: class device struct
|
||||||
|
* @attr: unused
|
||||||
* @buf: buffer
|
* @buf: buffer
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
|
@ -3452,7 +3456,9 @@ static ssize_t ibmvfc_show_log_level(struct device *dev,
|
||||||
/**
|
/**
|
||||||
* ibmvfc_store_log_level - Change the adapter's error logging level
|
* ibmvfc_store_log_level - Change the adapter's error logging level
|
||||||
* @dev: class device struct
|
* @dev: class device struct
|
||||||
|
* @attr: unused
|
||||||
* @buf: buffer
|
* @buf: buffer
|
||||||
|
* @count: buffer size
|
||||||
*
|
*
|
||||||
* Return value:
|
* Return value:
|
||||||
* number of bytes printed to buffer
|
* number of bytes printed to buffer
|
||||||
|
@ -3530,7 +3536,7 @@ static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
|
||||||
struct bin_attribute *bin_attr,
|
struct bin_attribute *bin_attr,
|
||||||
char *buf, loff_t off, size_t count)
|
char *buf, loff_t off, size_t count)
|
||||||
{
|
{
|
||||||
struct device *dev = container_of(kobj, struct device, kobj);
|
struct device *dev = kobj_to_dev(kobj);
|
||||||
struct Scsi_Host *shost = class_to_shost(dev);
|
struct Scsi_Host *shost = class_to_shost(dev);
|
||||||
struct ibmvfc_host *vhost = shost_priv(shost);
|
struct ibmvfc_host *vhost = shost_priv(shost);
|
||||||
unsigned long flags = 0;
|
unsigned long flags = 0;
|
||||||
|
@ -4162,6 +4168,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
|
||||||
/**
|
/**
|
||||||
* __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
|
* __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
|
||||||
* @tgt: ibmvfc target struct
|
* @tgt: ibmvfc target struct
|
||||||
|
* @done: Routine to call when the event is responded to
|
||||||
*
|
*
|
||||||
* Returns:
|
* Returns:
|
||||||
* Allocated and initialized ibmvfc_event struct
|
* Allocated and initialized ibmvfc_event struct
|
||||||
|
@ -4478,7 +4485,7 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_adisc_timeout - Handle an ADISC timeout
|
* ibmvfc_adisc_timeout - Handle an ADISC timeout
|
||||||
* @tgt: ibmvfc target struct
|
* @t: ibmvfc target struct
|
||||||
*
|
*
|
||||||
* If an ADISC times out, send a cancel. If the cancel times
|
* If an ADISC times out, send a cancel. If the cancel times
|
||||||
* out, reset the CRQ. When the ADISC comes back as cancelled,
|
* out, reset the CRQ. When the ADISC comes back as cancelled,
|
||||||
|
@ -4681,7 +4688,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
|
||||||
/**
|
/**
|
||||||
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
|
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
|
||||||
* @vhost: ibmvfc host struct
|
* @vhost: ibmvfc host struct
|
||||||
* @scsi_id: SCSI ID to allocate target for
|
* @target: Holds SCSI ID to allocate target forand the WWPN
|
||||||
*
|
*
|
||||||
* Returns:
|
* Returns:
|
||||||
* 0 on success / other on failure
|
* 0 on success / other on failure
|
||||||
|
@ -5111,7 +5118,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
|
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
|
||||||
* @vhost: ibmvfc host struct
|
* @evt: ibmvfc event struct
|
||||||
*
|
*
|
||||||
**/
|
**/
|
||||||
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
|
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
|
||||||
|
@ -5373,30 +5380,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
|
||||||
case IBMVFC_HOST_ACTION_INIT_WAIT:
|
case IBMVFC_HOST_ACTION_INIT_WAIT:
|
||||||
break;
|
break;
|
||||||
case IBMVFC_HOST_ACTION_RESET:
|
case IBMVFC_HOST_ACTION_RESET:
|
||||||
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
|
|
||||||
list_splice_init(&vhost->purge, &purge);
|
list_splice_init(&vhost->purge, &purge);
|
||||||
spin_unlock_irqrestore(vhost->host->host_lock, flags);
|
spin_unlock_irqrestore(vhost->host->host_lock, flags);
|
||||||
ibmvfc_complete_purge(&purge);
|
ibmvfc_complete_purge(&purge);
|
||||||
rc = ibmvfc_reset_crq(vhost);
|
rc = ibmvfc_reset_crq(vhost);
|
||||||
|
|
||||||
spin_lock_irqsave(vhost->host->host_lock, flags);
|
spin_lock_irqsave(vhost->host->host_lock, flags);
|
||||||
if (rc == H_CLOSED)
|
if (!rc || rc == H_CLOSED)
|
||||||
vio_enable_interrupts(to_vio_dev(vhost->dev));
|
vio_enable_interrupts(to_vio_dev(vhost->dev));
|
||||||
if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
|
if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
|
||||||
(rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
|
/*
|
||||||
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
|
* The only action we could have changed to would have
|
||||||
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
|
* been reenable, in which case, we skip the rest of
|
||||||
|
* this path and wait until we've done the re-enable
|
||||||
|
* before sending the crq init.
|
||||||
|
*/
|
||||||
|
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
|
||||||
|
|
||||||
|
if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
|
||||||
|
(rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
|
||||||
|
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
|
||||||
|
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case IBMVFC_HOST_ACTION_REENABLE:
|
case IBMVFC_HOST_ACTION_REENABLE:
|
||||||
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
|
|
||||||
list_splice_init(&vhost->purge, &purge);
|
list_splice_init(&vhost->purge, &purge);
|
||||||
spin_unlock_irqrestore(vhost->host->host_lock, flags);
|
spin_unlock_irqrestore(vhost->host->host_lock, flags);
|
||||||
ibmvfc_complete_purge(&purge);
|
ibmvfc_complete_purge(&purge);
|
||||||
rc = ibmvfc_reenable_crq_queue(vhost);
|
rc = ibmvfc_reenable_crq_queue(vhost);
|
||||||
|
|
||||||
spin_lock_irqsave(vhost->host->host_lock, flags);
|
spin_lock_irqsave(vhost->host->host_lock, flags);
|
||||||
if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
|
if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
|
||||||
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
|
/*
|
||||||
dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
|
* The only action we could have changed to would have
|
||||||
|
* been reset, in which case, we skip the rest of this
|
||||||
|
* path and wait until we've done the reset before
|
||||||
|
* sending the crq init.
|
||||||
|
*/
|
||||||
|
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
|
||||||
|
if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
|
||||||
|
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
|
||||||
|
dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case IBMVFC_HOST_ACTION_LOGO:
|
case IBMVFC_HOST_ACTION_LOGO:
|
||||||
|
|
|
@ -130,9 +130,10 @@ static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* release_crq_queue: - Deallocates data and unregisters CRQ
|
* ibmvscsi_release_crq_queue() - Deallocates data and unregisters CRQ
|
||||||
* @queue: crq_queue to initialize and register
|
* @queue: crq_queue to initialize and register
|
||||||
* @host_data: ibmvscsi_host_data of host
|
* @hostdata: ibmvscsi_host_data of host
|
||||||
|
* @max_requests: maximum requests (unused)
|
||||||
*
|
*
|
||||||
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
|
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
|
||||||
* the crq with the hypervisor.
|
* the crq with the hypervisor.
|
||||||
|
@ -276,10 +277,9 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* reset_crq_queue: - resets a crq after a failure
|
* ibmvscsi_reset_crq_queue() - resets a crq after a failure
|
||||||
* @queue: crq_queue to initialize and register
|
* @queue: crq_queue to initialize and register
|
||||||
* @hostdata: ibmvscsi_host_data of host
|
* @hostdata: ibmvscsi_host_data of host
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
|
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
|
||||||
struct ibmvscsi_host_data *hostdata)
|
struct ibmvscsi_host_data *hostdata)
|
||||||
|
@ -314,9 +314,10 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* initialize_crq_queue: - Initializes and registers CRQ with hypervisor
|
* ibmvscsi_init_crq_queue() - Initializes and registers CRQ with hypervisor
|
||||||
* @queue: crq_queue to initialize and register
|
* @queue: crq_queue to initialize and register
|
||||||
* @hostdata: ibmvscsi_host_data of host
|
* @hostdata: ibmvscsi_host_data of host
|
||||||
|
* @max_requests: maximum requests (unused)
|
||||||
*
|
*
|
||||||
* Allocates a page for messages, maps it for dma, and registers
|
* Allocates a page for messages, maps it for dma, and registers
|
||||||
* the crq with the hypervisor.
|
* the crq with the hypervisor.
|
||||||
|
@ -404,10 +405,9 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* reenable_crq_queue: - reenables a crq after
|
* ibmvscsi_reenable_crq_queue() - reenables a crq after
|
||||||
* @queue: crq_queue to initialize and register
|
* @queue: crq_queue to initialize and register
|
||||||
* @hostdata: ibmvscsi_host_data of host
|
* @hostdata: ibmvscsi_host_data of host
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
|
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
|
||||||
struct ibmvscsi_host_data *hostdata)
|
struct ibmvscsi_host_data *hostdata)
|
||||||
|
@ -439,7 +439,7 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
|
||||||
* @hostdata: ibmvscsi_host_data who owns the event pool
|
* @hostdata: ibmvscsi_host_data who owns the event pool
|
||||||
*
|
*
|
||||||
* Returns zero on success.
|
* Returns zero on success.
|
||||||
*/
|
*/
|
||||||
static int initialize_event_pool(struct event_pool *pool,
|
static int initialize_event_pool(struct event_pool *pool,
|
||||||
int size, struct ibmvscsi_host_data *hostdata)
|
int size, struct ibmvscsi_host_data *hostdata)
|
||||||
{
|
{
|
||||||
|
@ -478,12 +478,12 @@ static int initialize_event_pool(struct event_pool *pool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* release_event_pool: - Frees memory of an event pool of a host
|
* release_event_pool() - Frees memory of an event pool of a host
|
||||||
* @pool: event_pool to be released
|
* @pool: event_pool to be released
|
||||||
* @hostdata: ibmvscsi_host_data who owns the even pool
|
* @hostdata: ibmvscsi_host_data who owns the even pool
|
||||||
*
|
*
|
||||||
* Returns zero on success.
|
* Returns zero on success.
|
||||||
*/
|
*/
|
||||||
static void release_event_pool(struct event_pool *pool,
|
static void release_event_pool(struct event_pool *pool,
|
||||||
struct ibmvscsi_host_data *hostdata)
|
struct ibmvscsi_host_data *hostdata)
|
||||||
{
|
{
|
||||||
|
@ -526,11 +526,10 @@ static int valid_event_struct(struct event_pool *pool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvscsi_free-event_struct: - Changes status of event to "free"
|
* free_event_struct() - Changes status of event to "free"
|
||||||
* @pool: event_pool that contains the event
|
* @pool: event_pool that contains the event
|
||||||
* @evt: srp_event_struct to be modified
|
* @evt: srp_event_struct to be modified
|
||||||
*
|
*/
|
||||||
*/
|
|
||||||
static void free_event_struct(struct event_pool *pool,
|
static void free_event_struct(struct event_pool *pool,
|
||||||
struct srp_event_struct *evt)
|
struct srp_event_struct *evt)
|
||||||
{
|
{
|
||||||
|
@ -547,7 +546,7 @@ static void free_event_struct(struct event_pool *pool,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* get_evt_struct: - Gets the next free event in pool
|
* get_event_struct() - Gets the next free event in pool
|
||||||
* @pool: event_pool that contains the events to be searched
|
* @pool: event_pool that contains the events to be searched
|
||||||
*
|
*
|
||||||
* Returns the next event in "free" state, and NULL if none are free.
|
* Returns the next event in "free" state, and NULL if none are free.
|
||||||
|
@ -575,7 +574,7 @@ static struct srp_event_struct *get_event_struct(struct event_pool *pool)
|
||||||
/**
|
/**
|
||||||
* init_event_struct: Initialize fields in an event struct that are always
|
* init_event_struct: Initialize fields in an event struct that are always
|
||||||
* required.
|
* required.
|
||||||
* @evt: The event
|
* @evt_struct: The event
|
||||||
* @done: Routine to call when the event is responded to
|
* @done: Routine to call when the event is responded to
|
||||||
* @format: SRP or MAD format
|
* @format: SRP or MAD format
|
||||||
* @timeout: timeout value set in the CRQ
|
* @timeout: timeout value set in the CRQ
|
||||||
|
@ -597,7 +596,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
|
||||||
* Routines for receiving SCSI responses from the hosting partition
|
* Routines for receiving SCSI responses from the hosting partition
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/**
|
/*
|
||||||
* set_srp_direction: Set the fields in the srp related to data
|
* set_srp_direction: Set the fields in the srp related to data
|
||||||
* direction and number of buffers based on the direction in
|
* direction and number of buffers based on the direction in
|
||||||
* the scsi_cmnd and the number of buffers
|
* the scsi_cmnd and the number of buffers
|
||||||
|
@ -632,9 +631,9 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
|
||||||
/**
|
/**
|
||||||
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
|
* unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
|
||||||
* @cmd: srp_cmd whose additional_data member will be unmapped
|
* @cmd: srp_cmd whose additional_data member will be unmapped
|
||||||
|
* @evt_struct: the event
|
||||||
* @dev: device for which the memory is mapped
|
* @dev: device for which the memory is mapped
|
||||||
*
|
*/
|
||||||
*/
|
|
||||||
static void unmap_cmd_data(struct srp_cmd *cmd,
|
static void unmap_cmd_data(struct srp_cmd *cmd,
|
||||||
struct srp_event_struct *evt_struct,
|
struct srp_event_struct *evt_struct,
|
||||||
struct device *dev)
|
struct device *dev)
|
||||||
|
@ -671,6 +670,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
|
||||||
/**
|
/**
|
||||||
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
|
* map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
|
||||||
* @cmd: struct scsi_cmnd with the scatterlist
|
* @cmd: struct scsi_cmnd with the scatterlist
|
||||||
|
* @evt_struct: struct srp_event_struct to map
|
||||||
* @srp_cmd: srp_cmd that contains the memory descriptor
|
* @srp_cmd: srp_cmd that contains the memory descriptor
|
||||||
* @dev: device for which to map dma memory
|
* @dev: device for which to map dma memory
|
||||||
*
|
*
|
||||||
|
@ -717,8 +717,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
|
||||||
|
|
||||||
/* get indirect table */
|
/* get indirect table */
|
||||||
if (!evt_struct->ext_list) {
|
if (!evt_struct->ext_list) {
|
||||||
evt_struct->ext_list = (struct srp_direct_buf *)
|
evt_struct->ext_list = dma_alloc_coherent(dev,
|
||||||
dma_alloc_coherent(dev,
|
|
||||||
SG_ALL * sizeof(struct srp_direct_buf),
|
SG_ALL * sizeof(struct srp_direct_buf),
|
||||||
&evt_struct->ext_list_token, 0);
|
&evt_struct->ext_list_token, 0);
|
||||||
if (!evt_struct->ext_list) {
|
if (!evt_struct->ext_list) {
|
||||||
|
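The cast removed above is redundant because dma_alloc_coherent() returns void *, which assigns to any object pointer type without a cast in C. A small sketch of the same idea (hypothetical helper; GFP_KERNEL is chosen only for illustration, the driver itself passes 0):

static struct srp_direct_buf *example_alloc_table(struct device *dev,
		dma_addr_t *token)
{
	/* void * converts implicitly to the typed pointer. */
	return dma_alloc_coherent(dev, SG_ALL * sizeof(struct srp_direct_buf),
				  token, GFP_KERNEL);
}
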
@ -745,6 +744,7 @@ static int map_sg_data(struct scsi_cmnd *cmd,
|
||||||
/**
|
/**
|
||||||
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
|
* map_data_for_srp_cmd: - Calls functions to map data for srp cmds
|
||||||
* @cmd: struct scsi_cmnd with the memory to be mapped
|
* @cmd: struct scsi_cmnd with the memory to be mapped
|
||||||
|
* @evt_struct: struct srp_event_struct to map
|
||||||
* @srp_cmd: srp_cmd that contains the memory descriptor
|
* @srp_cmd: srp_cmd that contains the memory descriptor
|
||||||
* @dev: dma device for which to map dma memory
|
* @dev: dma device for which to map dma memory
|
||||||
*
|
*
|
||||||
|
@ -778,6 +778,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
|
||||||
/**
|
/**
|
||||||
* purge_requests: Our virtual adapter just shut down. purge any sent requests
|
* purge_requests: Our virtual adapter just shut down. purge any sent requests
|
||||||
* @hostdata: the adapter
|
* @hostdata: the adapter
|
||||||
|
* @error_code: error code to return as the 'result'
|
||||||
*/
|
*/
|
||||||
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
|
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
|
||||||
{
|
{
|
||||||
|
@ -838,7 +839,7 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* ibmvscsi_timeout - Internal command timeout handler
|
* ibmvscsi_timeout - Internal command timeout handler
|
||||||
* @evt_struct: struct srp_event_struct that timed out
|
* @t: struct srp_event_struct that timed out
|
||||||
*
|
*
|
||||||
* Called when an internally generated command times out
|
  * Called when an internally generated command times out
  */
@@ -1034,8 +1035,8 @@ static inline u16 lun_from_dev(struct scsi_device *dev)
 }
 
 /**
- * ibmvscsi_queue: - The queuecommand function of the scsi template
+ * ibmvscsi_queuecommand_lck() - The queuecommand function of the scsi template
- * @cmd: struct scsi_cmnd to be executed
+ * @cmnd: struct scsi_cmnd to be executed
  * @done: Callback function to be called when cmd is completed
  */
 static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
@@ -1342,7 +1343,7 @@ static void fast_fail_rsp(struct srp_event_struct *evt_struct)
 }
 
 /**
- * init_host - Start host initialization
+ * enable_fast_fail() - Start host initialization
  * @hostdata: ibmvscsi_host_data of host
  *
  * Returns zero if successful.
@@ -1456,16 +1457,15 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
         spin_unlock_irqrestore(hostdata->host->host_lock, flags);
 };
 
-/**
+/*
- * init_adapter: Start virtual adapter initialization sequence
+ * init_adapter() - Start virtual adapter initialization sequence
- *
  */
 static void init_adapter(struct ibmvscsi_host_data *hostdata)
 {
         send_mad_adapter_info(hostdata);
 }
 
-/**
+/*
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
  * the caller waiting on this completion shouldn't touch the evt_struct
@@ -1480,8 +1480,8 @@ static void sync_completion(struct srp_event_struct *evt_struct)
         complete(&evt_struct->comp);
 }
 
-/**
+/*
- * ibmvscsi_abort: Abort a command...from scsi host template
+ * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template
  * send this over to the server and wait synchronously for the response
  */
 static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
@@ -1618,7 +1618,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
         return SUCCESS;
 }
 
-/**
+/*
  * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
  * template send this over to the server and wait synchronously for the
  * response
@@ -1884,7 +1884,6 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
  * ibmvscsi_change_queue_depth - Change the device's queue depth
  * @sdev: scsi device struct
  * @qdepth: depth to set
- * @reason: calling context
  *
  * Return value:
  * actual depth set
@@ -2214,7 +2213,7 @@ static int ibmvscsi_work(void *data)
         return 0;
 }
 
-/**
+/*
  * Called by bus code for each adapter
  */
 static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -2374,7 +2373,7 @@ static int ibmvscsi_resume(struct device *dev)
         return 0;
 }
 
-/**
+/*
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
  * support.
  */
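The ibmvscsi hunks above bring these comment headers in line with what scripts/kernel-doc expects: a /** block should open with the real function name and a short description, and every @name: tag should match an actual parameter, while headers that are not kernel-doc at all are demoted to plain /* comments. As a reference point, a minimal well-formed kernel-doc block is sketched below; the function and parameter names are illustrative only and are not taken from the driver.

/**
 * example_queue_command() - queue one command to the adapter
 * @cmnd: the command to be executed
 * @done: callback invoked once @cmnd completes
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int example_queue_command(int cmnd, void (*done)(int))
{
        /* Illustrative body only: hand the command straight to the callback. */
        done(cmnd);
        return 0;
}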
@@ -128,10 +128,10 @@ static bool connection_broken(struct scsi_info *vscsi)
  * This function calls h_free_q then frees the interrupt bit etc.
  * It must release the lock before doing so because of the time it can take
  * for h_free_crq in PHYP
- * NOTE: the caller must make sure that state and or flags will prevent
+ * NOTE: * the caller must make sure that state and or flags will prevent
  * interrupt handler from scheduling work.
- * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
+ * * anyone calling this function may need to set the CRQ_CLOSED flag
  * we can't do it here, because we don't have the lock
  *
  * EXECUTION ENVIRONMENT:
  * Process level
@@ -2670,7 +2670,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
         u64 data_len = 0;
         enum dma_data_direction dir;
         int attr = 0;
-        int rc = 0;
 
         nexus = vscsi->tport.ibmv_nexus;
         /*
@@ -2725,17 +2724,9 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 
         srp->lun.scsi_lun[0] &= 0x3f;
 
-        rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
+        target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
                         cmd->sense_buf, scsilun_to_int(&srp->lun),
                         data_len, attr, dir, 0);
-        if (rc) {
-                dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
-                spin_lock_bh(&vscsi->intr_lock);
-                list_del(&cmd->list);
-                ibmvscsis_free_cmd_resources(vscsi, cmd);
-                spin_unlock_bh(&vscsi->intr_lock);
-                goto fail;
-        }
         return;
 
 fail:
@@ -546,7 +546,6 @@ static int initio_reset_scsi(struct initio_host * host, int seconds)
 /**
  * initio_init - set up an InitIO host adapter
  * @host: InitIO host adapter
- * @num_scbs: Number of SCBS
  * @bios_addr: BIOS address
  *
  * Set up the host adapter and devices according to the configuration
@@ -866,17 +865,16 @@ static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_b
 
 struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
 {
-        struct scsi_ctrl_blk *tmp, *prev;
+        struct scsi_ctrl_blk *tmp;
         u16 scbp_tarlun;
 
 
-        prev = tmp = host->first_busy;
+        tmp = host->first_busy;
         while (tmp != NULL) {
                 scbp_tarlun = (tmp->lun << 8) | (tmp->target);
                 if (scbp_tarlun == tarlun) { /* Unlink this SCB */
                         break;
                 }
-                prev = tmp;
                 tmp = tmp->next;
         }
 #if DEBUG_QUEUE
@@ -1888,7 +1886,7 @@ static int int_initio_scsi_rst(struct initio_host * host)
 }
 
 /**
- * int_initio_scsi_resel - Reselection occurred
+ * int_initio_resel - Reselection occurred
  * @host: InitIO host adapter
  *
  * A SCSI reselection event has been signalled and the interrupt
@@ -2602,7 +2600,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
 }
 
 /**
- * i91u_queuecommand - Queue a new command if possible
+ * i91u_queuecommand_lck - Queue a new command if possible
  * @cmd: SCSI command block from the mid layer
  * @done: Completion handler
  *
@@ -2651,9 +2649,9 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
 }
 
 /**
- * i91u_biospararm - return the "logical geometry
+ * i91u_biosparam - return the "logical geometry
  * @sdev: SCSI device
- * @dev; Matching block device
+ * @dev: Matching block device
  * @capacity: Sector size of drive
  * @info_array: Return space for BIOS geometry
  *
@@ -2728,10 +2726,8 @@ static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
         }
 }
 
-/**
+/*
  * i91uSCBPost - SCSI callback
- * @host: Pointer to host adapter control block.
- * @cmnd: Pointer to SCSI control block.
  *
  * This is callback routine be called when tulip finish one
  * SCSI command.
@@ -5321,7 +5321,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
 }
 
 /**
- * ipr_eh_dev_reset - Reset the device
+ * __ipr_eh_dev_reset - Reset the device
  * @scsi_cmd: scsi command struct
  *
  * This function issues a device reset to the affected device.
@@ -5583,7 +5583,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
 }
 
 /**
- * ipr_eh_abort - Abort a single op
+ * ipr_scan_finished - Report whether scan is done
  * @shost: scsi host struct
  * @elapsed_time: elapsed time
  *
@@ -5606,7 +5606,7 @@ static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time
 }
 
 /**
- * ipr_eh_host_reset - Reset the host adapter
+ * ipr_eh_abort - Reset the host adapter
  * @scsi_cmd: scsi command struct
  *
  * Return value:
@@ -6715,7 +6715,7 @@ static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
 }
 
 /**
- * ipr_info - Get information about the card/driver
+ * ipr_ioa_info - Get information about the card/driver
  * @host: scsi host struct
  *
  * Return value:
@@ -89,16 +89,14 @@
 
 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
 
-/**
+/*
- *
- *
  * The number of milliseconds to wait while a given phy is consuming power
  * before allowing another set of phys to consume power. Ultimately, this will
  * be specified by OEM parameter.
  */
 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
 
-/**
+/*
  * NORMALIZE_PUT_POINTER() -
  *
  * This macro will normalize the completion queue put pointer so its value can
@@ -108,7 +106,7 @@
         ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
 
 
-/**
+/*
  * NORMALIZE_EVENT_POINTER() -
  *
  * This macro will normalize the completion queue event entry so its value can
@@ -120,7 +118,7 @@
         >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
         )
 
-/**
+/*
  * NORMALIZE_GET_POINTER() -
  *
  * This macro will normalize the completion queue get pointer so its value can
@@ -129,7 +127,7 @@
 #define NORMALIZE_GET_POINTER(x) \
         ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
 
-/**
+/*
  * NORMALIZE_GET_POINTER_CYCLE_BIT() -
  *
  * This macro will normalize the completion queue cycle pointer so it matches
@@ -138,7 +136,7 @@
 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
         ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
 
-/**
+/*
  * COMPLETION_QUEUE_CYCLE_BIT() -
  *
  * This macro will return the cycle bit of the completion queue entry
@@ -637,7 +635,7 @@ irqreturn_t isci_error_isr(int vec, void *data)
 /**
  * isci_host_start_complete() - This function is called by the core library,
  * through the ISCI Module, to indicate controller start status.
- * @isci_host: This parameter specifies the ISCI host object
+ * @ihost: This parameter specifies the ISCI host object
  * @completion_status: This parameter specifies the completion status from the
  * core library.
  *
@@ -670,7 +668,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
  * use any timeout value, but this method provides the suggested minimum
  * start timeout value. The returned value is based upon empirical
  * information determined as a result of interoperability testing.
- * @controller: the handle to the controller object for which to return the
+ * @ihost: the handle to the controller object for which to return the
  * suggested start timeout.
  *
  * This method returns the number of milliseconds for the suggested start
@@ -893,7 +891,7 @@ bool is_controller_start_complete(struct isci_host *ihost)
 
 /**
  * sci_controller_start_next_phy - start phy
- * @scic: controller
+ * @ihost: controller
  *
  * If all the phys have been started, then attempt to transition the
  * controller to the READY state and inform the user
@@ -1145,7 +1143,7 @@ void isci_host_completion_routine(unsigned long data)
  * controller has been quiesced. This method will ensure that all IO
  * requests are quiesced, phys are stopped, and all additional operation by
  * the hardware is halted.
- * @controller: the handle to the controller object to stop.
+ * @ihost: the handle to the controller object to stop.
  * @timeout: This parameter specifies the number of milliseconds in which the
  * stop operation should complete.
  *
@@ -1174,7 +1172,7 @@ static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
  * considered destructive. In other words, all current operations are wiped
  * out. No IO completions for outstanding devices occur. Outstanding IO
  * requests are not aborted or completed at the actual remote device.
- * @controller: the handle to the controller object to reset.
+ * @ihost: the handle to the controller object to reset.
  *
  * Indicate if the controller reset method succeeded or failed in some way.
  * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
@@ -1331,7 +1329,7 @@ static inline void sci_controller_starting_state_exit(struct sci_base_state_mach
 /**
  * sci_controller_set_interrupt_coalescence() - This method allows the user to
  * configure the interrupt coalescence.
- * @controller: This parameter represents the handle to the controller object
+ * @ihost: This parameter represents the handle to the controller object
  * for which its interrupt coalesce register is overridden.
  * @coalesce_number: Used to control the number of entries in the Completion
  * Queue before an interrupt is generated. If the number of entries exceed
@@ -2479,12 +2477,13 @@ struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
 }
 
 /**
+ * sci_controller_allocate_remote_node_context()
  * This method allocates remote node index and the reserves the remote node
  * context space for use. This method can fail if there are no more remote
  * node index available.
- * @scic: This is the controller object which contains the set of
+ * @ihost: This is the controller object which contains the set of
  * free remote node ids
- * @sci_dev: This is the device object which is requesting the a remote node
+ * @idev: This is the device object which is requesting the a remote node
  * id
  * @node_id: This is the remote node id that is assinged to the device if one
  * is available
@@ -2709,11 +2708,11 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
 /**
  * sci_controller_start_task() - This method is called by the SCIC user to
  * send/start a framework task management request.
- * @controller: the handle to the controller object for which to start the task
+ * @ihost: the handle to the controller object for which to start the task
  * management request.
- * @remote_device: the handle to the remote device object for which to start
+ * @idev: the handle to the remote device object for which to start
  * the task management request.
- * @task_request: the handle to the task request object to start.
+ * @ireq: the handle to the task request object to start.
  */
 enum sci_status sci_controller_start_task(struct isci_host *ihost,
                                           struct isci_remote_device *idev,
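Several of the isci/host hunks above only demote comment openers from /** to /*. The distinction matters to scripts/kernel-doc: /** marks a block as kernel-doc and is expected to follow the strict format, while /* starts an ordinary comment that the tool ignores, so headers that never documented a specific function are downgraded rather than rewritten. A small hedged illustration of the two forms, with made-up names:

/* Plain comment: free-form text, ignored by kernel-doc. */
#define EXAMPLE_POWER_CONTROL_INTERVAL 500

/**
 * example_normalize_pointer() - mask a completion queue pointer
 * @value: raw pointer value read from the hardware register
 *
 * Return: the normalized pointer value.
 */
static unsigned int example_normalize_pointer(unsigned int value)
{
        /* The mask is illustrative; a real driver would use its own register layout. */
        return value & 0xfffu;
}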
@@ -339,10 +339,11 @@ static void phy_sata_timeout(struct timer_list *t)
 }
 
 /**
- * This method returns the port currently containing this phy. If the phy is
+ * phy_get_non_dummy_port() - This method returns the port currently containing
- * currently contained by the dummy port, then the phy is considered to not
+ * this phy. If the phy is currently contained by the dummy port, then the phy
- * be part of a port.
+ * is considered to not be part of a port.
- * @sci_phy: This parameter specifies the phy for which to retrieve the
+ *
+ * @iphy: This parameter specifies the phy for which to retrieve the
  * containing port.
  *
  * This method returns a handle to a port that contains the supplied phy.
@@ -360,12 +361,8 @@ struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
         return iphy->owning_port;
 }
 
-/**
+/*
- * This method will assign a port to the phy object.
+ * sci_phy_set_port() - This method will assign a port to the phy object.
- * @out]: iphy This parameter specifies the phy for which to assign a port
- * object.
- *
- *
  */
 void sci_phy_set_port(
         struct isci_phy *iphy,
@@ -398,11 +395,11 @@ enum sci_status sci_phy_initialize(struct isci_phy *iphy,
 }
 
 /**
- * This method assigns the direct attached device ID for this phy.
+ * sci_phy_setup_transport() - This method assigns the direct attached device ID for this phy.
  *
- * @iphy The phy for which the direct attached device id is to
+ * @iphy: The phy for which the direct attached device id is to
  * be assigned.
- * @device_id The direct attached device ID to assign to the phy.
+ * @device_id: The direct attached device ID to assign to the phy.
  * This will either be the RNi for the device or an invalid RNi if there
  * is no current device assigned to the phy.
  */
@@ -597,7 +594,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
 /**
  * sci_phy_complete_link_training - perform processing common to
  * all protocols upon completion of link training.
- * @sci_phy: This parameter specifies the phy object for which link training
+ * @iphy: This parameter specifies the phy object for which link training
  * has completed.
  * @max_link_rate: This parameter specifies the maximum link rate to be
  * associated with this phy.
@@ -1167,8 +1164,8 @@ static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine
 }
 
 /**
- *
+ * scu_link_layer_stop_protocol_engine()
- * @sci_phy: This is the struct isci_phy object to stop.
+ * @iphy: This is the struct isci_phy object to stop.
  *
  * This method will stop the struct isci_phy object. This does not reset the
  * protocol engine it just suspends it and places it in a state where it will
@@ -1219,7 +1216,8 @@ static void scu_link_layer_start_oob(struct isci_phy *iphy)
 }
 
 /**
- *
+ * scu_link_layer_tx_hard_reset()
+ * @iphy: This is the struct isci_phy object to stop.
  *
  * This method will transmit a hard reset request on the specified phy. The SCU
  * hardware requires that we reset the OOB state machine and set the hard reset
@@ -1420,7 +1418,7 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
 /**
  * isci_phy_control() - This function is one of the SAS Domain Template
  * functions. This is a phy management function.
- * @phy: This parameter specifies the sphy being controlled.
+ * @sas_phy: This parameter specifies the sphy being controlled.
  * @func: This parameter specifies the phy control function being invoked.
  * @buf: This parameter is specific to the phy function being invoked.
  *
@@ -447,7 +447,6 @@ void sci_phy_get_attached_sas_address(
         struct isci_phy *iphy,
         struct sci_sas_address *sas_address);
 
-struct sci_phy_proto;
 void sci_phy_get_protocols(
         struct isci_phy *iphy,
         struct sci_phy_proto *protocols);
@@ -62,7 +62,7 @@
 
 #undef C
 #define C(a) (#a)
-const char *port_state_name(enum sci_port_states state)
+static const char *port_state_name(enum sci_port_states state)
 {
         static const char * const strings[] = PORT_STATES;
 
@@ -115,9 +115,9 @@ static u32 sci_port_get_phys(struct isci_port *iport)
 /**
  * sci_port_get_properties() - This method simply returns the properties
  * regarding the port, such as: physical index, protocols, sas address, etc.
- * @port: this parameter specifies the port for which to retrieve the physical
+ * @iport: this parameter specifies the port for which to retrieve the physical
  * index.
- * @properties: This parameter specifies the properties structure into which to
+ * @prop: This parameter specifies the properties structure into which to
  * copy the requested information.
  *
  * Indicate if the user specified a valid port. SCI_SUCCESS This value is
@@ -233,8 +233,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
  * isci_port_link_down() - This function is called by the sci core when a link
  * becomes inactive.
  * @isci_host: This parameter specifies the isci host object.
- * @phy: This parameter specifies the isci phy with the active link.
+ * @isci_phy: This parameter specifies the isci phy with the active link.
- * @port: This parameter specifies the isci port with the active link.
+ * @isci_port: This parameter specifies the isci port with the active link.
  *
  */
 static void isci_port_link_down(struct isci_host *isci_host,
@@ -308,7 +308,7 @@ static void port_state_machine_change(struct isci_port *iport,
 /**
  * isci_port_hard_reset_complete() - This function is called by the sci core
  * when the hard reset complete notification has been received.
- * @port: This parameter specifies the sci port with the active link.
+ * @isci_port: This parameter specifies the sci port with the active link.
  * @completion_status: This parameter specifies the core status for the reset
  * process.
  *
@@ -395,9 +395,10 @@ bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
 }
 
 /**
- *
+ * sci_port_is_phy_mask_valid()
- * @sci_port: This is the port object for which to determine if the phy mask
+ * @iport: This is the port object for which to determine if the phy mask
  * can be supported.
+ * @phy_mask: Phy mask belonging to this port
  *
  * This method will return a true value if the port's phy mask can be supported
  * by the SCU. The following is a list of valid PHY mask configurations for
@@ -533,7 +534,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
 /**
  * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
  *
- * @sci_port: logical port on which we need to create the remote node context
+ * @iport: logical port on which we need to create the remote node context
  * @rni: remote node index for this remote node context.
  *
  * This routine will construct a dummy remote node context data structure
@@ -677,8 +678,8 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
 
 /**
  * sci_port_general_link_up_handler - phy can be assigned to port?
- * @sci_port: sci_port object for which has a phy that has gone link up.
+ * @iport: sci_port object for which has a phy that has gone link up.
- * @sci_phy: This is the struct isci_phy object that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
  * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
  *
  * Determine if this phy can be assigned to this port . If the phy is
@@ -716,10 +717,11 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
 
 
 /**
+ * sci_port_is_wide()
  * This method returns false if the port only has a single phy object assigned.
  * If there are no phys or more than one phy then the method will return
  * true.
- * @sci_port: The port for which the wide port condition is to be checked.
+ * @iport: The port for which the wide port condition is to be checked.
  *
  * bool true Is returned if this is a wide ported port. false Is returned if
  * this is a narrow port.
@@ -739,12 +741,13 @@ static bool sci_port_is_wide(struct isci_port *iport)
 }
 
 /**
+ * sci_port_link_detected()
  * This method is called by the PHY object when the link is detected. if the
  * port wants the PHY to continue on to the link up state then the port
  * layer must return true. If the port object returns false the phy object
  * must halt its attempt to go link up.
- * @sci_port: The port associated with the phy object.
+ * @iport: The port associated with the phy object.
- * @sci_phy: The phy object that is trying to go link up.
+ * @iphy: The phy object that is trying to go link up.
  *
  * true if the phy object can continue to the link up condition. true Is
  * returned if this phy can continue to the ready state. false Is returned if
@@ -817,10 +820,8 @@ static void port_timeout(struct timer_list *t)
 
 /* --------------------------------------------------------------------------- */
 
-/**
+/*
  * This function updates the hardwares VIIT entry for this port.
- *
- *
  */
 static void sci_port_update_viit_entry(struct isci_port *iport)
 {
@@ -874,7 +875,7 @@ static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
 
 /**
  * sci_port_post_dummy_request() - post dummy/workaround request
- * @sci_port: port to post task
+ * @iport: port to post task
  *
  * Prevent the hardware scheduler from posting new requests to the front
  * of the scheduler queue causing a starvation problem for currently
@@ -899,10 +900,11 @@ static void sci_port_post_dummy_request(struct isci_port *iport)
 }
 
 /**
- * This routine will abort the dummy request. This will alow the hardware to
+ * sci_port_abort_dummy_request()
+ * This routine will abort the dummy request. This will allow the hardware to
  * power down parts of the silicon to save power.
  *
- * @sci_port: The port on which the task must be aborted.
+ * @iport: The port on which the task must be aborted.
  *
  */
 static void sci_port_abort_dummy_request(struct isci_port *iport)
@@ -923,8 +925,8 @@ static void sci_port_abort_dummy_request(struct isci_port *iport)
 }
 
 /**
- *
+ * sci_port_resume_port_task_scheduler()
- * @sci_port: This is the struct isci_port object to resume.
+ * @iport: This is the struct isci_port object to resume.
  *
  * This method will resume the port task scheduler for this port object. none
  */
@@ -1014,8 +1016,8 @@ static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
 }
 
 /**
- *
+ * sci_port_ready_substate_operational_exit()
- * @object: This is the object which is cast to a struct isci_port object.
+ * @sm: This is the object which is cast to a struct isci_port object.
  *
  * This method will perform the actions required by the struct isci_port on
  * exiting the SCI_PORT_SUB_OPERATIONAL. This function reports
@@ -1186,9 +1188,9 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
 }
 
 /**
- * sci_port_add_phy() -
+ * sci_port_add_phy()
- * @sci_port: This parameter specifies the port in which the phy will be added.
+ * @iport: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * @iphy: This parameter is the phy which is to be added to the port.
  *
  * This method will add a PHY to the selected port. This method returns an
  * enum sci_status. SCI_SUCCESS the phy has been added to the port. Any other
@@ -1257,9 +1259,9 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 }
 
 /**
- * sci_port_remove_phy() -
+ * sci_port_remove_phy()
- * @sci_port: This parameter specifies the port in which the phy will be added.
+ * @iport: This parameter specifies the port in which the phy will be added.
- * @sci_phy: This parameter is the phy which is to be added to the port.
+ * @iphy: This parameter is the phy which is to be added to the port.
  *
  * This method will remove the PHY from the selected PORT. This method returns
  * an enum sci_status. SCI_SUCCESS the phy has been removed from the port. Any
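Most of the isci/port hunks above follow a single pattern: @sci_port, @sci_phy, @controller and similar tags are renamed to @iport, @iphy and @ihost so that each tag names a parameter that actually appears in the prototype, which is what kernel-doc checks for. A self-contained sketch of that rule, using invented types and names:

struct example_port { int index; };
struct example_phy { int id; };

/**
 * example_port_add_phy() - add a phy to a port
 * @iport: the port to which the phy is added (tag matches the parameter name)
 * @iphy: the phy being added
 *
 * Return: 0 on success.
 */
static int example_port_add_phy(struct example_port *iport,
                                struct example_phy *iphy)
{
        /* Illustrative body only. */
        (void)iport;
        (void)iphy;
        return 0;
}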
@@ -73,7 +73,7 @@ enum SCIC_SDS_APC_ACTIVITY {
  * ****************************************************************************** */
 
 /**
- *
+ * sci_sas_address_compare()
  * @address_one: A SAS Address to be compared.
  * @address_two: A SAS Address to be compared.
  *
@@ -102,9 +102,9 @@ static s32 sci_sas_address_compare(
 }
 
 /**
- *
+ * sci_port_configuration_agent_find_port()
- * @controller: The controller object used for the port search.
+ * @ihost: The controller object used for the port search.
- * @phy: The phy object to match.
+ * @iphy: The phy object to match.
  *
  * This routine will find a matching port for the phy. This means that the
  * port and phy both have the same broadcast sas address and same received sas
@@ -145,8 +145,8 @@ static struct isci_port *sci_port_configuration_agent_find_port(
 }
 
 /**
- *
+ * sci_port_configuration_agent_validate_ports()
- * @controller: This is the controller object that contains the port agent
+ * @ihost: This is the controller object that contains the port agent
  * @port_agent: This is the port configuration agent for the controller.
  *
  * This routine will validate the port configuration is correct for the SCU
@@ -373,15 +373,16 @@ static void sci_mpc_agent_link_up(struct isci_host *ihost,
 }
 
 /**
- *
+ * sci_mpc_agent_link_down()
- * @controller: This is the controller object that receives the link down
+ * @ihost: This is the controller object that receives the link down
  * notification.
- * @port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If the is no
  * associated port this is an NULL. The port is an invalid
  * handle only if the phy was never port of this port. This happens when
  * the phy is not broadcasting the same SAS address as the other phys in the
  * assigned port.
- * @phy: This is the phy object which has gone link down.
+ * @iphy: This is the phy object which has gone link down.
  *
  * This function handles the manual port configuration link down notifications.
  * Since all ports and phys are associated at initialization time we just turn
@@ -590,11 +591,12 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
 
 /**
  * sci_apc_agent_link_up - handle apc link up events
- * @scic: This is the controller object that receives the link up
+ * @ihost: This is the controller object that receives the link up
  * notification.
- * @sci_port: This is the port object associated with the phy. If the is no
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If the is no
  * associated port this is an NULL.
- * @sci_phy: This is the phy object which has gone link up.
+ * @iphy: This is the phy object which has gone link up.
  *
  * This method handles the automatic port configuration for link up
  * notifications. Is it possible to get a link down notification from a phy
@@ -620,9 +622,10 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
 }
 
 /**
- *
+ * sci_apc_agent_link_down()
- * @controller: This is the controller object that receives the link down
+ * @ihost: This is the controller object that receives the link down
  * notification.
+ * @port_agent: This is the port configuration agent for the controller.
  * @iport: This is the port object associated with the phy. If the is no
  * associated port this is an NULL.
  * @iphy: This is the phy object which has gone link down.
@@ -697,9 +700,7 @@ static void apc_agent_timeout(struct timer_list *t)
  * Public port configuration agent routines
  * ****************************************************************************** */
 
-/**
+/*
- *
- *
  * This method will construct the port configuration agent for operation. This
  * call is universal for both manual port configuration and automatic port
  * configuration modes.
@@ -288,8 +288,9 @@ enum sci_status isci_remote_device_terminate_requests(
  * isci_remote_device_not_ready() - This function is called by the ihost when
  * the remote device is not ready. We mark the isci device as ready (not
  * "ready_for_io") and signal the waiting proccess.
- * @isci_host: This parameter specifies the isci host object.
+ * @ihost: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device
+ * @idev: This parameter specifies the remote device
+ * @reason: Reason to switch on
  *
  * sci_lock is held on entrance to this function.
  */
@@ -1000,7 +1001,7 @@ static void sci_remote_device_initial_state_enter(struct sci_base_state_machine
 
 /**
  * sci_remote_device_destruct() - free remote node context and destruct
- * @remote_device: This parameter specifies the remote device to be destructed.
+ * @idev: This parameter specifies the remote device to be destructed.
  *
  * Remote device objects are a limited resource. As such, they must be
  * protected. Thus calls to construct and destruct are mutually exclusive and
@@ -1236,8 +1237,8 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
 
 /**
  * sci_remote_device_construct() - common construction
- * @sci_port: SAS/SATA port through which this device is accessed.
+ * @iport: SAS/SATA port through which this device is accessed.
- * @sci_dev: remote device to construct
+ * @idev: remote device to construct
  *
  * This routine just performs benign initialization and does not
  * allocate the remote_node_context which is left to
@@ -1256,7 +1257,7 @@ static void sci_remote_device_construct(struct isci_port *iport,
                 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
 }
 
-/**
+/*
  * sci_remote_device_da_construct() - construct direct attached device.
  *
  * The information (e.g. IAF, Signature FIS, etc.) necessary to build
@@ -1294,7 +1295,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
         return SCI_SUCCESS;
 }
 
-/**
+/*
  * sci_remote_device_ea_construct() - construct expander attached device
  *
  * Remote node context(s) is/are a global resource allocated by this
@@ -1384,7 +1385,7 @@ static bool isci_remote_device_test_resume_done(
         return done;
 }
 
-void isci_remote_device_wait_for_resume_from_abort(
+static void isci_remote_device_wait_for_resume_from_abort(
         struct isci_host *ihost,
         struct isci_remote_device *idev)
 {
@@ -1439,7 +1440,7 @@ enum sci_status isci_remote_device_resume_from_abort(
  * sci_remote_device_start() - This method will start the supplied remote
  * device. This method enables normal IO requests to flow through to the
  * remote device.
- * @remote_device: This parameter specifies the device to be started.
+ * @idev: This parameter specifies the device to be started.
  * @timeout: This parameter specifies the number of milliseconds in which the
  * start operation should complete.
  *
@@ -1501,10 +1502,11 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
 }
 
 /**
+ * isci_remote_device_alloc()
  * This function builds the isci_remote_device when a libsas dev_found message
  * is received.
- * @isci_host: This parameter specifies the isci host object.
+ * @ihost: This parameter specifies the isci host object.
- * @port: This parameter specifies the isci_port connected to this device.
+ * @iport: This parameter specifies the isci_port connected to this device.
  *
  * pointer to new isci_remote_device.
  */
@@ -1549,8 +1551,8 @@ void isci_remote_device_release(struct kref *kref)
 /**
  * isci_remote_device_stop() - This function is called internally to stop the
  * remote device.
- * @isci_host: This parameter specifies the isci host object.
+ * @ihost: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device.
+ * @idev: This parameter specifies the remote device.
  *
  * The status of the ihost request to stop.
  */
@@ -1585,8 +1587,7 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
 /**
  * isci_remote_device_gone() - This function is called by libsas when a domain
  * device is removed.
- * @domain_device: This parameter specifies the libsas domain device.
+ * @dev: This parameter specifies the libsas domain device.
- *
  */
 void isci_remote_device_gone(struct domain_device *dev)
 {
@@ -1606,7 +1607,7 @@ void isci_remote_device_gone(struct domain_device *dev)
  * device is discovered. A remote device object is created and started. the
  * function then sleeps until the sci core device started message is
  * received.
- * @domain_device: This parameter specifies the libsas domain device.
+ * @dev: This parameter specifies the libsas domain device.
  *
  * status, zero indicates success.
  */
@@ -74,7 +74,7 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
 #undef C
 
 /**
- *
+ * sci_remote_node_context_is_ready()
  * @sci_rnc: The state of the remote node context object to check.
  *
  * This method will return true if the remote node context is in a READY state
@@ -163,12 +163,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
         rnc->ssp.oaf_source_zone_group = 0;
         rnc->ssp.oaf_more_compatibility_features = 0;
 }
-/**
+/*
- *
- * @sci_rnc:
- * @callback:
- * @callback_parameter:
- *
  * This method will setup the remote node context object so it will transition
  * to its ready state. If the remote node context is already setup to
  * transition to its final state then this function does nothing. none
@@ -202,9 +197,7 @@ static void sci_remote_node_context_setup_to_destroy(
         wake_up(&ihost->eventq);
 }
 
-/**
+/*
- *
- *
  * This method just calls the user callback function and then resets the
  * callback.
  */
|
|
|
@@ -53,17 +53,15 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/**
+/*
  * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
  * public, protected, and private methods.
- *
- *
  */
 #include "remote_node_table.h"
 #include "remote_node_context.h"
 
 /**
- *
+ * sci_remote_node_table_get_group_index()
  * @remote_node_table: This is the remote node index table from which the
  * selection will be made.
  * @group_table_index: This is the index to the group table from which to

@@ -98,10 +96,10 @@ static u32 sci_remote_node_table_get_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This the remote node table in which to clear the
+ * sci_remote_node_table_clear_group_index()
+ * @remote_node_table: This the remote node table in which to clear the
  * selector.
- * @set_index: This is the remote node selector in which the change will be
+ * @group_table_index: This is the remote node selector in which the change will be
  * made.
  * @group_index: This is the bit index in the table to be modified.
  *

@@ -128,8 +126,8 @@ static void sci_remote_node_table_clear_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This the remote node table in which to set the
+ * sci_remote_node_table_set_group_index()
+ * @remote_node_table: This the remote node table in which to set the
  * selector.
  * @group_table_index: This is the remote node selector in which the change
  * will be made.

@@ -158,8 +156,8 @@ static void sci_remote_node_table_set_group_index(
 }
 
 /**
- *
- * @out]: remote_node_table This is the remote node table in which to modify
+ * sci_remote_node_table_set_node_index()
+ * @remote_node_table: This is the remote node table in which to modify
  * the remote node availability.
  * @remote_node_index: This is the remote node index that is being returned to
  * the table.

@@ -191,8 +189,8 @@ static void sci_remote_node_table_set_node_index(
 }
 
 /**
- *
- * @out]: remote_node_table This is the remote node table from which to clear
+ * sci_remote_node_table_clear_node_index()
+ * @remote_node_table: This is the remote node table from which to clear
  * the available remote node bit.
  * @remote_node_index: This is the remote node index which is to be cleared
  * from the table.

@@ -224,8 +222,8 @@ static void sci_remote_node_table_clear_node_index(
 }
 
 /**
- *
- * @out]: remote_node_table The remote node table from which the slot will be
+ * sci_remote_node_table_clear_group()
+ * @remote_node_table: The remote node table from which the slot will be
  * cleared.
  * @group_index: The index for the slot that is to be cleared.
  *

@@ -252,9 +250,8 @@ static void sci_remote_node_table_clear_group(
         remote_node_table->available_remote_nodes[dword_location] = dword_value;
 }
 
-/**
- *
- * @remote_node_table:
+/*
+ * sci_remote_node_table_set_group()
  *
  * THis method sets an entire remote node group in the remote node table.
  */

@@ -280,7 +277,7 @@ static void sci_remote_node_table_set_group(
 }
 
 /**
- *
+ * sci_remote_node_table_get_group_value()
  * @remote_node_table: This is the remote node table that for which the group
  * value is to be returned.
  * @group_index: This is the group index to use to find the group value.

@@ -307,8 +304,8 @@ static u8 sci_remote_node_table_get_group_value(
 }
 
 /**
- *
- * @out]: remote_node_table The remote that which is to be initialized.
+ * sci_remote_node_table_initialize()
+ * @remote_node_table: The remote that which is to be initialized.
  * @remote_node_entries: The number of entries to put in the table.
  *
  * This method will initialize the remote node table for use. none

@@ -365,10 +362,10 @@ void sci_remote_node_table_initialize(
 }
 
 /**
- *
- * @out]: remote_node_table The remote node table from which to allocate a
+ * sci_remote_node_table_allocate_single_remote_node()
+ * @remote_node_table: The remote node table from which to allocate a
  * remote node.
- * @table_index: The group index that is to be used for the search.
+ * @group_table_index: The group index that is to be used for the search.
  *
  * This method will allocate a single RNi from the remote node table. The
  * table index will determine from which remote node group table to search.

@@ -425,10 +422,10 @@ static u16 sci_remote_node_table_allocate_single_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_allocate_triple_remote_node()
  * @remote_node_table: This is the remote node table from which to allocate the
  * remote node entries.
- * @group_table_index: THis is the group table index which must equal two (2)
+ * @group_table_index: This is the group table index which must equal two (2)
  * for this operation.
  *
  * This method will allocate three consecutive remote node context entries. If

@@ -462,7 +459,7 @@ static u16 sci_remote_node_table_allocate_triple_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_allocate_remote_node()
  * @remote_node_table: This is the remote node table from which the remote node
  * allocation is to take place.
  * @remote_node_count: This is ther remote node count which is one of

@@ -505,9 +502,10 @@ u16 sci_remote_node_table_allocate_remote_node(
 }
 
 /**
- *
- * @remote_node_table:
- *
+ * sci_remote_node_table_release_single_remote_node()
+ * @remote_node_table: This is the remote node table from which the remote node
+ * release is to take place.
+ * @remote_node_index: This is the remote node index that is being released.
  * This method will free a single remote node index back to the remote node
  * table. This routine will update the remote node groups
  */

@@ -550,9 +548,10 @@ static void sci_remote_node_table_release_single_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_release_triple_remote_node()
  * @remote_node_table: This is the remote node table to which the remote node
  * index is to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
  *
  * This method will release a group of three consecutive remote nodes back to
  * the free remote nodes.

@@ -573,11 +572,12 @@ static void sci_remote_node_table_release_triple_remote_node(
 }
 
 /**
- *
+ * sci_remote_node_table_release_remote_node_index()
  * @remote_node_table: The remote node table to which the remote node index is
  * to be freed.
  * @remote_node_count: This is the count of consecutive remote nodes that are
  * to be freed.
+ * @remote_node_index: This is the remote node index that is being released.
  *
  * This method will release the remote node index back into the remote node
  * table free pool.
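The hunks above convert the remote node table's comment blocks into the shape the kernel-doc tooling expects: the block opens with the function name, carries one "@parameter:" line per real argument, and blocks that are not meant to be kernel-doc are downgraded from "/**" to "/*". As a reference, the following is a minimal sketch of that layout around a made-up helper; the struct and function are illustrative stand-ins, not part of the isci driver.

/* Illustrative stand-in type, not from the driver. */
struct example_table {
        unsigned long free_mask;        /* one bit per slot, set while the slot is free */
        unsigned int nr_slots;
};

/**
 * example_table_find_free_slot() - Find the first free slot in a table
 * @table: table to search; must not be NULL
 * @start: index at which to begin the search
 *
 * Return: index of the first free slot, or -1 if no slot is free.
 */
static int example_table_find_free_slot(const struct example_table *table,
                                        unsigned int start)
{
        unsigned int i;

        for (i = start; i < table->nr_slots; i++)
                if (table->free_mask & (1UL << i))
                        return i;
        return -1;
}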
@@ -207,11 +207,8 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
                 SCI_CONTROLLER_INVALID_IO_TAG;
 }
 
-/**
+/*
  * This method is will fill in the SCU Task Context for any type of SSP request.
- * @sci_req:
- * @task_context:
- *
  */
 static void scu_ssp_request_construct_task_context(
         struct isci_request *ireq,

@@ -410,10 +407,8 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
         tc->ref_tag_seed_gen = 0;
 }
 
-/**
+/*
  * This method is will fill in the SCU Task Context for a SSP IO request.
- * @sci_req:
- *
  */
 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
         enum dma_data_direction dir,

@@ -456,17 +451,16 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
 }
 
 /**
- * This method will fill in the SCU Task Context for a SSP Task request. The
- * following important settings are utilized: -# priority ==
- * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
- * ahead of other task destined for the same Remote Node. -# task_type ==
- * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
- * (i.e. non-raw frame) is being utilized to perform task management. -#
- * control_frame == 1. This ensures that the proper endianess is set so
- * that the bytes are transmitted in the right order for a task frame.
- * @sci_req: This parameter specifies the task request object being
- * constructed.
- *
+ * scu_ssp_task_request_construct_task_context() - This method will fill in
+ * the SCU Task Context for a SSP Task request. The following important
+ * settings are utilized: -# priority == SCU_TASK_PRIORITY_HIGH. This
+ * ensures that the task request is issued ahead of other task destined
+ * for the same Remote Node. -# task_type == SCU_TASK_TYPE_IOREAD. This
+ * simply indicates that a normal request type (i.e. non-raw frame) is
+ * being utilized to perform task management. -#control_frame == 1. This
+ * ensures that the proper endianness is set so that the bytes are
+ * transmitted in the right order for a task frame.
+ * @ireq: This parameter specifies the task request object being constructed.
  */
 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
 {

@@ -484,9 +478,10 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
 }
 
 /**
+ * scu_sata_request_construct_task_context()
  * This method is will fill in the SCU Task Context for any type of SATA
  * request. This is called from the various SATA constructors.
- * @sci_req: The general IO request object which is to be used in
+ * @ireq: The general IO request object which is to be used in
  * constructing the SCU task context.
  * @task_context: The buffer pointer for the SCU task context which is being
  * constructed.

@@ -593,9 +588,9 @@ static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
         return SCI_SUCCESS;
 }
 
-/**
- *
- * @sci_req: This parameter specifies the request to be constructed as an
+/*
+ * sci_stp_optimized_request_construct()
+ * @ireq: This parameter specifies the request to be constructed as an
  * optimized request.
  * @optimized_task_type: This parameter specifies whether the request is to be
  * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A

@@ -778,11 +773,11 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
         return status;
 }
 
+#define SCU_TASK_CONTEXT_SRAM 0x200000
 /**
  * sci_req_tx_bytes - bytes transferred when reply underruns request
  * @ireq: request that was terminated early
  */
-#define SCU_TASK_CONTEXT_SRAM 0x200000
 static u32 sci_req_tx_bytes(struct isci_request *ireq)
 {
         struct isci_host *ihost = ireq->owning_controller;

@@ -1396,10 +1391,10 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
 }
 
 /**
- *
- * @stp_request: The request that is used for the SGL processing.
- * @data_buffer: The buffer of data to be copied.
- * @length: The length of the data transfer.
+ * sci_stp_request_pio_data_in_copy_data_buffer()
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
  *
  * Copy the data from the buffer for the length specified to the IO request SGL
  * specified data region. enum sci_status

@@ -1443,8 +1438,8 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
 }
 
 /**
- *
- * @sci_req: The PIO DATA IN request that is to receive the data.
+ * sci_stp_request_pio_data_in_copy_data()
+ * @stp_req: The PIO DATA IN request that is to receive the data.
  * @data_buffer: The buffer to copy from.
  *
  * Copy the data buffer to the io request data region. enum sci_status

@@ -2452,7 +2447,7 @@ sci_io_request_tc_completion(struct isci_request *ireq,
  * isci_request_process_response_iu() - This function sets the status and
  * response iu, in the task struct, from the request object for the upper
  * layer driver.
- * @sas_task: This parameter is the task struct from the upper layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @resp_iu: This parameter points to the response iu of the completed request.
  * @dev: This parameter specifies the linux device struct.
  *

@@ -2485,6 +2480,7 @@ static void isci_request_process_response_iu(
  * isci_request_set_open_reject_status() - This function prepares the I/O
  * completion for OPEN_REJECT conditions.
  * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
  * @open_rej_reason: This parameter specifies the encoded reason for the

@@ -2509,7 +2505,9 @@ static void isci_request_set_open_reject_status(
 /**
  * isci_request_handle_controller_specific_errors() - This function decodes
  * controller-specific I/O completion error conditions.
+ * @idev: Remote device
  * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the task struct from the upper layer driver.
  * @response_ptr: This parameter specifies the service response for the I/O.
  * @status_ptr: This parameter specifies the exec status for the I/O.
  *

@@ -3326,7 +3324,7 @@ static enum sci_status isci_smp_request_build(struct isci_request *ireq)
  * @ihost: This parameter specifies the ISCI host object
  * @request: This parameter points to the isci_request object allocated in the
  * request construct function.
- * @sci_device: This parameter is the handle for the sci core's remote device
+ * @idev: This parameter is the handle for the sci core's remote device
  * object that is the destination for this request.
  *
  * SCI_SUCCESS on successfull completion, or specific failure code.
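One hunk above also moves the SCU_TASK_CONTEXT_SRAM define from between a kernel-doc block and sci_req_tx_bytes() to just before the block. kernel-doc ties a /** ... */ comment to the definition that immediately follows it, so an unrelated macro wedged in between detaches the comment from the function it is meant to document. A rough sketch of the corrected ordering, with made-up names standing in for the driver's:

/* All names below are stand-ins for illustration only. */
#define EXAMPLE_CONTEXT_SRAM  0x200000  /* unrelated macro, kept above the kernel-doc */

struct example_request {
        unsigned int bytes_done;
};

/**
 * example_tx_bytes() - bytes transferred when a reply underruns the request
 * @req: request that was terminated early
 *
 * Return: the byte count recorded for the truncated request.
 */
unsigned int example_tx_bytes(const struct example_request *req)
{
        return req ? req->bytes_done : 0;
}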
@@ -369,7 +369,7 @@ static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
         tmf->io_tag = old_request->io_tag;
 }
 
-/**
+/*
  * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain
  * Template functions.
  * @lun: This parameter specifies the lun to be reset.

@@ -668,7 +668,6 @@ int isci_task_clear_task_set(
  * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
  * returned, libsas will turn this into a target reset
  * @task: This parameter specifies the sas task being queried.
- * @lun: This parameter specifies the lun associated with this request.
  *
  * status, zero indicates success.
  */
@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
         if (!esp->command_block)
                 goto fail_unmap_regs;
 
-        host->irq = platform_get_irq(dev, 0);
+        host->irq = err = platform_get_irq(dev, 0);
+        if (err < 0)
+                goto fail_unmap_command_block;
         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
         if (err < 0)
                 goto fail_unmap_command_block;
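The jazz_esp hunk stores the value returned by platform_get_irq() and bails out of the probe when it is negative, instead of handing a negative errno straight to request_irq(). The sketch below mirrors that shape in a self-contained form; the fake_* helpers only model the return convention (negative errno on failure, a usable number on success) and are not the kernel API.

#include <stdio.h>

/* Stand-in for platform_get_irq(): negative errno on failure. */
static int fake_platform_get_irq(int simulate_missing)
{
        return simulate_missing ? -6 /* -ENXIO */ : 42;
}

/* Stand-in for request_irq(): 0 on success. */
static int fake_request_irq(int irq)
{
        printf("registering handler for IRQ %d\n", irq);
        return 0;
}

static int example_probe(int simulate_missing_irq)
{
        int irq, err;

        irq = err = fake_platform_get_irq(simulate_missing_irq);
        if (err < 0)
                return err;     /* propagate the errno, never pass it on as an IRQ number */

        err = fake_request_irq(irq);
        if (err < 0)
                return err;

        return 0;
}

int main(void)
{
        printf("probe (irq present): %d\n", example_probe(0));
        printf("probe (irq missing): %d\n", example_probe(1));
        return 0;
}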
@@ -2248,7 +2248,7 @@ int fc_slave_alloc(struct scsi_device *sdev)
 EXPORT_SYMBOL(fc_slave_alloc);
 
 /**
- * fc_fcp_destory() - Tear down the FCP layer for a given local port
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
  * @lport: The local port that no longer needs the FCP layer
  */
 void fc_fcp_destroy(struct fc_lport *lport)
@@ -703,7 +703,7 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
 }
 
 /**
- * fc_rport_enter_ready() - Enter the ready state and start discovery
+ * fc_lport_enter_ready() - Enter the ready state and start discovery
  * @lport: The local port that is ready
  */
 static void fc_lport_enter_ready(struct fc_lport *lport)

@@ -747,7 +747,7 @@ static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
 }
 
 /**
- * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint
+ * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
  * @lport: The local port which will have its Port ID set.
  * @port_id: The new port ID.
  *

@@ -1393,7 +1393,7 @@ static struct fc_rport_operations fc_lport_rport_ops = {
 };
 
 /**
- * fc_rport_enter_dns() - Create a fc_rport for the name server
+ * fc_lport_enter_dns() - Create a fc_rport for the name server
  * @lport: The local port requesting a remote port for the name server
  */
 static void fc_lport_enter_dns(struct fc_lport *lport)

@@ -1509,7 +1509,7 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
 }
 
 /**
- * fc_rport_enter_fdmi() - Create a fc_rport for the management server
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
  * @lport: The local port requesting a remote port for the management server
  */
 static void fc_lport_enter_fdmi(struct fc_lport *lport)

@@ -1640,7 +1640,7 @@ void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
 EXPORT_SYMBOL(fc_lport_logo_resp);
 
 /**
- * fc_rport_enter_logo() - Logout of the fabric
+ * fc_lport_enter_logo() - Logout of the fabric
  * @lport: The local port to be logged out
  */
 static void fc_lport_enter_logo(struct fc_lport *lport)

@@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 
         if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
                 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
-                             "lport->mfs:%hu\n", mfs, lport->mfs);
+                             "lport->mfs:%u\n", mfs, lport->mfs);
                 fc_lport_error(lport, fp);
                 goto out;
         }

@@ -1782,7 +1782,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 EXPORT_SYMBOL(fc_lport_flogi_resp);
 
 /**
- * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
  * @lport: Fibre Channel local port to be logged in to the fabric
  */
 static void fc_lport_enter_flogi(struct fc_lport *lport)
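The FLOGI hunk swaps %hu for %u when printing lport->mfs. %hu tells the formatter to expect an unsigned short, so compilers raise -Wformat when the argument is a wider unsigned field; matching the specifier to the argument's actual type is the fix. A standalone illustration with made-up variables (not libfc's):

#include <stdio.h>

int main(void)
{
        unsigned short mfs = 2048;      /* genuinely an unsigned short */
        unsigned int max_frame = 2112;  /* a wider field, like a u32 in a driver struct */

        /* Fine: %hu matches an unsigned short argument. */
        printf("mfs:%hu\n", mfs);

        /* %hu here would draw a -Wformat warning; %u matches the unsigned int. */
        printf("max_frame:%u\n", max_frame);

        return 0;
}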
@@ -1486,7 +1486,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 }
 
 /**
- * fc_rport_els_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
  * @sp: The sequence the ADISC response was on
  * @fp: The ADISC response frame
  * @rdata_arg: The remote port that sent the ADISC response
@@ -35,46 +35,40 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
         /* ts->resp == SAS_TASK_COMPLETE */
         /* task delivered, what happened afterwards? */
         switch (ts->stat) {
         case SAS_DEV_NO_RESPONSE:
                 return AC_ERR_TIMEOUT;
-
         case SAS_INTERRUPTED:
         case SAS_PHY_DOWN:
         case SAS_NAK_R_ERR:
                 return AC_ERR_ATA_BUS;
-
-
         case SAS_DATA_UNDERRUN:
                 /*
                  * Some programs that use the taskfile interface
                  * (smartctl in particular) can cause underrun
                  * problems. Ignore these errors, perhaps at our
                  * peril.
                  */
                 return 0;
-
         case SAS_DATA_OVERRUN:
         case SAS_QUEUE_FULL:
         case SAS_DEVICE_UNKNOWN:
         case SAS_SG_ERR:
                 return AC_ERR_INVALID;
-
         case SAS_OPEN_TO:
         case SAS_OPEN_REJECT:
                 pr_warn("%s: Saw error %d. What to do?\n",
                         __func__, ts->stat);
                 return AC_ERR_OTHER;
-
         case SAM_STAT_CHECK_CONDITION:
         case SAS_ABORTED_TASK:
                 return AC_ERR_DEV;
-
         case SAS_PROTO_RESPONSE:
                 /* This means the ending_fis has the error
-                 * value; return 0 here to collect it */
+                 * value; return 0 here to collect it
+                 */
                 return 0;
         default:
                 return 0;
         }
 }
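The sas_to_ata_err() hunk is cosmetic (blank lines dropped, a comment terminator moved to its own line), but the function it touches is a plain status-translation table: each SAS completion status is folded into the coarser ATA error class that libata understands. Below is a self-contained sketch of that shape, using stand-in enums rather than the kernel's SAS_* and AC_ERR_* values.

#include <stdio.h>

/* Stand-in status codes; the kernel's SAS_* and AC_ERR_* enums differ. */
enum demo_sas_status {
        DEMO_SAS_DEV_NO_RESPONSE,
        DEMO_SAS_PHY_DOWN,
        DEMO_SAS_DATA_UNDERRUN,
        DEMO_SAS_ABORTED_TASK,
};

enum demo_ata_err {
        DEMO_ERR_NONE = 0,
        DEMO_ERR_TIMEOUT,
        DEMO_ERR_ATA_BUS,
        DEMO_ERR_DEV,
};

/* Fold a transport-level completion status into a coarser ATA error class. */
static enum demo_ata_err demo_sas_to_ata_err(enum demo_sas_status stat)
{
        switch (stat) {
        case DEMO_SAS_DEV_NO_RESPONSE:
                return DEMO_ERR_TIMEOUT;
        case DEMO_SAS_PHY_DOWN:
                return DEMO_ERR_ATA_BUS;
        case DEMO_SAS_DATA_UNDERRUN:
                /* benign for taskfile passthrough; report success */
                return DEMO_ERR_NONE;
        case DEMO_SAS_ABORTED_TASK:
                return DEMO_ERR_DEV;
        default:
                return DEMO_ERR_NONE;
        }
}

int main(void)
{
        printf("PHY_DOWN maps to %d\n", demo_sas_to_ata_err(DEMO_SAS_PHY_DOWN));
        return 0;
}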
@@ -75,7 +75,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
                 struct dev_to_host_fis *fis =
                         (struct dev_to_host_fis *) dev->frame_rcvd;
                 if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
-                    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
+                    fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96
                     && (fis->device & ~0x10) == 0)
                         dev->dev_type = SAS_SATA_PM;
                 else
@@ -553,7 +553,7 @@ static int sas_ex_manuf_info(struct domain_device *dev)
 
         mi_req[1] = SMP_REPORT_MANUF_INFO;
 
-        res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE);
+        res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE);
         if (res) {
                 pr_notice("MI: ex %016llx failed:0x%x\n",
                           SAS_ADDR(dev->sas_addr), res);

@@ -594,13 +594,13 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 
         pc_req[1] = SMP_PHY_CONTROL;
         pc_req[9] = phy_id;
-        pc_req[10]= phy_func;
+        pc_req[10] = phy_func;
         if (rates) {
                 pc_req[32] = rates->minimum_linkrate << 4;
                 pc_req[33] = rates->maximum_linkrate << 4;
         }
 
-        res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE);
+        res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE);
         if (res) {
                 pr_err("ex %016llx phy%02d PHY control failed: %d\n",
                        SAS_ADDR(dev->sas_addr), phy_id, res);

@@ -678,7 +678,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
         req[9] = phy->number;
 
         res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
                                resp, RPEL_RESP_SIZE);
 
         if (res)
                 goto out;

@@ -714,7 +714,7 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
         rps_req[9] = phy_id;
 
         res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
                                rps_resp, RPS_RESP_SIZE);
 
         /* 0x34 is the FIS type for the D2H fis. There's a potential
          * standards cockup here. sas-2 explicitly specifies the FIS

@@ -1506,7 +1506,8 @@ static int sas_configure_phy(struct domain_device *dev, int phy_id,
         if (res)
                 return res;
         if (include ^ present)
-                return sas_configure_set(dev, phy_id, sas_addr, index,include);
+                return sas_configure_set(dev, phy_id, sas_addr, index,
+                                         include);
 
         return res;
 }