Merge branch 'for-3.13/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "This is the block driver pull request for 3.13. As with the core pull
  request just sent out, this was rebased on top of the core branch
  again after the immutable series was pulled. This also means that
  bcache gets to sit the initial pull over. I will send a second driver
  pull request in the merge window to get those fixes in, once they
  have been rebased and tested on top of the non-immutable stack.

  This pull request contains:

   - Add support for the sTec Kronos pci-e flash card from sTec. Also
     has various cleanups for this driver, from myself, Bart, Mike
     Snitzer, and Wei Yongjun.

   - Add surprise removal support for the micron mtip32xx driver from
     Micron.

   - Floppy documentation fix from Ben Harris.

   - debugfs bug fix for pktcdvd from Dan Carpenter.

   - Fix for the mtip32xx driver stack usage in the debugfs path,
     dynamically allocating those buffers instead. From David Milburn.

   - Disable cpqarray in Kconfig. The plan is to remove it on request
     of HP, but let's disable it for a few revisions just to see if
     anyone yells.

   - drbd fixes from Lars Ellenberg and Philipp Reisner.

   - Elevator switch fix for the s390 block driver from Heiko Carstens.

   - loop crash fix on IO to unassigned device from Mikulas Patocka.

   - A series of bug fixes for the IBM rsxx pci-e flash driver from
     Philip J Kelleher.

   - cciss probe fix from Stephen Cameron.

   - Xen block front/back fixes from Roger Pau Monne and Vegard Nossum"

* 'for-3.13/drivers' of git://git.kernel.dk/linux-block: (41 commits)
  floppy: Correct documentation of driver options when used as a module.
  pktcdvd: debugfs functions return NULL on error
  xen-blkfront: restore the non-persistent data path
  skd: fix formatting in skd_s1120.h
  skd: reorder construct/destruct code
  skd: cleanup skd_do_inq_page_da()
  skd: remove SKD_OMIT_FROM_SRC_DIST ifdefs
  skd: remove redundant skdev->pdev assignment from skd_pci_probe()
  skd: use <asm/unaligned.h>
  skd: remove SCSI subsystem specific includes
  skd: register block device only if some devices are present
  skd: fix error messages in skd_init()
  skd: fix error paths in skd_init()
  skd: fix unregister_blkdev() placement
  skd: more removal of bio-based code
  skd: cleanup the skd_*() function block wrapping
  skd: rip out bio path
  skd: fix error return code in skd_pci_probe()
  s390/dasd: hold request queue sysfs lock when calling elevator_init()
  cciss: return 0 from driver probe function on success, not 1
  ...
commit 5eea9be8b2
--- a/Documentation/blockdev/floppy.txt
+++ b/Documentation/blockdev/floppy.txt
@@ -39,15 +39,15 @@ Module configuration options
 ============================
 
 If you use the floppy driver as a module, use the following syntax:
-modprobe floppy <options>
+modprobe floppy floppy="<options>"
 
 Example:
-modprobe floppy omnibook messages
+modprobe floppy floppy="omnibook messages"
 
 If you need certain options enabled every time you load the floppy driver,
 you can put:
 
-options floppy omnibook messages
+options floppy floppy="omnibook messages"
 
 in a configuration file in /etc/modprobe.d/.
 
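Why the quoting matters: when built as a module, the floppy driver does not parse kernel-command-line words; it takes all of its options through a single string module parameter (drivers/block/floppy.c declares roughly `module_param(floppy, charp, 0)`), so the option words have to be handed over as one quoted floppy="..." value.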
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -110,7 +110,7 @@ source "drivers/block/mtip32xx/Kconfig"
 
 config BLK_CPQ_DA
 	tristate "Compaq SMART2 support"
-	depends on PCI && VIRT_TO_BUS
+	depends on PCI && VIRT_TO_BUS && 0
 	help
 	  This is the driver for Compaq Smart Array controllers. Everyone
 	  using these boards should say Y here. See the file
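A note on the mechanism: `&& 0` appends an always-false term to the Kconfig dependency expression, so BLK_CPQ_DA can no longer be enabled by any configuration while the driver source stays in the tree, and re-enabling it later is a one-line revert. That is a deliberately lighter step than deleting the driver outright, matching the pull message's plan to first see whether any cpqarray users complain.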
@@ -319,6 +319,16 @@ config BLK_DEV_NVME
 	  To compile this driver as a module, choose M here: the
 	  module will be called nvme.
 
+config BLK_DEV_SKD
+	tristate "STEC S1120 Block Driver"
+	depends on PCI
+	depends on 64BIT
+	---help---
+	Saying Y or M here will enable support for the
+	STEC, Inc. S1120 PCIe SSD.
+
+	Use device /dev/skd$N amd /dev/skd$Np$M.
+
 config BLK_DEV_OSD
 	tristate "OSD object-as-blkdev support"
 	depends on SCSI_OSD_ULD
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
 obj-$(CONFIG_MG_DISK)		+= mg_disk.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
 obj-$(CONFIG_BLK_DEV_NVME)	+= nvme.o
+obj-$(CONFIG_BLK_DEV_SKD)	+= skd.o
 obj-$(CONFIG_BLK_DEV_OSD)	+= osdblk.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
@@ -44,4 +45,5 @@ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
 obj-$(CONFIG_BLK_DEV_NULL_BLK)	+= null_blk.o
 
 nvme-y := nvme-core.o nvme-scsi.o
+skd-y := skd_main.o
 swim_mod-y := swim.o swim_asm.o
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5183,7 +5183,7 @@ static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	rebuild_lun_table(h, 1, 0);
 	cciss_engage_scsi(h);
 	h->busy_initializing = 0;
-	return 1;
+	return 0;
 
 clean4:
 	cciss_free_cmd_pool(h);
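For context on the convention the cciss hunk fixes (my sketch, not part of the commit): the driver core interprets a PCI probe callback's return value as an errno-style status, so any nonzero value -- including 1 -- marks the probe as failed and leaves the device unbound, even though cciss had fully initialized it. The expected shape, with illustrative names:

#include <linux/pci.h>

/* Hypothetical probe illustrating the return convention. */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;	/* negative errno on failure */

	/* ... controller setup would go here ... */

	return 0;		/* 0, and only 0, means success */
}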
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1474,7 +1474,8 @@ enum determine_dev_size {
 	DS_ERROR = -1,
 	DS_UNCHANGED = 0,
 	DS_SHRUNK = 1,
-	DS_GREW = 2
+	DS_GREW = 2,
+	DS_GREW_FROM_ZERO = 3,
 };
 extern enum determine_dev_size
 drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2750,13 +2750,6 @@ int __init drbd_init(void)
 		return err;
 	}
 
-	err = drbd_genl_register();
-	if (err) {
-		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
-		goto fail;
-	}
-
-
 	register_reboot_notifier(&drbd_notifier);
 
 	/*
@@ -2767,6 +2760,15 @@ int __init drbd_init(void)
 	drbd_proc = NULL; /* play safe for drbd_cleanup */
 	idr_init(&minors);
 
+	rwlock_init(&global_state_lock);
+	INIT_LIST_HEAD(&drbd_tconns);
+
+	err = drbd_genl_register();
+	if (err) {
+		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
+		goto fail;
+	}
+
 	err = drbd_create_mempools();
 	if (err)
 		goto fail;
@@ -2778,9 +2780,6 @@ int __init drbd_init(void)
 		goto fail;
 	}
 
-	rwlock_init(&global_state_lock);
-	INIT_LIST_HEAD(&drbd_tconns);
-
 	retry.wq = create_singlethread_workqueue("drbd-reissue");
 	if (!retry.wq) {
 		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -955,7 +955,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
 	}
 
 	if (size > la_size_sect)
-		rv = DS_GREW;
+		rv = la_size_sect ? DS_GREW : DS_GREW_FROM_ZERO;
 	if (size < la_size_sect)
 		rv = DS_SHRUNK;
 
@@ -1132,9 +1132,9 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 	/* We may ignore peer limits if the peer is modern enough.
 	   Because new from 8.3.8 onwards the peer can use multiple
 	   BIOs for a single peer_request */
-	if (mdev->state.conn >= C_CONNECTED) {
+	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
 		if (mdev->tconn->agreed_pro_version < 94)
-			peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+			peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
 		else if (mdev->tconn->agreed_pro_version == 94)
 			peer = DRBD_MAX_SIZE_H80_PACKET;
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1890,29 +1890,11 @@ static u32 seq_max(u32 a, u32 b)
 	return seq_greater(a, b) ? a : b;
 }
 
-static bool need_peer_seq(struct drbd_conf *mdev)
-{
-	struct drbd_tconn *tconn = mdev->tconn;
-	int tp;
-
-	/*
-	 * We only need to keep track of the last packet_seq number of our peer
-	 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
-	 * handle_write_conflicts().
-	 */
-
-	rcu_read_lock();
-	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
-	rcu_read_unlock();
-
-	return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
-}
-
 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
 {
 	unsigned int newest_peer_seq;
 
-	if (need_peer_seq(mdev)) {
+	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
 		spin_lock(&mdev->peer_seq_lock);
 		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
 		mdev->peer_seq = newest_peer_seq;
@@ -1972,22 +1954,31 @@ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_s
 {
 	DEFINE_WAIT(wait);
 	long timeout;
-	int ret;
+	int ret = 0, tp;
 
-	if (!need_peer_seq(mdev))
+	if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
 		return 0;
 
 	spin_lock(&mdev->peer_seq_lock);
 	for (;;) {
 		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
 			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
-			ret = 0;
 			break;
 		}
 
 		if (signal_pending(current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
 
+		rcu_read_lock();
+		tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
+		rcu_read_unlock();
+
+		if (!tp)
+			break;
+
+		/* Only need to wait if two_primaries is enabled */
 		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
 		spin_unlock(&mdev->peer_seq_lock);
 		rcu_read_lock();
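The rework above drops the need_peer_seq() helper and folds its RCU access into the wait loop. For readers unfamiliar with the idiom: a pointer obtained with rcu_dereference() may only be used inside the rcu_read_lock()/rcu_read_unlock() pair, so the field of interest is copied to a local before the section ends. A minimal stand-alone sketch of that pattern (names hypothetical):

#include <linux/rcupdate.h>

struct net_conf_example {
	int two_primaries;
};

static struct net_conf_example __rcu *conf;

static int read_two_primaries(void)
{
	int tp;

	rcu_read_lock();
	/* only valid inside the read-side critical section */
	tp = rcu_dereference(conf)->two_primaries;
	rcu_read_unlock();

	return tp;	/* the plain int copy outlives the section */
}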
@@ -2228,8 +2219,10 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 			}
 			goto out_interrupted;
 		}
-	} else
+	} else {
+		update_peer_seq(mdev, peer_seq);
 		spin_lock_irq(&mdev->tconn->req_lock);
+	}
 	list_add(&peer_req->w.list, &mdev->active_ee);
 	spin_unlock_irq(&mdev->tconn->req_lock);
 
@@ -4132,7 +4125,11 @@ recv_bm_rle_bits(struct drbd_conf *mdev,
 				 (unsigned int)bs.buf_len);
 			return -EIO;
 		}
-		look_ahead >>= bits;
+		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
+		if (likely(bits < 64))
+			look_ahead >>= bits;
+		else
+			look_ahead = 0;
 		have -= bits;
 
 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
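The quoted comment is precise: in C, shifting a value by an amount greater than or equal to its width is undefined behavior (C99 6.5.7p3). On x86 the hardware masks the shift count, so `x >> 64` on a 64-bit value commonly evaluates to `x` rather than 0 -- exactly the wrong answer when all 64 look-ahead bits were consumed. A runnable user-space illustration of the guarded form:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t look_ahead = 0xdeadbeefULL;
	unsigned int bits = 64;	/* all bits consumed */

	/* look_ahead >>= bits;  -- undefined when bits >= 64 */
	if (bits < 64)		/* the branch mirrors the drbd fix */
		look_ahead >>= bits;
	else
		look_ahead = 0;

	printf("%llu\n", (unsigned long long)look_ahead);	/* prints 0 */
	return 0;
}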
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1306,6 +1306,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 		int backing_limit;
 
 	if (bio_size && get_ldev(mdev)) {
+		unsigned int max_hw_sectors = queue_max_hw_sectors(q);
 		struct request_queue * const b =
 			mdev->ldev->backing_bdev->bd_disk->queue;
 		if (b->merge_bvec_fn) {
@@ -1313,6 +1314,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
 			limit = min(limit, backing_limit);
 		}
 		put_ldev(mdev);
+		if ((limit >> 9) > max_hw_sectors)
+			limit = max_hw_sectors << 9;
 	}
 	return limit;
 }
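The new clamp mixes two units: queue_max_hw_sectors() is measured in 512-byte sectors while the bvec merge limit is in bytes, which is what the `>> 9` and `<< 9` conversions reconcile. A tiny stand-alone sketch of the arithmetic:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 1 sector = 512 bytes */

int main(void)
{
	unsigned int max_hw_sectors = 1024;	/* queue limit, in sectors */
	unsigned int limit = 4 << 20;		/* candidate limit, in bytes */

	/* Clamp the byte limit to the hardware queue's capability. */
	if ((limit >> SECTOR_SHIFT) > max_hw_sectors)
		limit = max_hw_sectors << SECTOR_SHIFT;

	printf("limit = %u bytes = %u sectors\n",
	       limit, limit >> SECTOR_SHIFT);
	return 0;
}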
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -894,13 +894,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	bio_list_init(&lo->lo_bio_list);
 
-	/*
-	 * set queue make_request_fn, and add limits based on lower level
-	 * device
-	 */
-	blk_queue_make_request(lo->lo_queue, loop_make_request);
-	lo->lo_queue->queuedata = lo;
-
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
 		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
 
@@ -1618,6 +1611,8 @@ static int loop_add(struct loop_device **l, int i)
 	if (!lo)
 		goto out;
 
+	lo->lo_state = Lo_unbound;
+
 	/* allocate id, if @id >= 0, we're requesting that specific id */
 	if (i >= 0) {
 		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
@@ -1635,6 +1630,12 @@ static int loop_add(struct loop_device **l, int i)
 	if (!lo->lo_queue)
 		goto out_free_idr;
 
+	/*
+	 * set queue make_request_fn
+	 */
+	blk_queue_make_request(lo->lo_queue, loop_make_request);
+	lo->lo_queue->queuedata = lo;
+
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -936,7 +936,7 @@ static int mg_probe(struct platform_device *plat_dev)
 			goto probe_err_3b;
 		}
 		err = request_irq(host->irq, mg_irq,
-				IRQF_DISABLED | IRQF_TRIGGER_RISING,
+				IRQF_TRIGGER_RISING,
 				MG_DEV_NAME, host);
 		if (err) {
 			printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -126,64 +126,30 @@ struct mtip_compat_ide_task_request_s {
 static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 {
 	u16 vendor_id = 0;
+	struct driver_data *dd = pci_get_drvdata(pdev);
+
+	if (dd->sr)
+		return true;
 
 	/* Read the vendorID from the configuration space */
 	pci_read_config_word(pdev, 0x00, &vendor_id);
-	if (vendor_id == 0xFFFF)
+	if (vendor_id == 0xFFFF) {
+		dd->sr = true;
+		if (dd->queue)
+			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
+		else
+			dev_warn(&dd->pdev->dev,
+				"%s: dd->queue is NULL\n", __func__);
+		if (dd->port) {
+			set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
+			wake_up_interruptible(&dd->port->svc_wait);
+		} else
+			dev_warn(&dd->pdev->dev,
+				"%s: dd->port is NULL\n", __func__);
 		return true; /* device removed */
+	}
 
 	return false; /* device present */
 }
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- *	None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
-	int group = 0, commandslot = 0, commandindex = 0;
-	struct mtip_cmd *command;
-	struct mtip_port *port = dd->port;
-	static int in_progress;
-
-	if (in_progress)
-		return;
-
-	in_progress = 1;
-
-	for (group = 0; group < 4; group++) {
-		for (commandslot = 0; commandslot < 32; commandslot++) {
-			if (!(port->allocated[group] & (1 << commandslot)))
-				continue;
-
-			commandindex = group << 5 | commandslot;
-			command = &port->commands[commandindex];
-
-			if (atomic_read(&command->active)
-			    && (command->async_callback)) {
-				command->async_callback(command->async_data,
-					-ENODEV);
-				command->async_callback = NULL;
-				command->async_data = NULL;
-			}
-
-			dma_unmap_sg(&port->dd->pdev->dev,
-				command->sg,
-				command->scatter_ents,
-				command->direction);
-		}
-	}
-
-	up(&port->cmd_slot);
-
-	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
-	in_progress = 0;
-}
 
 /*
@@ -222,10 +188,7 @@ static int get_slot(struct mtip_port *port)
 	}
 	dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
 
-	if (mtip_check_surprise_removal(port->dd->pdev)) {
-		/* Device not present, clean outstanding commands */
-		mtip_command_cleanup(port->dd);
-	}
+	mtip_check_surprise_removal(port->dd->pdev);
 	return -1;
 }
 
@@ -245,6 +208,107 @@ static inline void release_slot(struct mtip_port *port, int tag)
 	smp_mb__after_clear_bit();
 }
 
+/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port   Pointer to the port data structure.
+ * @tag    Tag of the command.
+ * @data   Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ *	None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+				int tag,
+				void *data,
+				int status)
+{
+	struct mtip_cmd *command;
+	struct driver_data *dd = data;
+	int cb_status = status ? -EIO : 0;
+
+	if (unlikely(!dd) || unlikely(!port))
+		return;
+
+	command = &port->commands[tag];
+
+	if (unlikely(status == PORT_IRQ_TF_ERR)) {
+		dev_warn(&port->dd->pdev->dev,
+			"Command tag %d failed due to TFE\n", tag);
+	}
+
+	/* Upper layer callback */
+	if (likely(command->async_callback))
+		command->async_callback(command->async_data, cb_status);
+
+	command->async_callback = NULL;
+	command->comp_func = NULL;
+
+	/* Unmap the DMA scatter list entries */
+	dma_unmap_sg(&dd->pdev->dev,
+		command->sg,
+		command->scatter_ents,
+		command->direction);
+
+	/* Clear the allocated and active bits for the command */
+	atomic_set(&port->commands[tag].active, 0);
+	release_slot(port, tag);
+
+	up(&port->cmd_slot);
+}
+
+/*
+ * This function is called for clean the pending command in the
+ * command slot during the surprise removal of device and return
+ * error to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ *	None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+	int tag = 0;
+	struct mtip_cmd *cmd;
+	struct mtip_port *port = dd->port;
+	unsigned int num_cmd_slots = dd->slot_groups * 32;
+
+	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+		return;
+
+	if (!port)
+		return;
+
+	cmd = &port->commands[MTIP_TAG_INTERNAL];
+	if (atomic_read(&cmd->active))
+		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
+					(1 << MTIP_TAG_INTERNAL))
+			if (cmd->comp_func)
+				cmd->comp_func(port, MTIP_TAG_INTERNAL,
+					 cmd->comp_data, -ENODEV);
+
+	while (1) {
+		tag = find_next_bit(port->allocated, num_cmd_slots, tag);
+		if (tag >= num_cmd_slots)
+			break;
+
+		cmd = &port->commands[tag];
+		if (atomic_read(&cmd->active))
+			mtip_async_complete(port, tag, dd, -ENODEV);
+	}
+
+	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+}
+
 /*
  * Reset the HBA (without sleeping)
  *
@@ -584,6 +648,9 @@ static void mtip_timeout_function(unsigned long int data)
 	if (unlikely(!port))
 		return;
 
+	if (unlikely(port->dd->sr))
+		return;
+
 	if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
 		mod_timer(&port->cmd_timer,
 			jiffies + msecs_to_jiffies(30000));
@@ -674,66 +741,6 @@ static void mtip_timeout_function(unsigned long int data)
 			jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 }
 
-/*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port   Pointer to the port data structure.
- * @tag    Tag of the command.
- * @data   Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- *	None
- */
-static void mtip_async_complete(struct mtip_port *port,
-				int tag,
-				void *data,
-				int status)
-{
-	struct mtip_cmd *command;
-	struct driver_data *dd = data;
-	int cb_status = status ? -EIO : 0;
-
-	if (unlikely(!dd) || unlikely(!port))
-		return;
-
-	command = &port->commands[tag];
-
-	if (unlikely(status == PORT_IRQ_TF_ERR)) {
-		dev_warn(&port->dd->pdev->dev,
-			"Command tag %d failed due to TFE\n", tag);
-	}
-
-	/* Upper layer callback */
-	if (likely(command->async_callback))
-		command->async_callback(command->async_data, cb_status);
-
-	command->async_callback = NULL;
-	command->comp_func = NULL;
-
-	/* Unmap the DMA scatter list entries */
-	dma_unmap_sg(&dd->pdev->dev,
-		command->sg,
-		command->scatter_ents,
-		command->direction);
-
-	/* Clear the allocated and active bits for the command */
-	atomic_set(&port->commands[tag].active, 0);
-	release_slot(port, tag);
-
-	if (unlikely(command->unaligned))
-		up(&port->cmd_slot_unal);
-	else
-		up(&port->cmd_slot);
-}
-
 /*
  * Internal command completion callback function.
  *
@@ -854,7 +861,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
 				"Missing completion func for tag %d",
 				tag);
 			if (mtip_check_surprise_removal(dd->pdev)) {
-				mtip_command_cleanup(dd);
 				/* don't proceed further */
 				return;
 			}
@@ -1018,14 +1024,12 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
 						command->comp_data,
 						0);
 				} else {
-					dev_warn(&dd->pdev->dev,
-						"Null completion "
-						"for tag %d",
+					dev_dbg(&dd->pdev->dev,
+						"Null completion for tag %d",
 						tag);
 
 					if (mtip_check_surprise_removal(
 						dd->pdev)) {
-						mtip_command_cleanup(dd);
 						return;
 					}
 				}
@@ -1145,7 +1149,6 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
 
 		if (unlikely(port_stat & PORT_IRQ_ERR)) {
 			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
-				mtip_command_cleanup(dd);
 				/* don't proceed further */
 				return IRQ_HANDLED;
 			}
@@ -2806,34 +2809,51 @@ static ssize_t show_device_status(struct device_driver *drv, char *buf)
 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
 						size_t len, loff_t *offset)
 {
+	struct driver_data *dd = (struct driver_data *)f->private_data;
 	int size = *offset;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
+	int rv = 0;
 
 	if (!len || *offset)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: status buffer\n");
+		return -ENOMEM;
+	}
+
 	size += show_device_status(NULL, buf);
 
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
 						size_t len, loff_t *offset)
 {
 	struct driver_data *dd = (struct driver_data *)f->private_data;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
 	u32 group_allocated;
 	int size = *offset;
-	int n;
+	int n, rv = 0;
 
 	if (!len || size)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: register buffer\n");
+		return -ENOMEM;
+	}
+
 	size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
 
 	for (n = dd->slot_groups-1; n >= 0; n--)
@@ -2888,21 +2908,30 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
 				  size_t len, loff_t *offset)
 {
 	struct driver_data *dd = (struct driver_data *)f->private_data;
-	char buf[MTIP_DFS_MAX_BUF_SIZE];
+	char *buf;
 	int size = *offset;
+	int rv = 0;
 
 	if (!len || size)
 		return 0;
 
+	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		dev_err(&dd->pdev->dev,
+			"Memory allocation: flag buffer\n");
+		return -ENOMEM;
+	}
+
 	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
 							dd->port->flags);
 	size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
@@ -2911,9 +2940,10 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
 	*offset = size <= len ? size : len;
 	size = copy_to_user(ubuf, buf, *offset);
 	if (size)
-		return -EFAULT;
+		rv = -EFAULT;
 
-	return *offset;
+	kfree(buf);
+	return rv ? rv : *offset;
 }
 
 static const struct file_operations mtip_device_status_fops = {
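These three hunks are the stack-usage fix called out in the pull message: each handler used to place a MTIP_DFS_MAX_BUF_SIZE char array on the kernel stack, which is only a few kilobytes deep. The replacement pattern, sketched generically below (EXAMPLE_BUF_SIZE and the function name are illustrative), allocates the scratch buffer from the heap, records the first failure, and frees on every exit path:

/* Sketch of the kzalloc-backed debugfs read pattern. */
static ssize_t example_read(struct file *f, char __user *ubuf,
			    size_t len, loff_t *offset)
{
	int size = *offset;
	char *buf;
	int rv = 0;

	if (!len || *offset)
		return 0;

	buf = kzalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);	/* heap, not stack */
	if (!buf)
		return -ENOMEM;

	size += scnprintf(buf + size, EXAMPLE_BUF_SIZE - size, "...\n");

	*offset = size <= len ? size : len;
	if (copy_to_user(ubuf, buf, *offset))
		rv = -EFAULT;

	kfree(buf);		/* reached on success and on -EFAULT alike */
	return rv ? rv : *offset;
}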
@@ -3006,6 +3036,46 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd)
 	debugfs_remove_recursive(dd->dfs_node);
 }
 
+static int mtip_free_orphan(struct driver_data *dd)
+{
+	struct kobject *kobj;
+
+	if (dd->bdev) {
+		if (dd->bdev->bd_holders >= 1)
+			return -2;
+
+		bdput(dd->bdev);
+		dd->bdev = NULL;
+	}
+
+	mtip_hw_debugfs_exit(dd);
+
+	spin_lock(&rssd_index_lock);
+	ida_remove(&rssd_index_ida, dd->index);
+	spin_unlock(&rssd_index_lock);
+
+	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
+			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+		put_disk(dd->disk);
+	} else {
+		if (dd->disk) {
+			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+			if (kobj) {
+				mtip_hw_sysfs_exit(dd, kobj);
+				kobject_put(kobj);
+			}
+			del_gendisk(dd->disk);
+			dd->disk = NULL;
+		}
+		if (dd->queue) {
+			dd->queue->queuedata = NULL;
+			blk_cleanup_queue(dd->queue);
+			dd->queue = NULL;
+		}
+	}
+	kfree(dd);
+	return 0;
+}
+
 /*
  * Perform any init/resume time hardware setup
 
@@ -3154,6 +3224,7 @@ static int mtip_service_thread(void *data)
 	unsigned long slot, slot_start, slot_wrap;
 	unsigned int num_cmd_slots = dd->slot_groups * 32;
 	struct mtip_port *port = dd->port;
+	int ret;
 
 	while (1) {
 		/*
@@ -3164,13 +3235,18 @@ static int mtip_service_thread(void *data)
 			!(port->flags & MTIP_PF_PAUSE_IO));
 
 		if (kthread_should_stop())
+			goto st_out;
+
+		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
+		/* If I am an orphan, start self cleanup */
+		if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
 			break;
 
 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
 				&dd->dd_flag)))
-			break;
+			goto st_out;
 
-		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
 		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
 			slot = 1;
 			/* used to restrict the loop to one iteration */
@@ -3201,7 +3277,7 @@ static int mtip_service_thread(void *data)
 
 			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		} else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
-			if (!mtip_ftl_rebuild_poll(dd))
+			if (mtip_ftl_rebuild_poll(dd) < 0)
 				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
 							&dd->dd_flag);
 			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
@@ -3209,8 +3285,30 @@ static int mtip_service_thread(void *data)
 		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
 
 		if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
-			break;
+			goto st_out;
 	}
+
+	/* wait for pci remove to exit */
+	while (1) {
+		if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag))
+			break;
+		msleep_interruptible(1000);
+		if (kthread_should_stop())
+			goto st_out;
+	}
+
+	while (1) {
+		ret = mtip_free_orphan(dd);
+		if (!ret) {
+			/* NOTE: All data structures are invalid, do not
+			 * access any here */
+			return 0;
+		}
+		msleep_interruptible(1000);
+		if (kthread_should_stop())
+			goto st_out;
+	}
+st_out:
 	return 0;
 }
 
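The service-thread changes above implement an ownership handoff for surprise removal: mtip_pci_remove() cannot kfree() the driver data while this thread might still dereference it, so the remover only sets MTIP_DDF_REMOVE_DONE_BIT, and the thread polls for that bit before tearing everything down through mtip_free_orphan(). A stripped-down sketch of the pattern (all names hypothetical):

/* Shared state; REMOVE_DONE is a bit number within flags. */
struct example_dd {
	unsigned long flags;
	bool sr;			/* surprise-removed */
};

/* Remover side: hand ownership to the worker instead of freeing. */
static void example_remove(struct example_dd *dd)
{
	if (dd->sr)
		set_bit(REMOVE_DONE, &dd->flags);	/* worker frees dd */
	else
		kfree(dd);
}

/* Worker side: wait for the remover to finish, then free and exit. */
static int example_worker(void *arg)
{
	struct example_dd *dd = arg;

	while (!test_bit(REMOVE_DONE, &dd->flags))
		msleep_interruptible(1000);

	kfree(dd);		/* dd is invalid beyond this point */
	return 0;
}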
|
@ -3437,13 +3535,13 @@ static int mtip_hw_init(struct driver_data *dd)
|
||||||
rv = -EFAULT;
|
rv = -EFAULT;
|
||||||
goto out3;
|
goto out3;
|
||||||
}
|
}
|
||||||
|
mtip_dump_identify(dd->port);
|
||||||
|
|
||||||
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
|
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
|
||||||
MTIP_FTL_REBUILD_MAGIC) {
|
MTIP_FTL_REBUILD_MAGIC) {
|
||||||
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
|
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
|
||||||
return MTIP_FTL_REBUILD_MAGIC;
|
return MTIP_FTL_REBUILD_MAGIC;
|
||||||
}
|
}
|
||||||
mtip_dump_identify(dd->port);
|
|
||||||
|
|
||||||
/* check write protect, over temp and rebuild statuses */
|
/* check write protect, over temp and rebuild statuses */
|
||||||
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
|
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
|
||||||
|
@ -3467,8 +3565,8 @@ static int mtip_hw_init(struct driver_data *dd)
|
||||||
}
|
}
|
||||||
if (buf[288] == 0xBF) {
|
if (buf[288] == 0xBF) {
|
||||||
dev_info(&dd->pdev->dev,
|
dev_info(&dd->pdev->dev,
|
||||||
"Drive indicates rebuild has failed.\n");
|
"Drive is in security locked state.\n");
|
||||||
/* TODO */
|
set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3523,9 +3621,8 @@ static int mtip_hw_exit(struct driver_data *dd)
|
||||||
* Send standby immediate (E0h) to the drive so that it
|
* Send standby immediate (E0h) to the drive so that it
|
||||||
* saves its state.
|
* saves its state.
|
||||||
*/
|
*/
|
||||||
if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
|
if (!dd->sr) {
|
||||||
|
if (!test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
|
||||||
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
|
|
||||||
if (mtip_standby_immediate(dd->port))
|
if (mtip_standby_immediate(dd->port))
|
||||||
dev_warn(&dd->pdev->dev,
|
dev_warn(&dd->pdev->dev,
|
||||||
"STANDBY IMMEDIATE failed\n");
|
"STANDBY IMMEDIATE failed\n");
|
||||||
|
@ -3551,6 +3648,7 @@ static int mtip_hw_exit(struct driver_data *dd)
|
||||||
dd->port->command_list_dma);
|
dd->port->command_list_dma);
|
||||||
/* Free the memory allocated for the for structure. */
|
/* Free the memory allocated for the for structure. */
|
||||||
kfree(dd->port);
|
kfree(dd->port);
|
||||||
|
dd->port = NULL;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -3572,7 +3670,8 @@ static int mtip_hw_shutdown(struct driver_data *dd)
|
||||||
* Send standby immediate (E0h) to the drive so that it
|
* Send standby immediate (E0h) to the drive so that it
|
||||||
* saves its state.
|
* saves its state.
|
||||||
*/
|
*/
|
||||||
mtip_standby_immediate(dd->port);
|
if (!dd->sr && dd->port)
|
||||||
|
mtip_standby_immediate(dd->port);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -3887,6 +3986,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
|
||||||
bio_endio(bio, -ENODATA);
|
bio_endio(bio, -ENODATA);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
|
||||||
|
bio_endio(bio, -ENXIO);
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
|
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
|
||||||
|
@ -4010,6 +4113,8 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||||
dd->disk->private_data = dd;
|
dd->disk->private_data = dd;
|
||||||
dd->index = index;
|
dd->index = index;
|
||||||
|
|
||||||
|
mtip_hw_debugfs_init(dd);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* if rebuild pending, start the service thread, and delay the block
|
* if rebuild pending, start the service thread, and delay the block
|
||||||
* queue creation and add_disk()
|
* queue creation and add_disk()
|
||||||
|
@ -4068,6 +4173,7 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||||
/* Enable the block device and add it to /dev */
|
/* Enable the block device and add it to /dev */
|
||||||
add_disk(dd->disk);
|
add_disk(dd->disk);
|
||||||
|
|
||||||
|
dd->bdev = bdget_disk(dd->disk, 0);
|
||||||
/*
|
/*
|
||||||
* Now that the disk is active, initialize any sysfs attributes
|
* Now that the disk is active, initialize any sysfs attributes
|
||||||
* managed by the protocol layer.
|
* managed by the protocol layer.
|
||||||
|
@ -4077,7 +4183,6 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||||
mtip_hw_sysfs_init(dd, kobj);
|
mtip_hw_sysfs_init(dd, kobj);
|
||||||
kobject_put(kobj);
|
kobject_put(kobj);
|
||||||
}
|
}
|
||||||
mtip_hw_debugfs_init(dd);
|
|
||||||
|
|
||||||
if (dd->mtip_svc_handler) {
|
if (dd->mtip_svc_handler) {
|
||||||
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
|
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
|
||||||
|
@ -4103,7 +4208,8 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||||
return rv;
|
return rv;
|
||||||
|
|
||||||
kthread_run_error:
|
kthread_run_error:
|
||||||
mtip_hw_debugfs_exit(dd);
|
bdput(dd->bdev);
|
||||||
|
dd->bdev = NULL;
|
||||||
|
|
||||||
/* Delete our gendisk. This also removes the device from /dev */
|
/* Delete our gendisk. This also removes the device from /dev */
|
||||||
del_gendisk(dd->disk);
|
del_gendisk(dd->disk);
|
||||||
|
@ -4112,6 +4218,7 @@ static int mtip_block_initialize(struct driver_data *dd)
|
||||||
blk_cleanup_queue(dd->queue);
|
blk_cleanup_queue(dd->queue);
|
||||||
|
|
||||||
block_queue_alloc_init_error:
|
block_queue_alloc_init_error:
|
||||||
|
mtip_hw_debugfs_exit(dd);
|
||||||
disk_index_error:
|
disk_index_error:
|
||||||
spin_lock(&rssd_index_lock);
|
spin_lock(&rssd_index_lock);
|
||||||
ida_remove(&rssd_index_ida, index);
|
ida_remove(&rssd_index_ida, index);
|
||||||
|
@@ -4141,40 +4248,48 @@ static int mtip_block_remove(struct driver_data *dd)
 {
 	struct kobject *kobj;
 
-	if (dd->mtip_svc_handler) {
-		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
-		wake_up_interruptible(&dd->port->svc_wait);
-		kthread_stop(dd->mtip_svc_handler);
-	}
+	if (!dd->sr) {
+		mtip_hw_debugfs_exit(dd);
 
-	/* Clean up the sysfs attributes, if created */
-	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
-		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
-		if (kobj) {
-			mtip_hw_sysfs_exit(dd, kobj);
-			kobject_put(kobj);
+		if (dd->mtip_svc_handler) {
+			set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+			wake_up_interruptible(&dd->port->svc_wait);
+			kthread_stop(dd->mtip_svc_handler);
 		}
-	}
-	mtip_hw_debugfs_exit(dd);
 
-	/*
-	 * Delete our gendisk structure. This also removes the device
-	 * from /dev
-	 */
-	if (dd->disk) {
-		if (dd->disk->queue)
-			del_gendisk(dd->disk);
-		else
-			put_disk(dd->disk);
-	}
-
-	spin_lock(&rssd_index_lock);
-	ida_remove(&rssd_index_ida, dd->index);
-	spin_unlock(&rssd_index_lock);
+		/* Clean up the sysfs attributes, if created */
+		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+			if (kobj) {
+				mtip_hw_sysfs_exit(dd, kobj);
+				kobject_put(kobj);
+			}
+		}
+		/*
+		 * Delete our gendisk structure. This also removes the device
+		 * from /dev
+		 */
+		if (dd->bdev) {
+			bdput(dd->bdev);
+			dd->bdev = NULL;
+		}
+		if (dd->disk) {
+			if (dd->disk->queue) {
+				del_gendisk(dd->disk);
+				blk_cleanup_queue(dd->queue);
+				dd->queue = NULL;
+			} else
+				put_disk(dd->disk);
+		}
+		dd->disk = NULL;
 
-	blk_cleanup_queue(dd->queue);
-	dd->disk  = NULL;
-	dd->queue = NULL;
+		spin_lock(&rssd_index_lock);
+		ida_remove(&rssd_index_ida, dd->index);
+		spin_unlock(&rssd_index_lock);
+	} else {
+		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+					dd->disk->disk_name);
+	}
 
 	/* De-initialize the protocol layer. */
 	mtip_hw_exit(dd);
@@ -4490,8 +4605,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
 static void mtip_pci_remove(struct pci_dev *pdev)
 {
 	struct driver_data *dd = pci_get_drvdata(pdev);
-	int counter = 0;
-	unsigned long flags;
+	unsigned long flags, to;
 
 	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
 
@@ -4500,17 +4614,22 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 	list_add(&dd->remove_list, &removing_list);
 	spin_unlock_irqrestore(&dev_lock, flags);
 
-	if (mtip_check_surprise_removal(pdev)) {
-		while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
-			counter++;
-			msleep(20);
-			if (counter == 10) {
-				/* Cleanup the outstanding commands */
-				mtip_command_cleanup(dd);
-				break;
-			}
-		}
-	}
+	mtip_check_surprise_removal(pdev);
+	synchronize_irq(dd->pdev->irq);
+
+	/* Spin until workers are done */
+	to = jiffies + msecs_to_jiffies(4000);
+	do {
+		msleep(20);
+	} while (atomic_read(&dd->irq_workers_active) != 0 &&
+		time_before(jiffies, to));
+
+	if (atomic_read(&dd->irq_workers_active) != 0) {
+		dev_warn(&dd->pdev->dev,
+			"Completion workers still active!\n");
+	}
+	/* Cleanup the outstanding commands */
+	mtip_command_cleanup(dd);
 
 	/* Clean up the block layer. */
 	mtip_block_remove(dd);
@@ -4529,8 +4648,15 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 	list_del_init(&dd->remove_list);
 	spin_unlock_irqrestore(&dev_lock, flags);
 
-	kfree(dd);
+	if (!dd->sr)
+		kfree(dd);
+	else
+		set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
+
 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+	pci_set_drvdata(pdev, NULL);
+	pci_dev_put(pdev);
+
 }
 
 /*
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -140,6 +140,7 @@ enum {
 	MTIP_PF_SVC_THD_ACTIVE_BIT  = 4,
 	MTIP_PF_ISSUE_CMDS_BIT      = 5,
 	MTIP_PF_REBUILD_BIT         = 6,
+	MTIP_PF_SR_CLEANUP_BIT      = 7,
 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
 
 	/* below are bit numbers in 'dd_flag' defined in driver_data */
@@ -147,15 +148,18 @@ enum {
 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
 	MTIP_DDF_OVER_TEMP_BIT      = 2,
 	MTIP_DDF_WRITE_PROTECT_BIT  = 3,
-	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
-				(1 << MTIP_DDF_SEC_LOCK_BIT) |
-				(1 << MTIP_DDF_OVER_TEMP_BIT) |
-				(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-
+	MTIP_DDF_REMOVE_DONE_BIT    = 4,
 	MTIP_DDF_CLEANUP_BIT        = 5,
 	MTIP_DDF_RESUME_BIT         = 6,
 	MTIP_DDF_INIT_DONE_BIT      = 7,
 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
+
+	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+				(1 << MTIP_DDF_SEC_LOCK_BIT) |
+				(1 << MTIP_DDF_OVER_TEMP_BIT) |
+				(1 << MTIP_DDF_WRITE_PROTECT_BIT) |
+				(1 << MTIP_DDF_REBUILD_FAILED_BIT)),
+
 };
 
 struct smart_attr {
@@ -499,6 +503,8 @@ struct driver_data {
 
 	bool trim_supp; /* flag indicating trim support */
 
+	bool sr;
+
 	int numa_node; /* NUMA support */
 
 	char workq_name[32];
@@ -511,6 +517,8 @@ struct driver_data {
 
 	int isr_binding;
 
+	struct block_device *bdev;
+
 	int unal_qdepth; /* qdepth of unaligned IO queue */
 
 	struct list_head online_list; /* linkage for online list */
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -473,45 +473,31 @@ static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
 {
 	if (!pkt_debugfs_root)
 		return;
-	pd->dfs_f_info = NULL;
 	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
-	if (IS_ERR(pd->dfs_d_root)) {
-		pd->dfs_d_root = NULL;
+	if (!pd->dfs_d_root)
 		return;
-	}
+
 	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
 			pd->dfs_d_root, pd, &debug_fops);
-	if (IS_ERR(pd->dfs_f_info)) {
-		pd->dfs_f_info = NULL;
-		return;
-	}
 }
 
 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
 {
 	if (!pkt_debugfs_root)
 		return;
-	if (pd->dfs_f_info)
-		debugfs_remove(pd->dfs_f_info);
+	debugfs_remove(pd->dfs_f_info);
+	debugfs_remove(pd->dfs_d_root);
 	pd->dfs_f_info = NULL;
-	if (pd->dfs_d_root)
-		debugfs_remove(pd->dfs_d_root);
 	pd->dfs_d_root = NULL;
 }
 
 static void pkt_debugfs_init(void)
 {
 	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-	if (IS_ERR(pkt_debugfs_root)) {
-		pkt_debugfs_root = NULL;
-		return;
-	}
 }
 
 static void pkt_debugfs_cleanup(void)
 {
-	if (!pkt_debugfs_root)
-		return;
 	debugfs_remove(pkt_debugfs_root);
 	pkt_debugfs_root = NULL;
 }
 
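Background for this fix: when debugfs is built in, debugfs_create_dir() and debugfs_create_file() report failure by returning NULL -- an ERR_PTR only comes from the stubs when CONFIG_DEBUG_FS is off -- so the IS_ERR() checks could never catch a real error. debugfs_remove() also accepts a NULL dentry as a no-op, which is why the removal-side guards were dropped. The corrected usage in sketch form (names illustrative):

static struct dentry *root, *info;

static void example_debugfs_new(const struct file_operations *fops)
{
	root = debugfs_create_dir("example", NULL);
	if (!root)		/* NULL, not ERR_PTR, signals failure */
		return;

	info = debugfs_create_file("info", S_IRUGO, root, NULL, fops);
}

static void example_debugfs_remove(void)
{
	debugfs_remove(info);	/* debugfs_remove(NULL) is a no-op */
	debugfs_remove(root);
	info = NULL;
	root = NULL;
}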
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
 	for (i = 0; i < card->n_targets; i++) {
 		spin_lock_bh(&card->ctrl[i].queue_lock);
 		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
-					     &card->ctrl[i].queue);
+					     &card->ctrl[i].queue,
+					     COMPLETE_DMA);
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 
 		cnt += rsxx_dma_cancel(&card->ctrl[i]);
@@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 
 	card->eeh_state = 0;
 
-	st = rsxx_eeh_remap_dmas(card);
-	if (st)
-		goto failed_remap_dmas;
-
 	spin_lock_irqsave(&card->irq_lock, flags);
 	if (card->n_targets & RSXX_MAX_TARGETS)
 		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 	return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
 	for (i = 0; i < card->n_targets; i++) {
 		if (card->ctrl[i].status.buf)
 			pci_free_consistent(card->dev,
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 		return -ENOMEM;
 	}
 
-	blk_size = card->config.data.block_size;
+	if (card->config_valid) {
+		blk_size = card->config.data.block_size;
+		blk_queue_dma_alignment(card->queue, blk_size - 1);
+		blk_queue_logical_block_size(card->queue, blk_size);
+	}
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
 	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
-	blk_queue_dma_alignment(card->queue, blk_size - 1);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
-	blk_queue_logical_block_size(card->queue, blk_size);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+	if (dma->cmd != HW_CMD_BLK_DISCARD) {
+		if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+				       get_dma_size(dma),
+				       dma->cmd == HW_CMD_BLK_WRITE ?
+				       PCI_DMA_TODEVICE :
+				       PCI_DMA_FROMDEVICE);
+		}
+	}
+
+	kmem_cache_free(rsxx_dma_pool, dma);
+}
+
 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 	if (status & DMA_CANCELLED)
 		ctrl->stats.dma_cancelled++;
 
-	if (dma->dma_addr)
-		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
-			       get_dma_size(dma),
-			       dma->cmd == HW_CMD_BLK_WRITE ?
-			       PCI_DMA_TODEVICE :
-			       PCI_DMA_FROMDEVICE);
-
 	if (dma->cb)
 		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
-	kmem_cache_free(rsxx_dma_pool, dma);
+	rsxx_free_dma(ctrl, dma);
 }
 
 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
-			   struct list_head *q)
+			   struct list_head *q, unsigned int done)
 {
 	struct rsxx_dma *dma;
 	struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
 
 	list_for_each_entry_safe(dma, tmp, q, list) {
 		list_del(&dma->list);
-		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		if (done & COMPLETE_DMA)
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		else
+			rsxx_free_dma(ctrl, dma);
 		cnt++;
 	}
 
@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
|
||||||
|
|
||||||
/* Clean up the DMA queue */
|
/* Clean up the DMA queue */
|
||||||
spin_lock(&ctrl->queue_lock);
|
spin_lock(&ctrl->queue_lock);
|
||||||
cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
|
cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
|
||||||
spin_unlock(&ctrl->queue_lock);
|
spin_unlock(&ctrl->queue_lock);
|
||||||
|
|
||||||
cnt += rsxx_dma_cancel(ctrl);
|
cnt += rsxx_dma_cancel(ctrl);
|
||||||
|
@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
|
||||||
int tag;
|
int tag;
|
||||||
int cmds_pending = 0;
|
int cmds_pending = 0;
|
||||||
struct hw_cmd *hw_cmd_buf;
|
struct hw_cmd *hw_cmd_buf;
|
||||||
|
int dir;
|
||||||
|
|
||||||
hw_cmd_buf = ctrl->cmd.buf;
|
hw_cmd_buf = ctrl->cmd.buf;
|
||||||
|
|
||||||
|
@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (dma->cmd != HW_CMD_BLK_DISCARD) {
|
||||||
|
if (dma->cmd == HW_CMD_BLK_WRITE)
|
||||||
|
dir = PCI_DMA_TODEVICE;
|
||||||
|
else
|
||||||
|
dir = PCI_DMA_FROMDEVICE;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The function pci_map_page is placed here because we
|
||||||
|
* can only, by design, issue up to 255 commands to the
|
||||||
|
* hardware at one time per DMA channel. So the maximum
|
||||||
|
* amount of mapped memory would be 255 * 4 channels *
|
||||||
|
* 4096 Bytes which is less than 2GB, the limit of a x8
|
||||||
|
* Non-HWWD PCIe slot. This way the pci_map_page
|
||||||
|
* function should never fail because of a lack of
|
||||||
|
* mappable memory.
|
||||||
|
*/
|
||||||
|
dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
|
||||||
|
dma->pg_off, dma->sub_page.cnt << 9, dir);
|
||||||
|
if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
|
||||||
|
push_tracker(ctrl->trackers, tag);
|
||||||
|
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
set_tracker_dma(ctrl->trackers, tag, dma);
|
set_tracker_dma(ctrl->trackers, tag, dma);
|
||||||
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
|
hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
|
||||||
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
|
hw_cmd_buf[ctrl->cmd.idx].tag = tag;
|
||||||
|
@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
|
||||||
if (!dma)
|
if (!dma)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
|
|
||||||
dir ? PCI_DMA_TODEVICE :
|
|
||||||
PCI_DMA_FROMDEVICE);
|
|
||||||
if (!dma->dma_addr) {
|
|
||||||
kmem_cache_free(rsxx_dma_pool, dma);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
|
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
|
||||||
dma->laddr = laddr;
|
dma->laddr = laddr;
|
||||||
dma->sub_page.off = (dma_off >> 9);
|
dma->sub_page.off = (dma_off >> 9);
|
||||||
|
@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
bvec_err:
|
bvec_err:
|
||||||
for (i = 0; i < card->n_targets; i++) {
|
for (i = 0; i < card->n_targets; i++)
|
||||||
spin_lock_bh(&card->ctrl[i].queue_lock);
|
rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
|
||||||
rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
|
FREE_DMA);
|
||||||
spin_unlock_bh(&card->ctrl[i].queue_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
return st;
|
return st;
|
||||||
}
|
}
|
||||||
|
@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
|
||||||
|
|
||||||
/* Clean up the DMA queue */
|
/* Clean up the DMA queue */
|
||||||
spin_lock_bh(&ctrl->queue_lock);
|
spin_lock_bh(&ctrl->queue_lock);
|
||||||
rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
|
rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
|
||||||
spin_unlock_bh(&ctrl->queue_lock);
|
spin_unlock_bh(&ctrl->queue_lock);
|
||||||
|
|
||||||
rsxx_dma_cancel(ctrl);
|
rsxx_dma_cancel(ctrl);
|
||||||
|
@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
|
||||||
else
|
else
|
||||||
card->ctrl[i].stats.reads_issued--;
|
card->ctrl[i].stats.reads_issued--;
|
||||||
|
|
||||||
|
if (dma->cmd != HW_CMD_BLK_DISCARD) {
|
||||||
|
pci_unmap_page(card->dev, dma->dma_addr,
|
||||||
|
get_dma_size(dma),
|
||||||
|
dma->cmd == HW_CMD_BLK_WRITE ?
|
||||||
|
PCI_DMA_TODEVICE :
|
||||||
|
PCI_DMA_FROMDEVICE);
|
||||||
|
}
|
||||||
|
|
||||||
list_add_tail(&dma->list, &issued_dmas[i]);
|
list_add_tail(&dma->list, &issued_dmas[i]);
|
||||||
push_tracker(card->ctrl[i].trackers, j);
|
push_tracker(card->ctrl[i].trackers, j);
|
||||||
cnt++;
|
cnt++;
|
||||||
|
@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
|
||||||
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
|
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
|
||||||
card->ctrl[i].stats.sw_q_depth += cnt;
|
card->ctrl[i].stats.sw_q_depth += cnt;
|
||||||
card->ctrl[i].e_cnt = 0;
|
card->ctrl[i].e_cnt = 0;
|
||||||
|
|
||||||
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
|
|
||||||
if (dma->dma_addr)
|
|
||||||
pci_unmap_page(card->dev, dma->dma_addr,
|
|
||||||
get_dma_size(dma),
|
|
||||||
dma->cmd == HW_CMD_BLK_WRITE ?
|
|
||||||
PCI_DMA_TODEVICE :
|
|
||||||
PCI_DMA_FROMDEVICE);
|
|
||||||
}
|
|
||||||
spin_unlock_bh(&card->ctrl[i].queue_lock);
|
spin_unlock_bh(&card->ctrl[i].queue_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
|
|
||||||
{
|
|
||||||
struct rsxx_dma *dma;
|
|
||||||
int i;
|
|
||||||
|
|
||||||
for (i = 0; i < card->n_targets; i++) {
|
|
||||||
spin_lock_bh(&card->ctrl[i].queue_lock);
|
|
||||||
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
|
|
||||||
dma->dma_addr = pci_map_page(card->dev, dma->page,
|
|
||||||
dma->pg_off, get_dma_size(dma),
|
|
||||||
dma->cmd == HW_CMD_BLK_WRITE ?
|
|
||||||
PCI_DMA_TODEVICE :
|
|
||||||
PCI_DMA_FROMDEVICE);
|
|
||||||
if (!dma->dma_addr) {
|
|
||||||
spin_unlock_bh(&card->ctrl[i].queue_lock);
|
|
||||||
kmem_cache_free(rsxx_dma_pool, dma);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
spin_unlock_bh(&card->ctrl[i].queue_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int rsxx_dma_init(void)
|
int rsxx_dma_init(void)
|
||||||
{
|
{
|
||||||
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
|
rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
|
||||||
|
|
|
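The comment added to rsxx_issue_dmas() above bounds the memory that can be DMA-mapped at any instant, and the arithmetic checks out: 255 commands x 4 channels x 4096 bytes = 4,177,920 bytes, a little under 4 MiB, so mapping pages at issue time (instead of at queue time, as the removed rsxx_queue_dma() code did) keeps the mapped footprint far below the 2 GB limit the comment cites. A standalone check of that bound, with values taken from the comment and illustrative names:

#include <stdio.h>

int main(void)
{
	const unsigned long max_cmds_per_channel = 255; /* hardware tag limit */
	const unsigned long channels = 4;
	const unsigned long bytes_per_dma = 4096;       /* one 4 KiB block */

	unsigned long worst_case =
		max_cmds_per_channel * channels * bytes_per_dma;

	/* Prints 4177920 -- just under 4 MiB, far below 2 GB. */
	printf("worst-case mapped bytes: %lu\n", worst_case);
	return 0;
}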
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
@@ -52,7 +52,7 @@ struct proc_cmd;
 #define RS70_PCI_REV_SUPPORTED	4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT		12
@@ -345,6 +345,11 @@ enum rsxx_creg_stat {
 	CREG_STAT_TAG_MASK	= 0x0000ff00,
 };
 
+enum rsxx_dma_finish {
+	FREE_DMA	= 0x0,
+	COMPLETE_DMA	= 0x1,
+};
+
 static inline unsigned int CREG_DATA(int N)
 {
 	return CREG_DATA0 + (N << 2);
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+			   struct list_head *q,
+			   unsigned int done);
 int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
(One file's diff was suppressed because it is too large.)
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
@@ -0,0 +1,330 @@
+/* Copyright 2012 STEC, Inc.
+ *
+ * This file is licensed under the terms of the 3-clause
+ * BSD License (http://opensource.org/licenses/BSD-3-Clause)
+ * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
+ * at your option. Both licenses are also available in the LICENSE file
+ * distributed with this project. This file may not be copied, modified,
+ * or distributed except in accordance with those terms.
+ */
+
+
+#ifndef SKD_S1120_H
+#define SKD_S1120_H
+
+#pragma pack(push, s1120_h, 1)
+
+/*
+ * Q-channel, 64-bit r/w
+ */
+#define FIT_Q_COMMAND			0x400u
+#define FIT_QCMD_QID_MASK		(0x3 << 1)
+#define FIT_QCMD_QID0			(0x0 << 1)
+#define FIT_QCMD_QID_NORMAL		FIT_QCMD_QID0
+#define FIT_QCMD_QID1			(0x1 << 1)
+#define FIT_QCMD_QID2			(0x2 << 1)
+#define FIT_QCMD_QID3			(0x3 << 1)
+#define FIT_QCMD_FLUSH_QUEUE		(0ull)	/* add QID */
+#define FIT_QCMD_MSGSIZE_MASK		(0x3 << 4)
+#define FIT_QCMD_MSGSIZE_64		(0x0 << 4)
+#define FIT_QCMD_MSGSIZE_128		(0x1 << 4)
+#define FIT_QCMD_MSGSIZE_256		(0x2 << 4)
+#define FIT_QCMD_MSGSIZE_512		(0x3 << 4)
+#define FIT_QCMD_BASE_ADDRESS_MASK	(0xFFFFFFFFFFFFFFC0ull)
+
+/*
+ * Control, 32-bit r/w
+ */
+#define FIT_CONTROL			0x500u
+#define FIT_CR_HARD_RESET		(1u << 0u)
+#define FIT_CR_SOFT_RESET		(1u << 1u)
+#define FIT_CR_DIS_TIMESTAMPS		(1u << 6u)
+#define FIT_CR_ENABLE_INTERRUPTS	(1u << 7u)
+
+/*
+ * Status, 32-bit, r/o
+ */
+#define FIT_STATUS			0x510u
+#define FIT_SR_DRIVE_STATE_MASK		0x000000FFu
+#define FIT_SR_SIGNATURE		(0xFF << 8)
+#define FIT_SR_PIO_DMA			(1 << 16)
+#define FIT_SR_DRIVE_OFFLINE		0x00
+#define FIT_SR_DRIVE_INIT		0x01
+/* #define FIT_SR_DRIVE_READY		0x02 */
+#define FIT_SR_DRIVE_ONLINE		0x03
+#define FIT_SR_DRIVE_BUSY		0x04
+#define FIT_SR_DRIVE_FAULT		0x05
+#define FIT_SR_DRIVE_DEGRADED		0x06
+#define FIT_SR_PCIE_LINK_DOWN		0x07
+#define FIT_SR_DRIVE_SOFT_RESET		0x08
+#define FIT_SR_DRIVE_INIT_FAULT		0x09
+#define FIT_SR_DRIVE_BUSY_SANITIZE	0x0A
+#define FIT_SR_DRIVE_BUSY_ERASE		0x0B
+#define FIT_SR_DRIVE_FW_BOOTING		0x0C
+#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD	0xFE
+#define FIT_SR_DEVICE_MISSING		0xFF
+#define FIT_SR__RESERVED		0xFFFFFF00u
+
+/*
+ * FIT_STATUS - Status register data definition
+ */
+#define FIT_SR_STATE_MASK		(0xFF << 0)
+#define FIT_SR_SIGNATURE		(0xFF << 8)
+#define FIT_SR_PIO_DMA			(1 << 16)
+
+/*
+ * Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
+ */
+#define FIT_INT_STATUS_HOST		0x520u
+#define FIT_ISH_FW_STATE_CHANGE		(1u << 0u)
+#define FIT_ISH_COMPLETION_POSTED	(1u << 1u)
+#define FIT_ISH_MSG_FROM_DEV		(1u << 2u)
+#define FIT_ISH_UNDEFINED_3		(1u << 3u)
+#define FIT_ISH_UNDEFINED_4		(1u << 4u)
+#define FIT_ISH_Q0_FULL			(1u << 5u)
+#define FIT_ISH_Q1_FULL			(1u << 6u)
+#define FIT_ISH_Q2_FULL			(1u << 7u)
+#define FIT_ISH_Q3_FULL			(1u << 8u)
+#define FIT_ISH_QCMD_FIFO_OVERRUN	(1u << 9u)
+#define FIT_ISH_BAD_EXP_ROM_READ	(1u << 10u)
+
+#define FIT_INT_DEF_MASK \
+	(FIT_ISH_FW_STATE_CHANGE | \
+	 FIT_ISH_COMPLETION_POSTED | \
+	 FIT_ISH_MSG_FROM_DEV | \
+	 FIT_ISH_Q0_FULL | \
+	 FIT_ISH_Q1_FULL | \
+	 FIT_ISH_Q2_FULL | \
+	 FIT_ISH_Q3_FULL | \
+	 FIT_ISH_QCMD_FIFO_OVERRUN | \
+	 FIT_ISH_BAD_EXP_ROM_READ)
+
+#define FIT_INT_QUEUE_FULL \
+	(FIT_ISH_Q0_FULL | \
+	 FIT_ISH_Q1_FULL | \
+	 FIT_ISH_Q2_FULL | \
+	 FIT_ISH_Q3_FULL)
+
+#define MSI_MSG_NWL_ERROR_0		0x00000000
+#define MSI_MSG_NWL_ERROR_1		0x00000001
+#define MSI_MSG_NWL_ERROR_2		0x00000002
+#define MSI_MSG_NWL_ERROR_3		0x00000003
+#define MSI_MSG_STATE_CHANGE		0x00000004
+#define MSI_MSG_COMPLETION_POSTED	0x00000005
+#define MSI_MSG_MSG_FROM_DEV		0x00000006
+#define MSI_MSG_RESERVED_0		0x00000007
+#define MSI_MSG_RESERVED_1		0x00000008
+#define MSI_MSG_QUEUE_0_FULL		0x00000009
+#define MSI_MSG_QUEUE_1_FULL		0x0000000A
+#define MSI_MSG_QUEUE_2_FULL		0x0000000B
+#define MSI_MSG_QUEUE_3_FULL		0x0000000C
+
+#define FIT_INT_RESERVED_MASK \
+	(FIT_ISH_UNDEFINED_3 | \
+	 FIT_ISH_UNDEFINED_4)
+
+/*
+ * Interrupt mask, 32-bit r/w
+ * Bit definitions are the same as FIT_INT_STATUS_HOST
+ */
+#define FIT_INT_MASK_HOST		0x528u
+
+/*
+ * Message to device, 32-bit r/w
+ */
+#define FIT_MSG_TO_DEVICE		0x540u
+
+/*
+ * Message from device, 32-bit, r/o
+ */
+#define FIT_MSG_FROM_DEVICE		0x548u
+
+/*
+ * 32-bit messages to/from device, composition/extraction macros
+ */
+#define FIT_MXD_CONS(TYPE, PARAM, DATA) \
+	((((TYPE) & 0xFFu) << 24u) | \
+	 (((PARAM) & 0xFFu) << 16u) | \
+	 (((DATA) & 0xFFFFu) << 0u))
+#define FIT_MXD_TYPE(MXD)		(((MXD) >> 24u) & 0xFFu)
+#define FIT_MXD_PARAM(MXD)		(((MXD) >> 16u) & 0xFFu)
+#define FIT_MXD_DATA(MXD)		(((MXD) >> 0u) & 0xFFFFu)
+
+/*
+ * Types of messages to/from device
+ */
+#define FIT_MTD_FITFW_INIT		0x01u
+#define FIT_MTD_GET_CMDQ_DEPTH		0x02u
+#define FIT_MTD_SET_COMPQ_DEPTH		0x03u
+#define FIT_MTD_SET_COMPQ_ADDR		0x04u
+#define FIT_MTD_ARM_QUEUE		0x05u
+#define FIT_MTD_CMD_LOG_HOST_ID		0x07u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_LO	0x08u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_HI	0x09u
+#define FIT_MFD_SMART_EXCEEDED		0x10u
+#define FIT_MFD_POWER_DOWN		0x11u
+#define FIT_MFD_OFFLINE			0x12u
+#define FIT_MFD_ONLINE			0x13u
+#define FIT_MFD_FW_RESTARTING		0x14u
+#define FIT_MFD_PM_ACTIVE		0x15u
+#define FIT_MFD_PM_STANDBY		0x16u
+#define FIT_MFD_PM_SLEEP		0x17u
+#define FIT_MFD_CMD_PROGRESS		0x18u
+
+#define FIT_MTD_DEBUG			0xFEu
+#define FIT_MFD_DEBUG			0xFFu
+
+#define FIT_MFD_MASK			(0xFFu)
+#define FIT_MFD_DATA_MASK		(0xFFu)
+#define FIT_MFD_MSG(x)			(((x) >> 24) & FIT_MFD_MASK)
+#define FIT_MFD_DATA(x)			((x) & FIT_MFD_MASK)
+
+/*
+ * Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
+ * Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
+ * (was Response buffer in docs)
+ */
+#define FIT_MSG_TO_DEVICE_ARG		0x580u
+
+/*
+ * Hardware (ASIC) version, 32-bit r/o
+ */
+#define FIT_HW_VERSION			0x588u
+
+/*
+ * Scatter/gather list descriptor.
+ * 32-bytes and must be aligned on a 32-byte boundary.
+ * All fields are in little endian order.
+ */
+struct fit_sg_descriptor {
+	uint32_t control;
+	uint32_t byte_count;
+	uint64_t host_side_addr;
+	uint64_t dev_side_addr;
+	uint64_t next_desc_ptr;
+};
+
+#define FIT_SGD_CONTROL_NOT_LAST	0x000u
+#define FIT_SGD_CONTROL_LAST		0x40Eu
+
+/*
+ * Header at the beginning of a FIT message. The header
+ * is followed by SSDI requests each 64 bytes.
+ * A FIT message can be up to 512 bytes long and must start
+ * on a 64-byte boundary.
+ */
+struct fit_msg_hdr {
+	uint8_t protocol_id;
+	uint8_t num_protocol_cmds_coalesced;
+	uint8_t _reserved[62];
+};
+
+#define FIT_PROTOCOL_ID_FIT	1
+#define FIT_PROTOCOL_ID_SSDI	2
+#define FIT_PROTOCOL_ID_SOFIT	3
+
+
+#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
+#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF)
+
+/*
+ * Format of a completion entry. The completion queue is circular
+ * and must have at least as many entries as the maximum number
+ * of commands that may be issued to the device.
+ *
+ * There are no head/tail pointers. The cycle value is used to
+ * infer the presence of new completion records.
+ * Initially the cycle in all entries is 0, the index is 0, and
+ * the cycle value to expect is 1. When completions are added
+ * their cycle values are set to 1. When the index wraps the
+ * cycle value to expect is incremented.
+ *
+ * Command_context is opaque and taken verbatim from the SSDI command.
+ * All other fields are big endian.
+ */
+#define FIT_PROTOCOL_VERSION_0		0
+
+/*
+ * Protocol major version 1 completion entry.
+ * The major protocol version is found in bits
+ * 20-23 of the FIT_MTD_FITFW_INIT response.
+ */
+struct fit_completion_entry_v1 {
+	uint32_t num_returned_bytes;
+	uint16_t tag;
+	uint8_t  status;	/* SCSI status */
+	uint8_t  cycle;
+};
+#define FIT_PROTOCOL_VERSION_1		1
+#define FIT_PROTOCOL_VERSION_CURRENT	FIT_PROTOCOL_VERSION_1
+
+struct fit_comp_error_info {
+	uint8_t type:7;		/* 00: Bits0-6 indicates the type of sense data. */
+	uint8_t valid:1;	/* 00: Bit 7 := 1 ==> info field is valid. */
+	uint8_t reserved0;	/* 01: Obsolete field */
+	uint8_t key:4;		/* 02: Bits0-3 indicate the sense key. */
+	uint8_t reserved2:1;	/* 02: Reserved bit. */
+	uint8_t bad_length:1;	/* 02: Incorrect Length Indicator */
+	uint8_t end_medium:1;	/* 02: End of Medium */
+	uint8_t file_mark:1;	/* 02: Filemark */
+	uint8_t info[4];	/* 03: */
+	uint8_t reserved1;	/* 07: Additional Sense Length */
+	uint8_t cmd_spec[4];	/* 08: Command Specific Information */
+	uint8_t code;		/* 0C: Additional Sense Code */
+	uint8_t qual;		/* 0D: Additional Sense Code Qualifier */
+	uint8_t fruc;		/* 0E: Field Replaceable Unit Code */
+	uint8_t sks_high:7;	/* 0F: Sense Key Specific (MSB) */
+	uint8_t sks_valid:1;	/* 0F: Sense Key Specific Valid */
+	uint16_t sks_low;	/* 10: Sense Key Specific (LSW) */
+	uint16_t reserved3;	/* 12: Part of additional sense bytes (unused) */
+	uint16_t uec;		/* 14: Additional Sense Bytes */
+	uint64_t per;		/* 16: Additional Sense Bytes */
+	uint8_t reserved4[2];	/* 1E: Additional Sense Bytes (unused) */
+};
+
+
+/* Task management constants */
+#define SOFT_TASK_SIMPLE		0x00
+#define SOFT_TASK_HEAD_OF_QUEUE		0x01
+#define SOFT_TASK_ORDERED		0x02
+
+/* Version zero has the last 32 bits reserved,
+ * Version one has the last 32 bits sg_list_len_bytes;
+ */
+struct skd_command_header {
+	uint64_t sg_list_dma_address;
+	uint16_t tag;
+	uint8_t attribute;
+	uint8_t add_cdb_len;	/* In 32 bit words */
+	uint32_t sg_list_len_bytes;
+};
+
+struct skd_scsi_request {
+	struct skd_command_header hdr;
+	unsigned char cdb[16];
+/*	unsigned char _reserved[16]; */
+};
+
+struct driver_inquiry_data {
+	uint8_t peripheral_device_type:5;
+	uint8_t qualifier:3;
+	uint8_t page_code;
+	uint16_t page_length;
+	uint16_t pcie_bus_number;
+	uint8_t pcie_device_number;
+	uint8_t pcie_function_number;
+	uint8_t pcie_link_speed;
+	uint8_t pcie_link_lanes;
+	uint16_t pcie_vendor_id;
+	uint16_t pcie_device_id;
+	uint16_t pcie_subsystem_vendor_id;
+	uint16_t pcie_subsystem_device_id;
+	uint8_t reserved1[2];
+	uint8_t reserved2[3];
+	uint8_t driver_version_length;
+	uint8_t driver_version[0x14];
+};
+
+#pragma pack(pop, s1120_h)
+
+#endif /* SKD_S1120_H */
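The completion-queue comment in the header above describes a ring with no head/tail pointers: each entry carries a cycle value, the consumer tracks the cycle it expects, and the expected value is incremented every time the index wraps. An illustrative consumer loop for that scheme follows; the queue depth, the field-wise reads, and handle_completion() are assumptions made for the sketch, not part of the skd driver:

#include <stdint.h>

#define CQ_DEPTH 64	/* assumed: >= max outstanding commands */

struct fit_completion_entry_v1 {	/* layout as in skd_s1120.h */
	uint32_t num_returned_bytes;
	uint16_t tag;
	uint8_t  status;
	uint8_t  cycle;
};

extern void handle_completion(uint16_t tag, uint8_t status);

void consume_completions(volatile struct fit_completion_entry_v1 *cq,
			 uint32_t *idx, uint8_t *expect_cycle)
{
	/* A slot holds a new record only if its cycle matches the value
	 * we expect; stale entries still carry the previous lap's cycle. */
	while (cq[*idx].cycle == *expect_cycle) {
		handle_completion(cq[*idx].tag, cq[*idx].status);
		if (++(*idx) == CQ_DEPTH) {
			*idx = 0;
			/* Wrapping the index bumps the expected cycle,
			 * exactly as the header comment prescribes. */
			++(*expect_cycle);
		}
	}
}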
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
@@ -887,6 +887,8 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 	unsigned long secure;
 	struct phys_req preq;
 
+	xen_blkif_get(blkif);
+
 	preq.sector_number = req->u.discard.sector_number;
 	preq.nr_sects      = req->u.discard.nr_sectors;
 
@@ -899,7 +901,6 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
 	}
 	blkif->st_ds_req++;
 
-	xen_blkif_get(blkif);
 	secure = (blkif->vbd.discard_secure &&
 		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
 		  BLKDEV_DISCARD_SECURE : 0;
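The two hunks above move xen_blkif_get() to the top of dispatch_discard_io(), so the reference is taken before any exit path that drops one; previously an early error exit could perform the matching put without the get ever having run. A generic, standalone sketch of that reference-ordering pattern (names and the toy refcount are illustrative, not from the driver):

#include <stdio.h>

struct obj { int refcount; };

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		printf("object freed\n");
}

/* Take the reference up front so every exit path that drops one is
 * balanced -- the bug fixed above was a put on an error path with no
 * preceding get. */
static int dispatch(struct obj *o, int fail_early)
{
	int err = 0;

	obj_get(o);
	if (fail_early) {
		err = -1;
		goto out;
	}
	/* ... do the actual work ... */
out:
	obj_put(o);
	return err;
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	dispatch(&o, 1);	/* error path is now balanced */
	obj_put(&o);
	return 0;
}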
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
@@ -121,7 +121,8 @@ struct blkfront_info
 	struct work_struct work;
 	struct gnttab_free_callback callback;
 	struct blk_shadow shadow[BLK_RING_SIZE];
-	struct list_head persistent_gnts;
+	struct list_head grants;
+	struct list_head indirect_pages;
 	unsigned int persistent_gnts_c;
 	unsigned long shadow_free;
 	unsigned int feature_flush;
@@ -200,15 +201,17 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;
 
-		granted_page = alloc_page(GFP_NOIO);
-		if (!granted_page) {
-			kfree(gnt_list_entry);
-			goto out_of_memory;
+		if (info->feature_persistent) {
+			granted_page = alloc_page(GFP_NOIO);
+			if (!granted_page) {
+				kfree(gnt_list_entry);
+				goto out_of_memory;
+			}
+			gnt_list_entry->pfn = page_to_pfn(granted_page);
 		}
 
-		gnt_list_entry->pfn = page_to_pfn(granted_page);
 		gnt_list_entry->gref = GRANT_INVALID_REF;
-		list_add(&gnt_list_entry->node, &info->persistent_gnts);
+		list_add(&gnt_list_entry->node, &info->grants);
 		i++;
 	}
 
@@ -216,9 +219,10 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 
 out_of_memory:
 	list_for_each_entry_safe(gnt_list_entry, n,
-				 &info->persistent_gnts, node) {
+				 &info->grants, node) {
 		list_del(&gnt_list_entry->node);
-		__free_page(pfn_to_page(gnt_list_entry->pfn));
+		if (info->feature_persistent)
+			__free_page(pfn_to_page(gnt_list_entry->pfn));
 		kfree(gnt_list_entry);
 		i--;
 	}
@@ -227,13 +231,14 @@ static int fill_grant_buffer(struct blkfront_info *info, int num)
 }
 
 static struct grant *get_grant(grant_ref_t *gref_head,
+			       unsigned long pfn,
 			       struct blkfront_info *info)
 {
 	struct grant *gnt_list_entry;
 	unsigned long buffer_mfn;
 
-	BUG_ON(list_empty(&info->persistent_gnts));
-	gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+	BUG_ON(list_empty(&info->grants));
+	gnt_list_entry = list_first_entry(&info->grants, struct grant,
 					  node);
 	list_del(&gnt_list_entry->node);
 
@@ -245,6 +250,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
+	if (!info->feature_persistent) {
+		BUG_ON(!pfn);
+		gnt_list_entry->pfn = pfn;
+	}
 	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
 	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
 					info->xbdev->otherend_id,
@@ -400,10 +409,13 @@ static int blkif_queue_request(struct request *req)
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return 1;
 
-	max_grefs = info->max_indirect_segments ?
-		    info->max_indirect_segments +
-		    INDIRECT_GREFS(info->max_indirect_segments) :
-		    BLKIF_MAX_SEGMENTS_PER_REQUEST;
+	max_grefs = req->nr_phys_segments;
+	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
+		/*
+		 * If we are using indirect segments we need to account
+		 * for the indirect grefs used in the request.
+		 */
+		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
 
 	/* Check if we have enough grants to allocate a requests */
 	if (info->persistent_gnts_c < max_grefs) {
@@ -477,22 +489,34 @@ static int blkif_queue_request(struct request *req)
 
 			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
 			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+				unsigned long pfn;
+
 				if (segments)
 					kunmap_atomic(segments);
 
 				n = i / SEGS_PER_INDIRECT_FRAME;
-				gnt_list_entry = get_grant(&gref_head, info);
+				if (!info->feature_persistent) {
+					struct page *indirect_page;
+
+					/* Fetch a pre-allocated page to use for indirect grefs */
+					BUG_ON(list_empty(&info->indirect_pages));
+					indirect_page = list_first_entry(&info->indirect_pages,
+									 struct page, lru);
+					list_del(&indirect_page->lru);
+					pfn = page_to_pfn(indirect_page);
+				}
+				gnt_list_entry = get_grant(&gref_head, pfn, info);
 				info->shadow[id].indirect_grants[n] = gnt_list_entry;
 				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
 				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
 			}
 
-			gnt_list_entry = get_grant(&gref_head, info);
+			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
 			ref = gnt_list_entry->gref;
 
 			info->shadow[id].grants_used[i] = gnt_list_entry;
 
-			if (rq_data_dir(req)) {
+			if (rq_data_dir(req) && info->feature_persistent) {
 				char *bvec_data;
 				void *shared_data;
 
@@ -904,21 +928,36 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		blk_stop_queue(info->rq);
 
 	/* Remove all persistent grants */
-	if (!list_empty(&info->persistent_gnts)) {
+	if (!list_empty(&info->grants)) {
 		list_for_each_entry_safe(persistent_gnt, n,
-					 &info->persistent_gnts, node) {
+					 &info->grants, node) {
 			list_del(&persistent_gnt->node);
 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
 				gnttab_end_foreign_access(persistent_gnt->gref,
 							  0, 0UL);
 				info->persistent_gnts_c--;
 			}
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 	}
 	BUG_ON(info->persistent_gnts_c != 0);
 
+	/*
+	 * Remove indirect pages, this only happens when using indirect
+	 * descriptors but not persistent grants
+	 */
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+
+		BUG_ON(info->feature_persistent);
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/*
 		 * Clear persistent grants present in requests already
@@ -933,7 +972,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = info->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			__free_page(pfn_to_page(persistent_gnt->pfn));
+			if (info->feature_persistent)
+				__free_page(pfn_to_page(persistent_gnt->pfn));
 			kfree(persistent_gnt);
 		}
 
@@ -992,7 +1032,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
 	       s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
 
-	if (bret->operation == BLKIF_OP_READ) {
+	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
 		/*
 		 * Copy the data received from the backend into the bvec.
 		 * Since bv_offset can be different than 0, and bv_len different
@@ -1013,13 +1053,51 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 	}
 	/* Add the persistent grant into the list of free grants */
 	for (i = 0; i < nseg; i++) {
-		list_add(&s->grants_used[i]->node, &info->persistent_gnts);
-		info->persistent_gnts_c++;
+		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+			/*
+			 * If the grant is still mapped by the backend (the
+			 * backend has chosen to make this grant persistent)
+			 * we add it at the head of the list, so it will be
+			 * reused first.
+			 */
+			if (!info->feature_persistent)
+				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+						     s->grants_used[i]->gref);
+			list_add(&s->grants_used[i]->node, &info->grants);
+			info->persistent_gnts_c++;
+		} else {
+			/*
+			 * If the grant is not mapped by the backend we end the
+			 * foreign access and add it to the tail of the list,
+			 * so it will not be picked again unless we run out of
+			 * persistent grants.
+			 */
+			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
+			s->grants_used[i]->gref = GRANT_INVALID_REF;
+			list_add_tail(&s->grants_used[i]->node, &info->grants);
+		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
-			list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
-			info->persistent_gnts_c++;
+			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
+				if (!info->feature_persistent)
+					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
+							     s->indirect_grants[i]->gref);
+				list_add(&s->indirect_grants[i]->node, &info->grants);
+				info->persistent_gnts_c++;
+			} else {
+				struct page *indirect_page;
+
+				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
+				/*
+				 * Add the used indirect page back to the list of
+				 * available pages for indirect grefs.
+				 */
+				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+				list_add(&indirect_page->lru, &info->indirect_pages);
+				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+			}
 		}
 	}
 }
@@ -1313,7 +1391,8 @@ static int blkfront_probe(struct xenbus_device *dev,
 	spin_lock_init(&info->io_lock);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
-	INIT_LIST_HEAD(&info->persistent_gnts);
+	INIT_LIST_HEAD(&info->grants);
+	INIT_LIST_HEAD(&info->indirect_pages);
 	info->persistent_gnts_c = 0;
 	info->connected = BLKIF_STATE_DISCONNECTED;
 	INIT_WORK(&info->work, blkif_restart_queue);
@@ -1609,6 +1688,23 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 	if (err)
 		goto out_of_memory;
 
+	if (!info->feature_persistent && info->max_indirect_segments) {
+		/*
+		 * We are using indirect descriptors but not persistent
+		 * grants, we need to allocate a set of pages that can be
+		 * used for mapping indirect grefs
+		 */
+		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;
+
+		BUG_ON(!list_empty(&info->indirect_pages));
+		for (i = 0; i < num; i++) {
+			struct page *indirect_page = alloc_page(GFP_NOIO);
+			if (!indirect_page)
+				goto out_of_memory;
+			list_add(&indirect_page->lru, &info->indirect_pages);
+		}
+	}
+
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		info->shadow[i].grants_used = kzalloc(
 			sizeof(info->shadow[i].grants_used[0]) * segs,
@@ -1639,6 +1735,13 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 		kfree(info->shadow[i].indirect_grants);
 		info->shadow[i].indirect_grants = NULL;
 	}
+	if (!list_empty(&info->indirect_pages)) {
+		struct page *indirect_page, *n;
+		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
+			list_del(&indirect_page->lru);
+			__free_page(indirect_page);
+		}
+	}
 	return -ENOMEM;
 }
 
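The blkif_completion() hunks above implement a simple recycling policy: gnttab_query_foreign_access() tells the frontend whether the backend kept a grant mapped; still-mapped (persistent) grants go back to the head of the free list so they are reused first, while unmapped grants are revoked and parked at the tail. A condensed sketch of just that list policy, using the kernel list API; still_mapped stands in for the gnttab query, and the types are illustrative rather than the driver's own:

#include <linux/list.h>
#include <linux/types.h>

struct grant_entry {
	struct list_head node;
	unsigned int gref;
};

/* Head = hot, still-mapped grants; tail = cold, revoked grants.
 * Allocation takes from the head, so persistent grants are preferred
 * and revoked ones are only touched when the hot supply runs out. */
static void recycle_grant(struct list_head *free_list,
			  struct grant_entry *g, bool still_mapped)
{
	if (still_mapped)
		list_add(&g->node, free_list);
	else
		list_add_tail(&g->node, free_list);
}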
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
@@ -2979,12 +2979,12 @@ static int dasd_alloc_queue(struct dasd_block *block)
 
 	elevator_exit(block->request_queue->elevator);
 	block->request_queue->elevator = NULL;
+	mutex_lock(&block->request_queue->sysfs_lock);
 	rc = elevator_init(block->request_queue, "deadline");
-	if (rc) {
+	if (rc)
 		blk_cleanup_queue(block->request_queue);
-		return rc;
-	}
-	return 0;
+	mutex_unlock(&block->request_queue->sysfs_lock);
+	return rc;
 }
 
 /*
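The dasd hunk above serializes the driver's elevator re-initialization with elevator switches driven through sysfs: elevator_init() is now called with the queue's sysfs_lock held, and both the success and failure paths funnel through a single unlock before returning rc. A condensed sketch of the resulting shape, assuming the 3.13-era elevator_init(q, name) signature (not the driver's literal code):

#include <linux/blkdev.h>
#include <linux/elevator.h>

static int reinit_deadline_elevator(struct request_queue *q)
{
	int rc;

	mutex_lock(&q->sysfs_lock);
	rc = elevator_init(q, "deadline");
	if (rc)
		blk_cleanup_queue(q);
	mutex_unlock(&q->sysfs_lock);
	return rc;	/* single exit path: the lock is always released */
}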