mirror of https://gitee.com/openkylin/linux.git
dax: enable dax in the presence of known media errors (badblocks)
1/ If a mapping overlaps a bad sector fail the request.

2/ Do not opportunistically report more dax-capable capacity than is
requested when errors present.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
[vishal: fix a conflict with system RAM collision patches]
[vishal: add a 'size' parameter to ->direct_access]
[vishal: fix a conflict with DAX alignment check patches]
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
This commit is contained in:
parent 8b3db9798c
commit 0a70bd4305
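
Before the hunks, a minimal self-contained C model of the two rules above may help: a request that overlaps a known bad sector fails outright, and once any bad blocks are known the driver vouches only for the range actually requested instead of everything up to the end of the device. All names here (struct media, bad_range, dax_available) are illustrative, not kernel API; the real implementation is the pmem_direct_access() hunk below.

/* Minimal userspace model of the badblocks policy described in the
 * commit message.  Illustrative only: bad_range, media, dax_available
 * are made-up names, not kernel API. */
#include <errno.h>
#include <stdio.h>

struct bad_range { long long sector; long long sectors; };

struct media {
	long long size;              /* device size in bytes            */
	const struct bad_range *bb;  /* known bad ranges (512B sectors) */
	int bb_count;
};

/* Rule 1: fail any request that overlaps a known bad sector. */
static int overlaps_bad_sector(const struct media *m, long long sector,
			       long long bytes)
{
	long long end = sector + (bytes + 511) / 512;
	for (int i = 0; i < m->bb_count; i++) {
		long long bs = m->bb[i].sector;
		long long be = bs + m->bb[i].sectors;
		if (sector < be && bs < end)
			return 1;
	}
	return 0;
}

/* How many bytes of DAX mapping the "driver" vouches for, or -errno. */
static long long dax_available(const struct media *m, long long sector,
			       long long bytes)
{
	long long offset = sector * 512;

	if (overlaps_bad_sector(m, sector, bytes))
		return -EIO;                 /* rule 1                   */
	if (m->bb_count)
		return bytes;                /* rule 2: no opportunism   */
	return m->size - offset;             /* clean device: the rest   */
}

int main(void)
{
	struct bad_range bad = { .sector = 1024, .sectors = 8 };
	struct media m = { .size = 1 << 20, .bb = &bad, .bb_count = 1 };

	printf("%lld\n", dax_available(&m, 0, 4096));    /* 4096         */
	printf("%lld\n", dax_available(&m, 1024, 4096)); /* -5 (== -EIO) */
	return 0;
}

In the hunks shown here, the axonram, brd and dcssblk changes only grow the ->direct_access() prototype by the new size argument; pmem is the driver that actually consults its badblocks list.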
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void __pmem **kaddr, pfn_t *pfn)
+		       void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
 
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void __pmem **kaddr, pfn_t *pfn)
+			void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
@@ -182,14 +182,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 }
 
 static long pmem_direct_access(struct block_device *bdev, sector_t sector,
-			void __pmem **kaddr, pfn_t *pfn)
+			void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct pmem_device *pmem = bdev->bd_disk->private_data;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
 
+	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+		return -EIO;
 	*kaddr = pmem->virt_addr + offset;
 	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
+	/*
+	 * If badblocks are present, limit known good range to the
+	 * requested range.
+	 */
+	if (unlikely(pmem->bb.count))
+		return size;
 	return pmem->size - pmem->pfn_pad - offset;
 }
 
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static blk_qc_t dcssblk_make_request(struct request_queue *q,
 						struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-			 void __pmem **kaddr, pfn_t *pfn);
+			 void __pmem **kaddr, pfn_t *pfn, long size);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -883,7 +883,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-			void __pmem **kaddr, pfn_t *pfn)
+			void __pmem **kaddr, pfn_t *pfn, long size)
 {
 	struct dcssblk_dev_info *dev_info;
 	unsigned long offset, dev_sz;
@@ -29,7 +29,6 @@
 #include <linux/log2.h>
 #include <linux/cleancache.h>
 #include <linux/dax.h>
-#include <linux/badblocks.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -501,7 +500,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
 	sector += get_start_sect(bdev);
 	if (sector % (PAGE_SIZE / 512))
 		return -EINVAL;
-	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
+	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
 	if (!avail)
 		return -ERANGE;
 	if (avail > 0 && avail & ~PAGE_MASK)
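
On the caller side, bdev_direct_access() now forwards the request size into ->direct_access() and must interpret the value that comes back. Below is a rough self-contained model of that contract under my own assumptions: usable_bytes() and MODEL_PAGE_SIZE are illustrative names, the errno for a non-page-multiple return is a guess, a negative return is the driver's error, zero maps to -ERANGE as in the hunk above, and a caller should never rely on more than the smaller of what it requested and what the driver reported.

/* Rough model of how a caller might interpret the value returned by
 * ->direct_access() after this change.  Not kernel code. */
#include <errno.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096L

static long usable_bytes(long avail, long requested)
{
	if (avail < 0)
		return avail;          /* driver error, e.g. -EIO          */
	if (avail == 0)
		return -ERANGE;        /* nothing mappable at this offset  */
	if (avail % MODEL_PAGE_SIZE)
		return -EINVAL;        /* partial pages rejected; the exact
					  errno here is a guess            */
	return avail < requested ? avail : requested;
}

int main(void)
{
	printf("%ld\n", usable_bytes(8192, 4096));   /* 4096 */
	printf("%ld\n", usable_bytes(-EIO, 4096));   /* -5   */
	printf("%ld\n", usable_bytes(0, 4096));      /* -34  */
	return 0;
}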
@@ -561,7 +560,6 @@ EXPORT_SYMBOL_GPL(bdev_dax_supported);
  */
 bool bdev_dax_capable(struct block_device *bdev)
 {
-	struct gendisk *disk = bdev->bd_disk;
 	struct blk_dax_ctl dax = {
 		.size = PAGE_SIZE,
 	};
@@ -577,15 +575,6 @@ bool bdev_dax_capable(struct block_device *bdev)
 	if (bdev_direct_access(bdev, &dax) < 0)
 		return false;
 
-	/*
-	 * If the device has known bad blocks, force all I/O through the
-	 * driver / page cache.
-	 *
-	 * TODO: support finer grained dax error handling
-	 */
-	if (disk->bb && disk->bb->count)
-		return false;
-
 	return true;
 }
 
@@ -1668,7 +1668,7 @@ struct block_device_operations {
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-			pfn_t *);
+			pfn_t *, long);
 	unsigned int (*check_events) (struct gendisk *disk,
 			unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */