staging: comedi: protect buffer from being freed while mmapped

If a comedi device is automatically detached by `comedi_auto_unconfig()`,
any data buffers associated with subdevices that support asynchronous
commands will be freed.  If the buffer is mmapped at the time, bad
things are likely to happen!  Prevent this by moving some of the buffer
details from `struct comedi_async` into a new, dynamically allocated,
and kref-counted `struct comedi_buf_map`.  This holds a list of pages, a
reference count, and enough information to free the pages.  The new
member `buf_map` of `struct comedi_async` points to a `struct
comedi_buf_map` when the buffer size is non-zero.
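
For reference, the new structure (mirroring the header hunk further down;
the comments here are editorial annotations, not part of the patch) bundles
everything needed to free the pages without going back through the
subdevice:

    struct comedi_buf_map {
            struct device *dma_hw_dev;         /* hw device for DMA pages (NULL if none) */
            struct comedi_buf_page *page_list; /* virtual and DMA address of each page */
            unsigned int n_pages;              /* number of entries in page_list */
            enum dma_data_direction dma_dir;   /* copied from s->async_dma_dir */
            struct kref refcount;              /* dropped via comedi_buf_map_put() */
    };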

Provide a new helper function `comedi_buf_is_mmapped()` to check whether
a buffer is mmapped.  If it is mmapped, the buffer is not allowed to
be resized and the device is not allowed to be manually detached by the
`COMEDI_DEVCONFIG` ioctl.  Provide helper functions
`comedi_buf_map_get()` and `comedi_buf_map_put()` to manipulate the
reference count of the `struct comedi_buf_map`, which will be freed
along with its contents via the 'release' callback of the `kref_put()`
call.  The reference count is manipulated by the vma operations and the
mmap file operation.
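
The mmap check falls out of the reference count: the `buf_map` pointer in
`struct comedi_async` owns one reference, so a count above one means at
least one live mapping.  Roughly, as in the hunks below (all names are from
the patch; comments are editorial):

    bool comedi_buf_is_mmapped(struct comedi_async *async)
    {
            struct comedi_buf_map *bm = async->buf_map;

            /* One reference is owned by the async itself; any extra
             * references were taken by comedi_mmap()/comedi_vm_open(). */
            return bm && (atomic_read(&bm->refcount.refcount) > 1);
    }

    static void comedi_vm_open(struct vm_area_struct *area)
    {
            struct comedi_buf_map *bm = area->vm_private_data;

            comedi_buf_map_get(bm);         /* a new mapping pins the page list */
    }

    static void comedi_vm_close(struct vm_area_struct *area)
    {
            struct comedi_buf_map *bm = area->vm_private_data;

            comedi_buf_map_put(bm);         /* the last unmap may free the pages */
    }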

Now, when the comedi device is automatically detached, the buffer will
be effectively freed by calling `comedi_buf_alloc()` with a new buffer
size of 0.  That calls local function `__comedi_buf_free()` which calls
`comedi_buf_map_put()` on the `buf_map` member to free it.  It won't
actually be freed until the final 'put'.
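
Sketch of the resulting free path (matching the `__comedi_buf_free()` hunk
below; comments are editorial):

    static void __comedi_buf_free(struct comedi_device *dev,
                                  struct comedi_subdevice *s)
    {
            struct comedi_async *async = s->async;

            if (async->prealloc_buf) {
                    vunmap(async->prealloc_buf);
                    async->prealloc_buf = NULL;
                    async->prealloc_bufsz = 0;
            }
            /* Drop the async's reference; comedi_buf_map_kref_release()
             * frees the page list only when the final reference is put,
             * i.e. after the last mapping has been torn down. */
            comedi_buf_map_put(async->buf_map);
            async->buf_map = NULL;
    }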

Signed-off-by: Ian Abbott <abbotti@mev.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit af93da3163 (parent 63ab039511)
Ian Abbott, 2013-11-08 15:03:43 +00:00, committed by Greg Kroah-Hartman
5 files changed, 99 insertions(+), 57 deletions(-)

@@ -16,6 +16,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "comedidev.h"
#include "comedi_internal.h"
@@ -26,31 +27,21 @@
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned n_pages)
static void comedi_buf_map_kref_release(struct kref *kref)
{
struct comedi_async *async = s->async;
struct comedi_buf_map *bm =
container_of(kref, struct comedi_buf_map, refcount);
struct comedi_buf_page *buf;
unsigned i;
unsigned int i;
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
if (!async->buf_page_list)
return;
for (i = 0; i < n_pages; ++i) {
buf = &async->buf_page_list[i];
if (buf->virt_addr) {
if (bm->page_list) {
for (i = 0; i < bm->n_pages; i++) {
buf = &bm->page_list[i];
clear_bit(PG_reserved,
&(virt_to_page(buf->virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
if (bm->dma_dir != DMA_NONE) {
#ifdef CONFIG_HAS_DMA
dma_free_coherent(dev->hw_dev,
dma_free_coherent(bm->dma_hw_dev,
PAGE_SIZE,
buf->virt_addr,
buf->dma_addr);
@@ -59,10 +50,26 @@ static void __comedi_buf_free(struct comedi_device *dev,
free_page((unsigned long)buf->virt_addr);
}
}
vfree(bm->page_list);
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
async->n_buf_pages = 0;
if (bm->dma_dir != DMA_NONE)
put_device(bm->dma_hw_dev);
kfree(bm);
}
static void __comedi_buf_free(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
comedi_buf_map_put(async->buf_map);
async->buf_map = NULL;
}
static void __comedi_buf_alloc(struct comedi_device *dev,
@@ -71,6 +78,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned i;
@@ -80,18 +88,29 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
return;
}
async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
if (async->buf_page_list)
bm = kzalloc(sizeof(*async->buf_map), GFP_KERNEL);
if (!bm)
return;
async->buf_map = bm;
kref_init(&bm->refcount);
bm->dma_dir = s->async_dma_dir;
if (bm->dma_dir != DMA_NONE)
/* Need ref to hardware device to free buffer later. */
bm->dma_hw_dev = get_device(dev->hw_dev);
bm->page_list = vzalloc(sizeof(*buf) * n_pages);
if (bm->page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (!pages)
return;
for (i = 0; i < n_pages; i++) {
buf = &async->buf_page_list[i];
if (s->async_dma_dir != DMA_NONE)
buf = &bm->page_list[i];
if (bm->dma_dir != DMA_NONE)
#ifdef CONFIG_HAS_DMA
buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
buf->virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
PAGE_SIZE,
&buf->dma_addr,
GFP_KERNEL |
@@ -108,6 +127,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
pages[i] = virt_to_page(buf->virt_addr);
}
bm->n_pages = i;
/* vmap the prealloc_buf if all the pages were allocated */
if (i == n_pages)
@@ -117,6 +137,26 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
vfree(pages);
}
void comedi_buf_map_get(struct comedi_buf_map *bm)
{
if (bm)
kref_get(&bm->refcount);
}
int comedi_buf_map_put(struct comedi_buf_map *bm)
{
if (bm)
return kref_put(&bm->refcount, comedi_buf_map_kref_release);
return 1;
}
bool comedi_buf_is_mmapped(struct comedi_async *async)
{
struct comedi_buf_map *bm = async->buf_map;
return bm && (atomic_read(&bm->refcount.refcount) > 1);
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
@@ -130,7 +170,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
/* deallocate old buffer */
__comedi_buf_free(dev, s, async->n_buf_pages);
__comedi_buf_free(dev, s);
/* allocate new buffer */
if (new_size) {
@@ -140,10 +180,9 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
if (!async->prealloc_buf) {
/* allocation failed */
__comedi_buf_free(dev, s, n_pages);
__comedi_buf_free(dev, s);
return -ENOMEM;
}
async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;

@@ -264,7 +264,7 @@ static int resize_async_buffer(struct comedi_device *dev,
DPRINTK("subdevice is busy, cannot resize buffer\n");
return -EBUSY;
}
if (async->mmap_count) {
if (comedi_buf_is_mmapped(async)) {
DPRINTK("subdevice is mmapped, cannot resize buffer\n");
return -EBUSY;
}
@@ -647,7 +647,7 @@ static int is_device_busy(struct comedi_device *dev)
s = &dev->subdevices[i];
if (s->busy)
return 1;
if (s->async && s->async->mmap_count)
if (s->async && comedi_buf_is_mmapped(s->async))
return 1;
}
@@ -1899,28 +1899,18 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
static void comedi_vm_open(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
struct comedi_buf_map *bm;
async = area->vm_private_data;
dev = async->subdevice->device;
mutex_lock(&dev->mutex);
async->mmap_count++;
mutex_unlock(&dev->mutex);
bm = area->vm_private_data;
comedi_buf_map_get(bm);
}
static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
struct comedi_buf_map *bm;
async = area->vm_private_data;
dev = async->subdevice->device;
mutex_lock(&dev->mutex);
async->mmap_count--;
mutex_unlock(&dev->mutex);
bm = area->vm_private_data;
comedi_buf_map_put(bm);
}
static struct vm_operations_struct comedi_vm_ops = {
@@ -1934,6 +1924,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
struct comedi_device *dev = file->private_data;
struct comedi_subdevice *s;
struct comedi_async *async;
struct comedi_buf_map *bm;
unsigned long start = vma->vm_start;
unsigned long size;
int n_pages;
@@ -1980,8 +1971,13 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
n_pages = size >> PAGE_SHIFT;
bm = async->buf_map;
if (!bm || n_pages > bm->n_pages) {
retval = -EINVAL;
goto done;
}
for (i = 0; i < n_pages; ++i) {
struct comedi_buf_page *buf = &async->buf_page_list[i];
struct comedi_buf_page *buf = &bm->page_list[i];
if (remap_pfn_range(vma, start,
page_to_pfn(virt_to_page(buf->virt_addr)),
@@ -1993,9 +1989,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &comedi_vm_ops;
vma->vm_private_data = async;
vma->vm_private_data = bm;
async->mmap_count++;
vma->vm_ops->open(vma);
retval = 0;
done:

@@ -16,6 +16,9 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s);
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size);
void comedi_buf_reset(struct comedi_async *async);
bool comedi_buf_is_mmapped(struct comedi_async *async);
void comedi_buf_map_get(struct comedi_buf_map *bm);
int comedi_buf_map_put(struct comedi_buf_map *bm);
unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
void comedi_device_cancel_all(struct comedi_device *dev);

@@ -104,18 +104,22 @@ struct comedi_buf_page {
dma_addr_t dma_addr;
};
struct comedi_buf_map {
struct device *dma_hw_dev;
struct comedi_buf_page *page_list;
unsigned int n_pages;
enum dma_data_direction dma_dir;
struct kref refcount;
};
struct comedi_async {
struct comedi_subdevice *subdevice;
void *prealloc_buf; /* pre-allocated buffer */
unsigned int prealloc_bufsz; /* buffer size, in bytes */
/* virtual and dma address of each page */
struct comedi_buf_page *buf_page_list;
unsigned n_buf_pages; /* num elements in buf_page_list */
struct comedi_buf_map *buf_map; /* map of buffer pages */
unsigned int max_bufsize; /* maximum buffer size, bytes */
/* current number of mmaps of prealloc_buf */
unsigned int mmap_count;
/* byte count for writer (write completed) */
unsigned int buf_write_count;

@@ -345,7 +345,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
for (i = 0; i < n_links; i++) {
ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
ring->descriptors[i].addr =
cpu_to_le32(async->buf_page_list[i].dma_addr);
cpu_to_le32(async->buf_map->page_list[i].dma_addr);
ring->descriptors[i].next =
cpu_to_le32(ring->descriptors_dma_addr + (i +
1) *