[media] ivtv: yuv: handle get_user_pages() -errno returns

get_user_pages() may return -errno, such as -EFAULT.  So don't blindly use
its return value as an offset into dma->map[] for the next get_user_pages()
call.  Since we'll give up and return an error if either fails, don't even
make the second call if the first failed to give us exactly what we were
looking for.

The old code would also call put_page() on as many elements of dma->map[]
as we'd asked for, regardless of how many were valid.
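
A minimal sketch of the safer pattern, for illustration only: it mirrors the fix below, assumes the 2011-era get_user_pages(tsk, mm, start, nr_pages, write, force, pages, vmas) signature, and uses a made-up helper name, pin_yuv_ranges().

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Sketch only; pin_yuv_ranges() and 'map' are illustrative, not driver code. */
static int pin_yuv_ranges(unsigned long y_addr, int y_count,
			  unsigned long uv_addr, int uv_count,
			  struct page **map)
{
	int y_pages, uv_pages = 0, i;

	down_read(&current->mm->mmap_sem);
	y_pages = get_user_pages(current, current->mm, y_addr, y_count,
				 0, 1, &map[0], NULL);
	/*
	 * Only issue the second call if the first pinned everything.
	 * Otherwise y_pages may be a short count or even -errno and
	 * must not be used as an index into map[].
	 */
	if (y_pages == y_count)
		uv_pages = get_user_pages(current, current->mm, uv_addr,
					  uv_count, 0, 1, &map[y_pages], NULL);
	up_read(&current->mm->mmap_sem);

	if (y_pages != y_count || uv_pages != uv_count) {
		int rc = -EFAULT;

		/* Release only the entries that were actually pinned. */
		if (uv_pages > 0) {
			for (i = 0; i < uv_pages; i++)
				put_page(map[y_pages + i]);
		} else if (uv_pages < 0) {
			rc = uv_pages;	/* propagate the real -errno */
		}
		if (y_pages > 0) {
			for (i = 0; i < y_pages; i++)
				put_page(map[i]);
		} else if (y_pages < 0) {
			rc = y_pages;
		}
		return rc;
	}
	return y_count + uv_count;	/* total pages pinned */
}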

[Andy Walls modified this patch to return -EFAULT instead of -EINVAL
as Paul's observation "I'm not sure -EINVAL is the best return code vs
-EFAULT or -ENOMEM, [...]" was correct.  The return value bubbles up
as a return code for write(), for which the V4L2 API spec indicates
EINVAL is incorrect and EFAULT is correct.]

Signed-off-by: Paul Cassella <fortytwo-ivtv@maneteren.bigw.org>
Signed-off-by: Andy Walls <awalls@md.metrocast.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Authored by Paul Cassella, 2011-02-12 10:39:51 -03:00; committed by Mauro Carvalho Chehab
parent 0d44b1235c
commit 7fd4b41f05
1 changed file with 38 additions and 10 deletions

@@ -77,23 +77,51 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 	/* Get user pages for DMA Xfer */
 	down_read(&current->mm->mmap_sem);
 	y_pages = get_user_pages(current, current->mm, y_dma.uaddr, y_dma.page_count, 0, 1, &dma->map[0], NULL);
-	uv_pages = get_user_pages(current, current->mm, uv_dma.uaddr, uv_dma.page_count, 0, 1, &dma->map[y_pages], NULL);
+	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
+	if (y_pages == y_dma.page_count) {
+		uv_pages = get_user_pages(current, current->mm,
+					  uv_dma.uaddr, uv_dma.page_count, 0, 1,
+					  &dma->map[y_pages], NULL);
+	}
 	up_read(&current->mm->mmap_sem);
 
-	dma->page_count = y_dma.page_count + uv_dma.page_count;
-
-	if (y_pages + uv_pages != dma->page_count) {
-		IVTV_DEBUG_WARN
-		    ("failed to map user pages, returned %d instead of %d\n",
-		     y_pages + uv_pages, dma->page_count);
-		for (i = 0; i < dma->page_count; i++) {
-			put_page(dma->map[i]);
+	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
+		int rc = -EFAULT;
+
+		if (y_pages == y_dma.page_count) {
+			IVTV_DEBUG_WARN
+			    ("failed to map uv user pages, returned %d "
+			     "expecting %d\n", uv_pages, uv_dma.page_count);
+			if (uv_pages >= 0) {
+				for (i = 0; i < uv_pages; i++)
+					put_page(dma->map[y_pages + i]);
+				rc = -EFAULT;
+			} else {
+				rc = uv_pages;
+			}
+		} else {
+			IVTV_DEBUG_WARN
+			    ("failed to map y user pages, returned %d "
+			     "expecting %d\n", y_pages, y_dma.page_count);
 		}
 		dma->page_count = 0;
-		return -EINVAL;
+		if (y_pages >= 0) {
+			for (i = 0; i < y_pages; i++)
+				put_page(dma->map[i]);
+			/*
+			 * Inherit the -EFAULT from rc's
+			 * initialization, but allow it to be
+			 * overriden by uv_pages above if it was an
+			 * actual errno.
+			 */
+		} else {
+			rc = y_pages;
+		}
+		return rc;
 	}
+	dma->page_count = y_pages + uv_pages;
 
 	/* Fill & map SG List */
 	if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
 		IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");