usb: gadget: f_fs: Allow scatter-gather buffers

Some protocols implemented in userspace with FunctionFS might require large
buffers, e.g. 64kB or more. Currently such buffers are allocated with
kmalloc, which can fail if system memory is highly fragmented.

On the other hand, some UDC hardware supports scatter-gather operation, and
this patch takes advantage of that capability: if the requested buffer is
larger than PAGE_SIZE and the UDC supports scatter-gather, the buffer is
allocated with vmalloc and a scatterlist describing it is created and
passed to the usb request.

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Author: Andrzej Pietrasiewicz, 2018-11-14 10:47:48 +01:00 (committed by Felipe Balbi)
parent 7f7c548c5f
commit 772a7a724f
1 changed file with 86 additions and 7 deletions

drivers/usb/gadget/function/f_fs.c

@@ -18,9 +18,12 @@
 #include <linux/pagemap.h>
 #include <linux/export.h>
 #include <linux/hid.h>
+#include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
+#include <linux/vmalloc.h>
 #include <asm/unaligned.h>
 
 #include <linux/usb/ccid.h>
@@ -219,6 +222,8 @@ struct ffs_io_data {
 
         struct usb_ep *ep;
         struct usb_request *req;
+        struct sg_table sgt;
+        bool use_sg;
 
         struct ffs_data *ffs;
 };
@@ -750,6 +755,65 @@ static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
         return ret;
 }
 
+/*
+ * allocate a virtually contiguous buffer and create a scatterlist describing it
+ * @sg_table - pointer to a place to be filled with sg_table contents
+ * @size     - required buffer size
+ */
+static void *ffs_build_sg_list(struct sg_table *sgt, size_t sz)
+{
+        struct page **pages;
+        void *vaddr, *ptr;
+        unsigned int n_pages;
+        int i;
+
+        vaddr = vmalloc(sz);
+        if (!vaddr)
+                return NULL;
+
+        n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
+        pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL);
+        if (!pages) {
+                vfree(vaddr);
+
+                return NULL;
+        }
+        for (i = 0, ptr = vaddr; i < n_pages; ++i, ptr += PAGE_SIZE)
+                pages[i] = vmalloc_to_page(ptr);
+
+        if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) {
+                kvfree(pages);
+                vfree(vaddr);
+
+                return NULL;
+        }
+        kvfree(pages);
+
+        return vaddr;
+}
+
+static inline void *ffs_alloc_buffer(struct ffs_io_data *io_data,
+                                     size_t data_len)
+{
+        if (io_data->use_sg)
+                return ffs_build_sg_list(&io_data->sgt, data_len);
+
+        return kmalloc(data_len, GFP_KERNEL);
+}
+
+static inline void ffs_free_buffer(struct ffs_io_data *io_data)
+{
+        if (!io_data->buf)
+                return;
+
+        if (io_data->use_sg) {
+                sg_free_table(&io_data->sgt);
+                vfree(io_data->buf);
+        } else {
+                kfree(io_data->buf);
+        }
+}
+
 static void ffs_user_copy_worker(struct work_struct *work)
 {
         struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
@@ -777,7 +841,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
         if (io_data->read)
                 kfree(io_data->to_free);
-        kfree(io_data->buf);
+        ffs_free_buffer(io_data);
         kfree(io_data);
 }
 
@@ -933,6 +997,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
          * earlier
          */
         gadget = epfile->ffs->gadget;
+        io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
 
         spin_lock_irq(&epfile->ffs->eps_lock);
         /* In the meantime, endpoint got disabled or changed. */
@@ -949,7 +1014,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                 data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
         spin_unlock_irq(&epfile->ffs->eps_lock);
 
-        data = kmalloc(data_len, GFP_KERNEL);
+        data = ffs_alloc_buffer(io_data, data_len);
         if (unlikely(!data)) {
                 ret = -ENOMEM;
                 goto error_mutex;
@@ -989,8 +1054,16 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                 bool interrupted = false;
 
                 req = ep->req;
-                req->buf = data;
-                req->length = data_len;
+                if (io_data->use_sg) {
+                        req->buf = NULL;
+                        req->sg = io_data->sgt.sgl;
+                        req->num_sgs = io_data->sgt.nents;
+                } else {
+                        req->buf = data;
+                }
+                req->length = data_len;
+
+                io_data->buf = data;
 
                 req->context = &done;
                 req->complete = ffs_epfile_io_complete;
@@ -1023,8 +1096,14 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
         } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) {
                 ret = -ENOMEM;
         } else {
-                req->buf = data;
-                req->length = data_len;
+                if (io_data->use_sg) {
+                        req->buf = NULL;
+                        req->sg = io_data->sgt.sgl;
+                        req->num_sgs = io_data->sgt.nents;
+                } else {
+                        req->buf = data;
+                }
+                req->length = data_len;
 
                 io_data->buf = data;
                 io_data->ep = ep->ep;
@ -1053,7 +1132,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
error_mutex: error_mutex:
mutex_unlock(&epfile->mutex); mutex_unlock(&epfile->mutex);
error: error:
kfree(data); ffs_free_buffer(io_data);
return ret; return ret;
} }
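
For context, here is a minimal userspace sketch (not part of the patch) of the kind of transfer that exercises the new path: a read() larger than PAGE_SIZE on a FunctionFS endpoint file. The mount point and endpoint name used below (/dev/ffs, ep1) are assumptions and depend on how the function was configured; on a UDC with sg_supported set, such a request is now backed by a vmalloc'ed buffer described by a scatterlist, otherwise f_fs still falls back to a kmalloc'ed buffer.

/*
 * Hypothetical userspace illustration only; not part of this commit.
 * Assumes a FunctionFS function has already been set up (descriptors and
 * strings written to ep0) and that a bulk OUT endpoint is exposed as
 * /dev/ffs/ep1 -- both the mount point and the endpoint name are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define BUF_SIZE        (64 * 1024)     /* larger than PAGE_SIZE on most systems */

int main(void)
{
        int fd;
        void *buf;
        ssize_t n;

        fd = open("/dev/ffs/ep1", O_RDONLY);
        if (fd < 0) {
                perror("open ep1");
                return 1;
        }

        buf = malloc(BUF_SIZE);
        if (!buf) {
                close(fd);
                return 1;
        }

        /*
         * With this patch, a request of this size is serviced via the
         * scatter-gather path when gadget->sg_supported is set; the
         * userspace API is unchanged, only the kernel-side allocation
         * strategy differs.
         */
        n = read(fd, buf, BUF_SIZE);
        if (n < 0)
                perror("read");
        else
                printf("received %zd bytes\n", n);

        free(buf);
        close(fd);
        return 0;
}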