Merge branch 'for-linus-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml
Pull UML update from Richard Weinberger:
 "A performance enhancement for UML's block driver"

* 'for-linus-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  um: UBD Improvements
commit 9936f44add
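The change batches the request-pointer traffic between the kernel side and the UBD I/O thread: instead of reading one struct io_thread_req pointer per os_read_file() call, both ends now pull up to UBD_REQ_BUFFER_SIZE pointers per read and carry any trailing partial pointer over to the next call (bulk_req_safe_read() in the diff below), so many requests can be completed per wakeup instead of one. The following is a minimal userspace sketch of that batching idea over a plain pipe; it is not the kernel code, and the names req_t and bulk_read are made up for illustration.

/*
 * Sketch: read a batch of request pointers from a pipe in one call and
 * keep any trailing partial pointer in a remainder buffer for next time.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAX_RECS 64

typedef struct { int id; } req_t;

static ssize_t bulk_read(int fd, req_t *out[MAX_RECS], char *rem, size_t *rem_size)
{
	char *buf = (char *)out;
	size_t have = *rem_size;
	ssize_t r;

	memcpy(buf, rem, have);                     /* leftover bytes from the last call */
	r = read(fd, buf + have, sizeof(req_t *) * MAX_RECS - have);
	if (r < 0)
		return r;                           /* caller handles EAGAIN etc.        */
	have += (size_t)r;

	*rem_size = have % sizeof(req_t *);         /* bytes of an incomplete pointer    */
	memcpy(rem, buf + have - *rem_size, *rem_size);
	return (ssize_t)(have - *rem_size);         /* whole pointers, in bytes          */
}

int main(void)
{
	int fds[2];
	req_t a = { 1 }, b = { 2 };
	req_t *msgs[2] = { &a, &b };
	req_t *batch[MAX_RECS];
	char rem[sizeof(req_t *)];
	size_t rem_size = 0, i;
	ssize_t n;

	if (pipe(fds))
		return 1;
	if (write(fds[1], msgs, sizeof(msgs)) != sizeof(msgs))  /* producer side */
		return 1;

	n = bulk_read(fds[0], batch, rem, &rem_size);
	for (i = 0; i < (size_t)n / sizeof(req_t *); i++)
		printf("got request %d\n", batch[i]->id);
	return 0;
}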
@@ -11,5 +11,10 @@ extern int start_io_thread(unsigned long sp, int *fds_out);
 extern int io_thread(void *arg);
 extern int kernel_fd;
 
+extern int ubd_read_poll(int timeout);
+extern int ubd_write_poll(int timeout);
+
+#define UBD_REQ_BUFFER_SIZE 64
+
 #endif
 
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2015-2016 Anton Ivanov (aivanov@brocade.com)
  * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -58,6 +59,17 @@ struct io_thread_req {
 	int error;
 };
 
+
+static struct io_thread_req * (*irq_req_buffer)[];
+static struct io_thread_req *irq_remainder;
+static int irq_remainder_size;
+
+static struct io_thread_req * (*io_req_buffer)[];
+static struct io_thread_req *io_remainder;
+static int io_remainder_size;
+
+
+
 static inline int ubd_test_bit(__u64 bit, unsigned char *data)
 {
 	__u64 n;
@@ -442,29 +454,91 @@ static void do_ubd_request(struct request_queue * q);
 static int thread_fd = -1;
 static LIST_HEAD(restart);
 
-/* XXX - move this inside ubd_intr. */
+/* Function to read several request pointers at a time
+ * handling fractional reads if (and as) needed
+ */
+
+static int bulk_req_safe_read(
+	int fd,
+	struct io_thread_req * (*request_buffer)[],
+	struct io_thread_req **remainder,
+	int *remainder_size,
+	int max_recs
+	)
+{
+	int n = 0;
+	int res = 0;
+
+	if (*remainder_size > 0) {
+		memmove(
+			(char *) request_buffer,
+			(char *) remainder, *remainder_size
+		);
+		n = *remainder_size;
+	}
+
+	res = os_read_file(
+			fd,
+			((char *) request_buffer) + *remainder_size,
+			sizeof(struct io_thread_req *)*max_recs
+				- *remainder_size
+		);
+	if (res > 0) {
+		n += res;
+		if ((n % sizeof(struct io_thread_req *)) > 0) {
+			/*
+			 * Read somehow returned not a multiple of dword
+			 * theoretically possible, but never observed in the
+			 * wild, so read routine must be able to handle it
+			 */
+			*remainder_size = n % sizeof(struct io_thread_req *);
+			WARN(*remainder_size > 0, "UBD IPC read returned a partial result");
+			memmove(
+				remainder,
+				((char *) request_buffer) +
+					(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *),
+				*remainder_size
+			);
+			n = n - *remainder_size;
+		}
+	} else {
+		n = res;
+	}
+	return n;
+}
+
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
-	struct io_thread_req *req;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
 	int n;
+	int count;
 
 	while(1){
-		n = os_read_file(thread_fd, &req,
-				 sizeof(struct io_thread_req *));
-		if(n != sizeof(req)){
+		n = bulk_req_safe_read(
+			thread_fd,
+			irq_req_buffer,
+			&irq_remainder,
+			&irq_remainder_size,
+			UBD_REQ_BUFFER_SIZE
+		);
+		if (n < 0) {
 			if(n == -EAGAIN)
 				break;
 			printk(KERN_ERR "spurious interrupt in ubd_handler, "
 			       "err = %d\n", -n);
 			return;
 		}
-
-		blk_end_request(req->req, 0, req->length);
-		kfree(req);
+		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+			blk_end_request(
+				(*irq_req_buffer)[count]->req,
+				0,
+				(*irq_req_buffer)[count]->length
+			);
+			kfree((*irq_req_buffer)[count]);
+		}
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);
 
@@ -1064,6 +1138,28 @@ static int __init ubd_init(void)
 		if (register_blkdev(fake_major, "ubd"))
 			return -1;
 	}
+
+	irq_req_buffer = kmalloc(
+			sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
+			GFP_KERNEL
+		);
+	irq_remainder = 0;
+
+	if (irq_req_buffer == NULL) {
+		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+		return -1;
+	}
+	io_req_buffer = kmalloc(
+			sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
+			GFP_KERNEL
+		);
+
+	io_remainder = 0;
+
+	if (io_req_buffer == NULL) {
+		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+		return -1;
+	}
 	platform_driver_register(&ubd_driver);
 	mutex_lock(&ubd_lock);
 	for (i = 0; i < MAX_DEV; i++){
@@ -1458,31 +1554,51 @@ static int io_count = 0;
 
 int io_thread(void *arg)
 {
-	struct io_thread_req *req;
-	int n;
+	int n, count, written, res;
 
 	os_fix_helper_signals();
 
 	while(1){
-		n = os_read_file(kernel_fd, &req,
-				 sizeof(struct io_thread_req *));
-		if(n != sizeof(struct io_thread_req *)){
-			if(n < 0)
-				printk("io_thread - read failed, fd = %d, "
-				       "err = %d\n", kernel_fd, -n);
-			else {
-				printk("io_thread - short read, fd = %d, "
-				       "length = %d\n", kernel_fd, n);
-			}
-			continue;
-		}
-		io_count++;
-		do_io(req);
-		n = os_write_file(kernel_fd, &req,
-				  sizeof(struct io_thread_req *));
-		if(n != sizeof(struct io_thread_req *))
-			printk("io_thread - write failed, fd = %d, err = %d\n",
-			       kernel_fd, -n);
+		n = bulk_req_safe_read(
+			kernel_fd,
+			io_req_buffer,
+			&io_remainder,
+			&io_remainder_size,
+			UBD_REQ_BUFFER_SIZE
+		);
+		if (n < 0) {
+			if (n == -EAGAIN) {
+				ubd_read_poll(-1);
+				continue;
+			} else {
+				printk("io_thread - read failed, fd = %d, "
+				       "err = %d,"
+				       "reminder = %d\n",
+				       kernel_fd, -n, io_remainder_size);
+			}
+		}
+
+		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+			io_count++;
+			do_io((*io_req_buffer)[count]);
+		}
+
+		written = 0;
+
+		do {
+			res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
+			if (res > 0) {
+				written += res;
+			} else {
+				if (res != -EAGAIN) {
+					printk("io_thread - read failed, fd = %d, "
+					       "err = %d\n", kernel_fd, -n);
+				}
+			}
+			if (written < n) {
+				ubd_write_poll(-1);
+			}
+		} while (written < n);
 	}
 
 	return 0;
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2016 Anton Ivanov (aivanov@brocade.com)
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Copyright (C) 2001 Ridgerun,Inc (glonnon@ridgerun.com)
  * Licensed under the GPL
@@ -20,6 +21,9 @@
 
 #include "ubd.h"
 #include <os.h>
+#include <poll.h>
+
+struct pollfd kernel_pollfd;
 
 int start_io_thread(unsigned long sp, int *fd_out)
 {
@@ -32,9 +36,12 @@ int start_io_thread(unsigned long sp, int *fd_out)
 	}
 
 	kernel_fd = fds[0];
+	kernel_pollfd.fd = kernel_fd;
+	kernel_pollfd.events = POLLIN;
 	*fd_out = fds[1];
 
 	err = os_set_fd_block(*fd_out, 0);
+	err = os_set_fd_block(kernel_fd, 0);
 	if (err) {
 		printk("start_io_thread - failed to set nonblocking I/O.\n");
 		goto out_close;
@@ -57,3 +64,15 @@ int start_io_thread(unsigned long sp, int *fd_out)
 out:
 	return err;
 }
+
+int ubd_read_poll(int timeout)
+{
+	kernel_pollfd.events = POLLIN;
+	return poll(&kernel_pollfd, 1, timeout);
+}
+int ubd_write_poll(int timeout)
+{
+	kernel_pollfd.events = POLLOUT;
+	return poll(&kernel_pollfd, 1, timeout);
+}
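With both ends of the kernel/I-O-thread pipe set non-blocking, the new ubd_read_poll()/ubd_write_poll() helpers let the I/O thread sleep in poll(2) when a read or write returns -EAGAIN instead of spinning. Below is a rough userspace sketch of that poll-before-retry idiom; it is not the kernel code, and the name wait_for is illustrative only.

/*
 * Sketch: on EAGAIN from a non-blocking fd, block in poll(2) until the
 * fd is ready, then retry the I/O.
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static struct pollfd pfd;                  /* plays the role of kernel_pollfd */

static int wait_for(int fd, short events)  /* like ubd_read_poll/ubd_write_poll */
{
	pfd.fd = fd;
	pfd.events = events;
	return poll(&pfd, 1, -1);          /* -1: block until the fd is ready */
}

int main(void)
{
	int fds[2];
	char c;

	if (pipe(fds))
		return 1;
	fcntl(fds[0], F_SETFL, O_NONBLOCK);

	if (write(fds[1], "x", 1) != 1)
		return 1;
	while (read(fds[0], &c, 1) < 0 && errno == EAGAIN)
		wait_for(fds[0], POLLIN);  /* sleep instead of busy-looping */

	printf("read '%c'\n", c);
	return 0;
}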