vhost-net: batch use/unuse mm

Move use/unuse mm to vhost.c, which makes it possible to batch these
operations.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author:  Michael S. Tsirkin
Date:    2010-10-06 15:34:45 +02:00
Parent:  533a19b4b8
Commit:  64e1c80748
2 changed files with 6 additions and 8 deletions
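
For orientation, the sketch below shows the pattern this patch moves to. It is a simplified, hypothetical rendering, not the kernel code: sketch_dev and sketch_worker() stand in for struct vhost_dev and vhost_worker(), and the work-queue handling is reduced to a comment. The point is that the worker kthread now switches to the owner's mm once for its whole lifetime, so the per-virtqueue handlers (handle_tx(), handle_rx_big(), handle_rx_mergeable()) no longer pay for use_mm()/unuse_mm() on every invocation.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/mmu_context.h>          /* use_mm()/unuse_mm() */

/* Trimmed-down, illustration-only stand-in for struct vhost_dev. */
struct sketch_dev {
        struct mm_struct *mm;           /* mm of the process that owns the device */
        /* work list, spinlock and flush bookkeeping omitted */
};

static int sketch_worker(void *data)
{
        struct sketch_dev *dev = data;

        use_mm(dev->mm);                /* switch to the owner's mm once, up front */

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;          /* fall through to unuse_mm() below */
                }
                /* the real worker pops a queued vhost_work here and runs
                 * it; with nothing queued it just sleeps via schedule() */
                schedule();
        }

        unuse_mm(dev->mm);              /* drop the mm once, when the thread exits */
        return 0;
}

This is also why the "return 0" in the kthread_should_stop() path becomes a "break" in the diff below: the worker has to fall out of the loop so that unuse_mm() runs before the thread exits, instead of returning while the owner's mm is still in use.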

--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -10,7 +10,6 @@
 #include <linux/eventfd.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
-#include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -142,7 +141,6 @@ static void handle_tx(struct vhost_net *net)
                 return;
         }
-        use_mm(net->dev.mm);
         mutex_lock(&vq->mutex);
         vhost_disable_notify(vq);
@@ -207,7 +205,6 @@ static void handle_tx(struct vhost_net *net)
         }
         mutex_unlock(&vq->mutex);
-        unuse_mm(net->dev.mm);
 }
 static int peek_head_len(struct sock *sk)
@@ -312,7 +309,6 @@ static void handle_rx_big(struct vhost_net *net)
         if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                 return;
-        use_mm(net->dev.mm);
         mutex_lock(&vq->mutex);
         vhost_disable_notify(vq);
         hdr_size = vq->vhost_hlen;
@@ -391,7 +387,6 @@ static void handle_rx_big(struct vhost_net *net)
         }
         mutex_unlock(&vq->mutex);
-        unuse_mm(net->dev.mm);
 }
 /* Expects to be always run from workqueue - which acts as
@@ -423,7 +418,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
         if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
                 return;
-        use_mm(net->dev.mm);
         mutex_lock(&vq->mutex);
         vhost_disable_notify(vq);
         vhost_hlen = vq->vhost_hlen;
@@ -500,7 +494,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
         }
         mutex_unlock(&vq->mutex);
-        unuse_mm(net->dev.mm);
 }
 static void handle_rx(struct vhost_net *net)

--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -15,6 +15,7 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -177,6 +178,8 @@ static int vhost_worker(void *data)
         struct vhost_work *work = NULL;
         unsigned uninitialized_var(seq);
+        use_mm(dev->mm);
         for (;;) {
                 /* mb paired w/ kthread_stop */
                 set_current_state(TASK_INTERRUPTIBLE);
@@ -191,7 +194,7 @@ static int vhost_worker(void *data)
                 if (kthread_should_stop()) {
                         spin_unlock_irq(&dev->work_lock);
                         __set_current_state(TASK_RUNNING);
-                        return 0;
+                        break;
                 }
                 if (!list_empty(&dev->work_list)) {
                         work = list_first_entry(&dev->work_list,
@@ -209,6 +212,8 @@ static int vhost_worker(void *data)
                         schedule();
         }
+        unuse_mm(dev->mm);
+        return 0;
 }
 /* Helper to allocate iovec buffers for all vqs. */