mirror of https://gitee.com/openkylin/qemu.git
net: reduce the unnecessary memory allocation of multiqueue
Edivaldo reports that the array of NetClientState in NICState is too large: MAX_QUEUE_NUM (1024) entries, which wastes memory even when multiqueue is not used.

Instead of static arrays, solve this by allocating the queues on demand, for both the NetClientState array in NICState and the VirtIONetQueue array in VirtIONet.

Tested by myself with a single virtio-net-pci device; memory allocation is almost the same as before multiqueue was merged.

Cc: Edivaldo de Araujo Pereira <edivaldoapereira@yahoo.com.br>
Cc: qemu-stable@nongnu.org
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
parent d26e445c80
commit f6b26cf257
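Before the diff, a minimal, self-contained C sketch of the layout the patch moves to: one allocation holding the device state (info->size bytes, beginning with the NIC bookkeeping struct) immediately followed by one per-queue client state per configured queue, so the NIC can be recovered from any queue by pointer arithmetic and freed with a single call. The names here (QueueState, Nic, nic_new, nic_from_queue) are simplified stand-ins for illustration, not the actual QEMU types, and plain char-pointer arithmetic is used where the patch uses GCC-style void-pointer arithmetic.

#include <stdio.h>
#include <stdlib.h>

typedef struct QueueState {      /* stand-in for NetClientState */
    int queue_index;
} QueueState;

typedef struct Nic {             /* stand-in for NICState */
    QueueState *ncs;             /* points just past the device state */
    int queues;
} Nic;

/* One allocation: [device state, dev_size bytes][QueueState x queues].
 * dev_size plays the role of info->size and must be >= sizeof(Nic). */
static Nic *nic_new(size_t dev_size, int queues)
{
    Nic *nic = calloc(1, dev_size + sizeof(QueueState) * queues);
    nic->ncs = (QueueState *)((char *)nic + dev_size);
    nic->queues = queues;
    for (int i = 0; i < queues; i++) {
        nic->ncs[i].queue_index = i;
    }
    return nic;
}

/* Recover the Nic from any queue, mirroring the new qemu_get_nic():
 * step back to queue 0, then back over the device state. */
static Nic *nic_from_queue(QueueState *q, size_t dev_size)
{
    QueueState *q0 = q - q->queue_index;
    return (Nic *)((char *)q0 - dev_size);
}

int main(void)
{
    size_t dev_size = sizeof(Nic) + 64;   /* pretend device-private data */
    Nic *nic = nic_new(dev_size, 4);      /* 4 queues instead of a fixed 1024 */
    QueueState *q2 = &nic->ncs[2];
    printf("recovered: %s\n", nic_from_queue(q2, dev_size) == nic ? "ok" : "bad");
    free(nic);                            /* single teardown, as in qemu_del_nic() */
    return 0;
}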
hw/virtio-net.c
@@ -44,7 +44,7 @@ typedef struct VirtIONet
     VirtIODevice vdev;
     uint8_t mac[ETH_ALEN];
     uint16_t status;
-    VirtIONetQueue vqs[MAX_QUEUE_NUM];
+    VirtIONetQueue *vqs;
     VirtQueue *ctrl_vq;
     NICState *nic;
     uint32_t tx_timeout;
@@ -1326,8 +1326,9 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->vdev.set_status = virtio_net_set_status;
     n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
     n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
+    n->max_queues = MAX(conf->queues, 1);
+    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
     n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
-    n->max_queues = conf->queues;
     n->curr_queues = 1;
     n->vqs[0].n = n;
     n->tx_timeout = net->txtimer;
@@ -1412,6 +1413,7 @@ void virtio_net_exit(VirtIODevice *vdev)
         }
     }
 
+    g_free(n->vqs);
     qemu_del_nic(n->nic);
     virtio_cleanup(&n->vdev);
 }
include/net/net.h
@@ -72,7 +72,7 @@ struct NetClientState {
 };
 
 typedef struct NICState {
-    NetClientState ncs[MAX_QUEUE_NUM];
+    NetClientState *ncs;
     NICConf *conf;
     void *opaque;
     bool peer_deleted;
net/net.c (19 changed lines)
@@ -235,23 +235,20 @@ NICState *qemu_new_nic(NetClientInfo *info,
                        const char *name,
                        void *opaque)
 {
-    NetClientState *nc;
     NetClientState **peers = conf->peers.ncs;
     NICState *nic;
-    int i;
+    int i, queues = MAX(1, conf->queues);
 
     assert(info->type == NET_CLIENT_OPTIONS_KIND_NIC);
     assert(info->size >= sizeof(NICState));
 
-    nc = qemu_new_net_client(info, peers[0], model, name);
-    nc->queue_index = 0;
-
-    nic = qemu_get_nic(nc);
+    nic = g_malloc0(info->size + sizeof(NetClientState) * queues);
+    nic->ncs = (void *)nic + info->size;
     nic->conf = conf;
     nic->opaque = opaque;
 
-    for (i = 1; i < conf->queues; i++) {
-        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, nc->name,
+    for (i = 0; i < queues; i++) {
+        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
                               NULL);
         nic->ncs[i].queue_index = i;
     }
@@ -261,7 +258,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
 
 NetClientState *qemu_get_subqueue(NICState *nic, int queue_index)
 {
-    return &nic->ncs[queue_index];
+    return nic->ncs + queue_index;
 }
 
 NetClientState *qemu_get_queue(NICState *nic)
@@ -273,7 +270,7 @@ NICState *qemu_get_nic(NetClientState *nc)
 {
     NetClientState *nc0 = nc - nc->queue_index;
 
-    return DO_UPCAST(NICState, ncs[0], nc0);
+    return (NICState *)((void *)nc0 - nc->info->size);
 }
 
 void *qemu_get_nic_opaque(NetClientState *nc)
@@ -368,6 +365,8 @@ void qemu_del_nic(NICState *nic)
         qemu_cleanup_net_client(nc);
         qemu_free_net_client(nc);
     }
+
+    g_free(nic);
 }
 
 void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)