mirror of https://gitee.com/openkylin/qemu.git
qemu/queue.h: leave head structs anonymous unless necessary
Most list head structs need not be given a name. In most cases the name is given just in case one is going to use QTAILQ_LAST, QTAILQ_PREV or reverse iteration, but this does not apply to lists of other kinds, and even for QTAILQ in practice this is only rarely needed. In addition, we will soon reimplement those macros completely so that they do not need a name for the head struct. So clean up everything, not giving a name except in the rare case where it is necessary.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 10ca76b4d2
commit b58deb344d

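For context, a minimal sketch of the pattern the commit relies on. This is illustrative only, not part of the commit: the Item/ItemHead names are invented, and it assumes compilation inside the QEMU tree against the pre-rework qemu/queue.h, where QTAILQ_LAST and QTAILQ_PREV still take the head struct name.

#include "qemu/osdep.h"
#include "qemu/queue.h"

typedef struct Item {
    int value;
    QTAILQ_ENTRY(Item) next;                  /* per-element linkage */
} Item;

/* Anonymous head: enough for init, insertion and forward iteration. */
static QTAILQ_HEAD(, Item) items = QTAILQ_HEAD_INITIALIZER(items);

/* Named head: at this point in history the name is still required by
 * QTAILQ_LAST/QTAILQ_PREV and reverse iteration -- the "rare case" the
 * commit message refers to. */
static QTAILQ_HEAD(ItemHead, Item) ordered = QTAILQ_HEAD_INITIALIZER(ordered);

static Item *ordered_last(void)
{
    return QTAILQ_LAST(&ordered, ItemHead);   /* head struct name needed */
}

As the message notes, a follow-up reimplementation of those macros removes even that last need for a name.
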
@@ -86,7 +86,7 @@ struct KVMState
     int robust_singlestep;
     int debugregs;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
+    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
 #endif
     int many_ioeventfds;
     int intx_set_mask;
@@ -102,7 +102,7 @@ struct KVMState
     int nr_allocated_irq_routes;
     unsigned long *used_gsi_bitmap;
     unsigned int gsi_count;
-    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
+    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 #endif
     KVMMemoryListener memory_listener;
     QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;

@@ -72,7 +72,7 @@ typedef struct ListElement {
     GlfsPreopened saved;
 } ListElement;
 
-static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+static QLIST_HEAD(, ListElement) glfs_list;
 
 static QemuOptsList qemu_gluster_create_opts = {
     .name = "qemu-gluster-create-opts",

@@ -72,7 +72,7 @@ typedef struct MirrorBlockJob {
     unsigned long *in_flight_bitmap;
     int in_flight;
     int64_t bytes_in_flight;
-    QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
+    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
     int ret;
     bool unmap;
     int target_cluster_size;

@@ -77,8 +77,6 @@ typedef struct Qcow2BitmapTable {
     uint32_t size; /* number of 64bit entries */
     QSIMPLEQ_ENTRY(Qcow2BitmapTable) entry;
 } Qcow2BitmapTable;
-typedef QSIMPLEQ_HEAD(Qcow2BitmapTableList, Qcow2BitmapTable)
-    Qcow2BitmapTableList;
 
 typedef struct Qcow2Bitmap {
     Qcow2BitmapTable table;
@@ -1316,7 +1314,7 @@ void qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, Error **errp)
     int ret;
     Qcow2BitmapList *bm_list;
     Qcow2Bitmap *bm;
-    Qcow2BitmapTableList drop_tables;
+    QSIMPLEQ_HEAD(, Qcow2BitmapTable) drop_tables;
     Qcow2BitmapTable *tb, *tb_next;
 
     if (!bdrv_has_changed_persistent_bitmaps(bs)) {

@@ -278,7 +278,10 @@ typedef struct BDRVQcow2State {
     QEMUTimer *cache_clean_timer;
     unsigned cache_clean_interval;
 
-    QLIST_HEAD(QCowClusterAlloc, QCowL2Meta) cluster_allocs;
+    uint8_t *cluster_cache;
+    uint8_t *cluster_data;
+    uint64_t cluster_cache_offset;
+    QLIST_HEAD(, QCowL2Meta) cluster_allocs;
 
     uint64_t *refcount_table;
     uint64_t refcount_table_offset;

@@ -391,12 +391,12 @@ struct BDRVSheepdogState {
     uint32_t aioreq_seq_num;
 
     /* Every aio request must be linked to either of these queues. */
-    QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
-    QLIST_HEAD(failed_aio_head, AIOReq) failed_aio_head;
+    QLIST_HEAD(, AIOReq) inflight_aio_head;
+    QLIST_HEAD(, AIOReq) failed_aio_head;
 
     CoMutex queue_lock;
     CoQueue overlapping_queue;
-    QLIST_HEAD(inflight_aiocb_head, SheepdogAIOCB) inflight_aiocb_head;
+    QLIST_HEAD(, SheepdogAIOCB) inflight_aiocb_head;
 };
 
 typedef struct BDRVSheepdogReopenState {

@@ -398,7 +398,7 @@ typedef struct BDRVVHDXState {
 
     bool log_replayed_on_open;
 
-    QLIST_HEAD(VHDXRegionHead, VHDXRegionEntry) regions;
+    QLIST_HEAD(, VHDXRegionEntry) regions;
 } BDRVVHDXState;
 
 void vhdx_guid_generate(MSGUID *guid);

@@ -2266,7 +2266,7 @@ void qmp_transaction(TransactionActionList *dev_list,
     BlkActionState *state, *next;
     Error *local_err = NULL;
 
-    QSIMPLEQ_HEAD(snap_bdrv_states, BlkActionState) snap_bdrv_states;
+    QSIMPLEQ_HEAD(, BlkActionState) snap_bdrv_states;
     QSIMPLEQ_INIT(&snap_bdrv_states);
 
     /* Does this transaction get canceled as a group on failure?

@@ -46,9 +46,7 @@ typedef struct IvshmemClientPeer {
     int vectors[IVSHMEM_CLIENT_MAX_VECTORS]; /**< one fd per vector */
     unsigned vectors_count; /**< number of vectors */
 } IvshmemClientPeer;
-QTAILQ_HEAD(IvshmemClientPeerList, IvshmemClientPeer);
 
-typedef struct IvshmemClientPeerList IvshmemClientPeerList;
 typedef struct IvshmemClient IvshmemClient;
 
 /**
@@ -73,7 +71,7 @@ struct IvshmemClient {
     int sock_fd; /**< unix sock filedesc */
     int shm_fd;  /**< shm file descriptor */
 
-    IvshmemClientPeerList peer_list; /**< list of peers */
+    QTAILQ_HEAD(, IvshmemClientPeer) peer_list; /**< list of peers */
     IvshmemClientPeer local; /**< our own infos */
 
     IvshmemClientNotifCb notif_cb; /**< notification callback */

@@ -52,9 +52,6 @@ typedef struct IvshmemServerPeer {
     EventNotifier vectors[IVSHMEM_SERVER_MAX_VECTORS]; /**< one per vector */
     unsigned vectors_count; /**< number of vectors */
 } IvshmemServerPeer;
-QTAILQ_HEAD(IvshmemServerPeerList, IvshmemServerPeer);
-
-typedef struct IvshmemServerPeerList IvshmemServerPeerList;
 
 /**
  * Structure describing an ivshmem server
@@ -72,7 +69,7 @@ typedef struct IvshmemServer {
     unsigned n_vectors; /**< number of vectors */
     uint16_t cur_id; /**< id to be given to next client */
     bool verbose; /**< true in verbose mode */
-    IvshmemServerPeerList peer_list; /**< list of peers */
+    QTAILQ_HEAD(, IvshmemServerPeer) peer_list; /**< list of peers */
 } IvshmemServer;
 
 /**

exec.c

@@ -3471,7 +3471,7 @@ typedef struct MapClient {
 } MapClient;
 
 QemuMutex map_client_list_lock;
-static QLIST_HEAD(map_client_list, MapClient) map_client_list
+static QLIST_HEAD(, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
 static void cpu_unregister_map_client_do(MapClient *client)

@@ -18,7 +18,7 @@
 #include "qemu/error-report.h"
 #include "qemu/option.h"
 
-static QTAILQ_HEAD(FsDriverEntry_head, FsDriverListEntry) fsdriver_entries =
+static QTAILQ_HEAD(, FsDriverListEntry) fsdriver_entries =
     QTAILQ_HEAD_INITIALIZER(fsdriver_entries);
 
 static FsDriverTable FsDrivers[] = {

@@ -29,8 +29,8 @@ typedef struct NvmeSQueue {
     uint64_t dma_addr;
     QEMUTimer *timer;
     NvmeRequest *io_req;
-    QTAILQ_HEAD(sq_req_list, NvmeRequest) req_list;
-    QTAILQ_HEAD(out_req_list, NvmeRequest) out_req_list;
+    QTAILQ_HEAD(, NvmeRequest) req_list;
+    QTAILQ_HEAD(, NvmeRequest) out_req_list;
     QTAILQ_ENTRY(NvmeSQueue) entry;
 } NvmeSQueue;
 
@@ -45,8 +45,8 @@ typedef struct NvmeCQueue {
     uint32_t size;
     uint64_t dma_addr;
     QEMUTimer *timer;
-    QTAILQ_HEAD(sq_list, NvmeSQueue) sq_list;
-    QTAILQ_HEAD(cq_req_list, NvmeRequest) req_list;
+    QTAILQ_HEAD(, NvmeSQueue) sq_list;
+    QTAILQ_HEAD(, NvmeRequest) req_list;
 } NvmeCQueue;
 
 typedef struct NvmeNamespace {

@@ -82,9 +82,9 @@ struct XenBlkDev {
     int more_work;
 
     /* request lists */
-    QLIST_HEAD(inflight_head, ioreq) inflight;
-    QLIST_HEAD(finished_head, ioreq) finished;
-    QLIST_HEAD(freelist_head, ioreq) freelist;
+    QLIST_HEAD(, ioreq) inflight;
+    QLIST_HEAD(, ioreq) finished;
+    QLIST_HEAD(, ioreq) freelist;
     int requests_total;
     int requests_inflight;
     int requests_finished;

@@ -35,7 +35,7 @@ typedef struct QEMUResetEntry {
     void *opaque;
 } QEMUResetEntry;
 
-static QTAILQ_HEAD(reset_handlers, QEMUResetEntry) reset_handlers =
+static QTAILQ_HEAD(, QEMUResetEntry) reset_handlers =
     QTAILQ_HEAD_INITIALIZER(reset_handlers);
 
 void qemu_register_reset(QEMUResetHandler *func, void *opaque)

@@ -71,7 +71,7 @@ typedef struct MapCacheRev {
 typedef struct MapCache {
     MapCacheEntry *entry;
     unsigned long nr_buckets;
-    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;
+    QTAILQ_HEAD(, MapCacheRev) locked_entries;
 
     /* For most cases (>99.9%), the page address is the same. */
     MapCacheEntry *last_entry;

@@ -42,7 +42,7 @@ enum sPAPRTCEAccess {
 #define IOMMU_PAGE_SIZE(shift) (1ULL << (shift))
 #define IOMMU_PAGE_MASK(shift) (~(IOMMU_PAGE_SIZE(shift) - 1))
 
-static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
+static QLIST_HEAD(, sPAPRTCETable) spapr_tce_tables;
 
 sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
 {

@@ -119,11 +119,11 @@ struct EmulatedState {
     char *db;
     uint8_t atr[MAX_ATR_SIZE];
     uint8_t atr_length;
-    QSIMPLEQ_HEAD(event_list, EmulEvent) event_list;
+    QSIMPLEQ_HEAD(, EmulEvent) event_list;
     QemuMutex event_list_mutex;
     QemuThread event_thread_id;
     VReader *reader;
-    QSIMPLEQ_HEAD(guest_apdu_list, EmulEvent) guest_apdu_list;
+    QSIMPLEQ_HEAD(, EmulEvent) guest_apdu_list;
     QemuMutex vreader_mutex; /* and guest_apdu_list mutex */
     QemuMutex handle_apdu_mutex;
     QemuCond handle_apdu_cond;

@@ -648,7 +648,7 @@ typedef struct USBNetState {
     char usbstring_mac[13];
     NICState *nic;
     NICConf conf;
-    QTAILQ_HEAD(rndis_resp_head, rndis_response) rndis_resp;
+    QTAILQ_HEAD(, rndis_response) rndis_resp;
 } USBNetState;
 
 #define TYPE_USB_NET "usb-net"

@@ -72,7 +72,7 @@ struct usbback_stub {
     USBPort port;
     unsigned int speed;
     bool attached;
-    QTAILQ_HEAD(submit_q_head, usbback_req) submit_q;
+    QTAILQ_HEAD(, usbback_req) submit_q;
 };
 
 struct usbback_req {
@@ -108,8 +108,8 @@ struct usbback_info {
     int num_ports;
     int usb_ver;
     bool ring_error;
-    QTAILQ_HEAD(req_free_q_head, usbback_req) req_free_q;
-    QSIMPLEQ_HEAD(hotplug_q_head, usbback_hotplug) hotplug_q;
+    QTAILQ_HEAD(, usbback_req) req_free_q;
+    QSIMPLEQ_HEAD(, usbback_hotplug) hotplug_q;
     struct usbback_stub ports[USBBACK_MAXPORTS];
     struct usbback_stub *addr_table[USB_DEV_ADDR_SIZE];
     QEMUBH *bh;

@@ -32,7 +32,7 @@
 #include "qemu/help_option.h"
 
 static WatchdogAction watchdog_action = WATCHDOG_ACTION_RESET;
-static QLIST_HEAD(watchdog_list, WatchdogTimerModel) watchdog_list;
+static QLIST_HEAD(, WatchdogTimerModel) watchdog_list;
 
 void watchdog_add_model(WatchdogTimerModel *model)
 {

@@ -31,10 +31,10 @@ struct xs_dirs {
     QTAILQ_ENTRY(xs_dirs) list;
 };
 
-static QTAILQ_HEAD(xs_dirs_head, xs_dirs) xs_cleanup =
+static QTAILQ_HEAD(, xs_dirs) xs_cleanup =
     QTAILQ_HEAD_INITIALIZER(xs_cleanup);
 
-static QTAILQ_HEAD(XenDeviceHead, XenDevice) xendevs =
+static QTAILQ_HEAD(, XenDevice) xendevs =
     QTAILQ_HEAD_INITIALIZER(xendevs);
 
 /* ------------------------------------------------------------- */

@@ -379,9 +379,9 @@ struct MemoryRegion {
     MemoryRegion *alias;
     hwaddr alias_offset;
     int32_t priority;
-    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+    QTAILQ_HEAD(, MemoryRegion) subregions;
     QTAILQ_ENTRY(MemoryRegion) subregions_link;
-    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
     const char *name;
     unsigned ioeventfd_nb;
     MemoryRegionIoeventfd *ioeventfds;

@@ -53,7 +53,7 @@ typedef struct VFIOPlatformDevice {
     VFIORegion **regions;
     QLIST_HEAD(, VFIOINTp) intp_list; /* list of IRQs */
     /* queue of pending IRQs */
-    QSIMPLEQ_HEAD(pending_intp_queue, VFIOINTp) pending_intp_queue;
+    QSIMPLEQ_HEAD(, VFIOINTp) pending_intp_queue;
     char *compat; /* DT compatible values, separated by NUL */
     unsigned int num_compat; /* number of compatible values */
     uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */

@@ -376,9 +376,9 @@ struct CPUState {
     QTAILQ_ENTRY(CPUState) node;
 
     /* ice debug support */
-    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
+    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
 
-    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
+    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
     CPUWatchpoint *watchpoint_hit;
 
     void *opaque;

@@ -412,8 +412,6 @@ struct kvm_sw_breakpoint {
     QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
 };
 
-QTAILQ_HEAD(kvm_sw_breakpoint_head, kvm_sw_breakpoint);
-
 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                  target_ulong pc);
 

@@ -57,7 +57,7 @@ struct RngBackend
 
     /*< protected >*/
     bool opened;
-    QSIMPLEQ_HEAD(requests, RngRequest) requests;
+    QSIMPLEQ_HEAD(, RngRequest) requests;
 };
 
 

@@ -2844,7 +2844,7 @@ struct elf_note_info {
     struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
     struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
 
-    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
+    QTAILQ_HEAD(, elf_thread_status) thread_list;
 #if 0
     /*
      * Current version of ELF coredump doesn't support

memory.c

@@ -2795,7 +2795,7 @@ struct MemoryRegionList {
     QTAILQ_ENTRY(MemoryRegionList) mrqueue;
 };
 
-typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;
+typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
 
 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                        int128_sub((size), int128_one())) : 0)

@@ -116,7 +116,7 @@ typedef struct DirtyBitmapMigBitmapState {
 } DirtyBitmapMigBitmapState;
 
 typedef struct DirtyBitmapMigState {
-    QSIMPLEQ_HEAD(dbms_list, DirtyBitmapMigBitmapState) dbms_list;
+    QSIMPLEQ_HEAD(, DirtyBitmapMigBitmapState) dbms_list;
 
     bool bulk_completed;
     bool no_bitmaps;

@@ -93,12 +93,12 @@ typedef struct BlkMigBlock {
 } BlkMigBlock;
 
 typedef struct BlkMigState {
-    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
+    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
     int64_t total_sector_sum;
     bool zero_blocks;
 
     /* Protected by lock. */
-    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
+    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
     int submitted;
     int read_done;
 

@@ -322,7 +322,7 @@ struct RAMState {
     RAMBlock *last_req_rb;
     /* Queue of outstanding page requests from the destination */
     QemuMutex src_page_req_mutex;
-    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
+    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
 };
 typedef struct RAMState RAMState;
 

@@ -266,12 +266,12 @@ typedef struct QMPRequest QMPRequest;
 /* Protects mon_list, monitor_qapi_event_state, monitor_destroyed. */
 static QemuMutex monitor_lock;
 static GHashTable *monitor_qapi_event_state;
-static QTAILQ_HEAD(mon_list, Monitor) mon_list;
+static QTAILQ_HEAD(, Monitor) mon_list;
 static bool monitor_destroyed;
 
 /* Protects mon_fdsets */
 static QemuMutex mon_fdsets_lock;
-static QLIST_HEAD(mon_fdsets, MonFdset) mon_fdsets;
+static QLIST_HEAD(, MonFdset) mon_fdsets;
 
 static int mon_refcount;
 

@@ -55,7 +55,7 @@ struct NetQueue {
     uint32_t nq_count;
     NetQueueDeliverFunc *deliver;
 
-    QTAILQ_HEAD(packets, NetPacket) packets;
+    QTAILQ_HEAD(, NetPacket) packets;
 
     unsigned delivering : 1;
 };

@@ -85,7 +85,7 @@ typedef struct SlirpState {
 } SlirpState;
 
 static struct slirp_config_str *slirp_configs;
-static QTAILQ_HEAD(slirp_stacks, SlirpState) slirp_stacks =
+static QTAILQ_HEAD(, SlirpState) slirp_stacks =
     QTAILQ_HEAD_INITIALIZER(slirp_stacks);
 
 static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp);

@@ -47,7 +47,7 @@ static const uint8_t special_ethaddr[ETH_ALEN] = {
 
 u_int curtime;
 
-static QTAILQ_HEAD(slirp_instances, Slirp) slirp_instances =
+static QTAILQ_HEAD(, Slirp) slirp_instances =
     QTAILQ_HEAD_INITIALIZER(slirp_instances);
 
 static struct in_addr dns_addr;

@@ -206,7 +206,7 @@ typedef struct KVMDevice {
     int dev_fd;
 } KVMDevice;
 
-static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;
+static QSLIST_HEAD(, KVMDevice) kvm_devices_head;
 
 static void kvm_arm_devlistener_add(MemoryListener *listener,
                                     MemoryRegionSection *section)

@@ -56,7 +56,7 @@ typedef struct HAXMapping {
  * send to the kernel only the removal of the pages from the MMIO hole after
  * having computed locally the result of the deletion and additions.
  */
-static QTAILQ_HEAD(HAXMappingListHead, HAXMapping) mappings =
+static QTAILQ_HEAD(, HAXMapping) mappings =
     QTAILQ_HEAD_INITIALIZER(mappings);
 
 /**

@@ -708,7 +708,7 @@ struct TCGContext {
 
     /* These structures are private to tcg-target.inc.c. */
 #ifdef TCG_TARGET_NEED_LDST_LABELS
-    QSIMPLEQ_HEAD(ldst_labels, TCGLabelQemuLdst) ldst_labels;
+    QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
 #endif
 #ifdef TCG_TARGET_NEED_POOL_LABELS
     struct TCGLabelPoolData *pool_labels;

@@ -108,7 +108,7 @@ static void reclaim_list_el(struct rcu_head *prcu)
 }
 
 #if TEST_LIST_TYPE == 1
-static QLIST_HEAD(q_list_head, list_element) Q_list_head;
+static QLIST_HEAD(, list_element) Q_list_head;
 
 #define TEST_NAME "qlist"
 #define TEST_LIST_REMOVE_RCU QLIST_REMOVE_RCU

vl.c

@@ -1529,7 +1529,7 @@ struct vm_change_state_entry {
     QLIST_ENTRY (vm_change_state_entry) entries;
 };
 
-static QLIST_HEAD(vm_change_state_head, vm_change_state_entry) vm_change_state_head;
+static QLIST_HEAD(, vm_change_state_entry) vm_change_state_head;
 
 VMChangeStateEntry *qemu_add_vm_change_state_handler(VMChangeStateHandler *cb,
                                                      void *opaque)