diff --git a/include/qemu/ratelimit.h b/include/qemu/ratelimit.h
index 8dece483f5..1b38291823 100644
--- a/include/qemu/ratelimit.h
+++ b/include/qemu/ratelimit.h
@@ -36,7 +36,7 @@ typedef struct {
 static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
 {
     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-    uint64_t delay_slices;
+    double delay_slices;
 
     assert(limit->slice_quota && limit->slice_ns);
 
@@ -55,12 +55,11 @@ static inline int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
         return 0;
     }
 
-    /* Quota exceeded. Calculate the next time slice we may start
-     * sending data again. */
-    delay_slices = (limit->dispatched + limit->slice_quota - 1) /
-        limit->slice_quota;
+    /* Quota exceeded. Wait based on the excess amount and then start a new
+     * slice. */
+    delay_slices = (double)limit->dispatched / limit->slice_quota;
     limit->slice_end_time = limit->slice_start_time +
-        delay_slices * limit->slice_ns;
+        (uint64_t)(delay_slices * limit->slice_ns);
 
     return limit->slice_end_time - now;
 }
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 78fb79acf8..5a80c10690 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -89,7 +89,7 @@ void qemu_co_queue_run_restart(Coroutine *co)
      * invalid memory. Therefore, use a temporary queue and do not touch
      * the "co" coroutine as soon as you enter another one.
      *
-     * In its turn resumed "co" can pupulate "co_queue_wakeup" queue with
+     * In its turn resumed "co" can populate "co_queue_wakeup" queue with
      * new coroutines to be woken up. The caller, who has resumed "co",
      * will be responsible for traversing the same queue, which may cause
      * a different wakeup order but not any missing wakeups.
diff --git a/vl.c b/vl.c
index 21878496ec..7a5554bc41 100644
--- a/vl.c
+++ b/vl.c
@@ -4767,10 +4767,18 @@ int main(int argc, char **argv, char **envp)
     main_loop();
 
     replay_disable_events();
-    iothread_stop_all();
 
+    /* The ordering of the following is delicate. Stop vcpus to prevent new
+     * I/O requests being queued by the guest. Then stop IOThreads (this
+     * includes a drain operation and completes all request processing). At
+     * this point emulated devices are still associated with their IOThreads
+     * (if any) but no longer have any work to do. Only then can we close
+     * block devices safely because we know there is no more I/O coming.
+     */
     pause_all_vcpus();
+    iothread_stop_all();
     bdrv_close_all();
+
     res_free();
 
     /* vhost-user must be cleaned up before chardevs. */
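
Note (not part of the patch): the ratelimit.h hunk above replaces the round-up-to-whole-slices delay with a delay proportional to how far the quota was exceeded. The standalone sketch below contrasts the two calculations with made-up example values; the variable names mirror the RateLimit fields, but none of this is QEMU code.

/* Standalone sketch contrasting the old and new delay calculations in
 * ratelimit_calculate_delay().  Example values are invented; this is an
 * illustration, not QEMU code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t dispatched  = 150;         /* units sent in the current slice */
    uint64_t slice_quota = 100;         /* units allowed per slice */
    uint64_t slice_ns    = 100000000;   /* 100 ms slice length */

    /* Old behaviour: round up to whole slices, 150/100 -> 2 slices,
     * i.e. a 200 ms offset from slice_start_time. */
    uint64_t old_slices = (dispatched + slice_quota - 1) / slice_quota;

    /* New behaviour: wait in proportion to the excess, 150/100 -> 1.5 slices,
     * i.e. a 150 ms offset from slice_start_time. */
    double new_slices = (double)dispatched / slice_quota;

    printf("old slice_end offset: %" PRIu64 " ns\n", old_slices * slice_ns);
    printf("new slice_end offset: %" PRIu64 " ns\n",
           (uint64_t)(new_slices * slice_ns));
    return 0;
}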