migration: fix calculating xbzrle_counters.cache_miss_rate

As Peter pointed out:
| - xbzrle_counters.cache_miss is done in save_xbzrle_page(), so it's
|   per-guest-page granularity
|
| - RAMState.iterations is done for each ram_find_and_save_block(), so
|   it's per-host-page granularity
|
| An example is that when we migrate a 2M huge page in the guest, we
| will only increase the RAMState.iterations by 1 (since
| ram_find_and_save_block() will be called once), but we might increase
| xbzrle_counters.cache_miss for 2M/4K=512 times (we'll call
| save_xbzrle_page() that many times) if all the pages got cache miss.
| Then IMHO the cache miss rate will be 512/1=51200% (while it should
| actually be just 100% cache miss).
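
To make the arithmetic concrete, here is a minimal standalone sketch
(illustration only, not part of the patch), assuming a 4K target page
size and a cache miss for every target page of the 2M huge page:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cache_miss = (2 * 1024 * 1024) / 4096; /* 512 per-target-page misses */
        uint64_t iterations = 1;          /* one ram_find_and_save_block() call */
        uint64_t target_page_count = 512; /* what the patch counts instead */

        /* old denominator: per-host-page iterations */
        printf("old rate: %.0f%%\n", (double)cache_miss / iterations * 100.0);
        /* new denominator: per-target-page count */
        printf("new rate: %.0f%%\n", (double)cache_miss / target_page_count * 100.0);
        return 0;
    }

The old denominator reports 51200%; the per-target-page denominator
reports the expected 100%.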

He also suggested that, since xbzrle_counters.cache_miss_rate is the
only user of rs->iterations, we can adapt it to count target guest
pages instead.

After that, rename 'iterations' to 'target_page_count' to better
reflect its meaning.
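
For reference, below is a simplified standalone model of the corrected
period accounting; the real change is in migration_update_rates() in
the diff that follows, and the mocked-up state here is only for
illustration:

    #include <stdio.h>
    #include <stdint.h>

    static struct {
        uint64_t target_page_count;      /* total handled target pages since start */
        uint64_t target_page_count_prev; /* snapshot at the start of the period */
        uint64_t cache_miss;             /* xbzrle misses since start */
        uint64_t cache_miss_prev;        /* snapshot at the start of the period */
    } rs;

    static void update_rates(void)
    {
        uint64_t page_count = rs.target_page_count - rs.target_page_count_prev;

        if (!page_count) {
            return;
        }
        printf("cache miss rate: %.2f\n",
               (double)(rs.cache_miss - rs.cache_miss_prev) / page_count);
        rs.cache_miss_prev = rs.cache_miss;
        rs.target_page_count_prev = rs.target_page_count;
    }

    int main(void)
    {
        /* one 2M huge page migrated: 512 target pages handled, all of them misses */
        rs.target_page_count += 512;
        rs.cache_miss += 512;
        update_rates(); /* prints "cache miss rate: 1.00", i.e. 100% */
        return 0;
    }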

Suggested-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180903092644.25812-3-xiaoguangrong@tencent.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

@@ -301,10 +301,10 @@ struct RAMState {
     uint64_t num_dirty_pages_period;
     /* xbzrle misses since the beginning of the period */
     uint64_t xbzrle_cache_miss_prev;
-    /* number of iterations at the beginning of period */
-    uint64_t iterations_prev;
-    /* Iterations since start */
-    uint64_t iterations;
+    /* total handled target pages at the beginning of period */
+    uint64_t target_page_count_prev;
+    /* total handled target pages since start */
+    uint64_t target_page_count;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
     /* protects modification of the bitmap */
@@ -1592,19 +1592,19 @@ uint64_t ram_pagesize_summary(void)
 
 static void migration_update_rates(RAMState *rs, int64_t end_time)
 {
-    uint64_t iter_count = rs->iterations - rs->iterations_prev;
+    uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
 
     /* calculate period counters */
     ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
                 / (end_time - rs->time_last_bitmap_sync);
 
-    if (!iter_count) {
+    if (!page_count) {
         return;
     }
 
     if (migrate_use_xbzrle()) {
         xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
-            rs->xbzrle_cache_miss_prev) / iter_count;
+            rs->xbzrle_cache_miss_prev) / page_count;
         rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
     }
 }
@@ -1662,7 +1662,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
         migration_update_rates(rs, end_time);
 
-        rs->iterations_prev = rs->iterations;
+        rs->target_page_count_prev = rs->target_page_count;
 
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
@@ -3196,7 +3196,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             done = 1;
             break;
         }
-        rs->iterations++;
+        rs->target_page_count += pages;
 
         /* we want to check in the 1st loop, just in case it was the 1st time
            and we had to sync the dirty bitmap.