From 4d885131574ba530e48ef90d5c0ca4dffc9c3759 Mon Sep 17 00:00:00 2001
From: Peter Maydell <peter.maydell@linaro.org>
Date: Fri, 10 Jun 2016 17:09:22 +0100
Subject: [PATCH 01/13] migration: Don't use *_to_cpup() and cpu_to_*w()

The *_to_cpup() and cpu_to_*w() functions just compose a pointer
dereference with a byteswap. Instead use ld*_p() and st*_p(),
which handle potential pointer misalignment and avoid the need
to cast the pointer.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Amit Shah <amit.shah@redhat.com>
Message-Id: <1465574962-2710-1-git-send-email-peter.maydell@linaro.org>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
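A minimal sketch of the two styles (editorial illustration, not part of the patch), assuming the helpers from include/qemu/bswap.h and a byte buffer with no particular alignment; the variable names mirror the MIG_RP_MSG_REQ_PAGES case below:

    uint8_t buf[16];

    /* old style: cast plus byteswap; the cast assumes the pointer is
     * suitably aligned for a uint64_t/uint32_t access */
    uint64_t start_old = be64_to_cpup((uint64_t *)buf);
    uint32_t len_old   = be32_to_cpup((uint32_t *)(buf + 8));

    /* new style: ld*_p() takes void *, tolerates misalignment and
     * needs no cast */
    uint64_t start_new = ldq_be_p(buf);
    uint32_t len_new   = ldl_be_p(buf + 8);
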
 migration/migration.c | 12 ++++++------
 migration/savevm.c    |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/migration/migration.c b/migration/migration.c
index 20f88757d8..a56013662d 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1384,7 +1384,7 @@ static void *source_return_path_thread(void *opaque)
         /* OK, we have the message and the data */
         switch (header_type) {
         case MIG_RP_MSG_SHUT:
-            sibling_error = be32_to_cpup((uint32_t *)buf);
+            sibling_error = ldl_be_p(buf);
             trace_source_return_path_thread_shut(sibling_error);
             if (sibling_error) {
                 error_report("RP: Sibling indicated error %d", sibling_error);
@@ -1398,13 +1398,13 @@ static void *source_return_path_thread(void *opaque)
             goto out;
 
         case MIG_RP_MSG_PONG:
-            tmp32 = be32_to_cpup((uint32_t *)buf);
+            tmp32 = ldl_be_p(buf);
             trace_source_return_path_thread_pong(tmp32);
             break;
 
         case MIG_RP_MSG_REQ_PAGES:
-            start = be64_to_cpup((uint64_t *)buf);
-            len = be32_to_cpup((uint32_t *)(buf + 8));
+            start = ldq_be_p(buf);
+            len = ldl_be_p(buf + 8);
             migrate_handle_rp_req_pages(ms, NULL, start, len);
             break;
 
@@ -1412,8 +1412,8 @@ static void *source_return_path_thread(void *opaque)
             expected_len = 12 + 1; /* header + termination */
 
             if (header_len >= expected_len) {
-                start = be64_to_cpup((uint64_t *)buf);
-                len = be32_to_cpup((uint32_t *)(buf + 8));
+                start = ldq_be_p(buf);
+                len = ldl_be_p(buf + 8);
                 /* Now we expect an idstr */
                 tmp32 = buf[12]; /* Length of the following idstr */
                 buf[13 + tmp32] = '\0';
diff --git a/migration/savevm.c b/migration/savevm.c
index ae2ef8b5d4..6da084c2cf 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -823,9 +823,9 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
     buf[tmplen++] = '\0';
 
     for (t = 0; t < len; t++) {
-        cpu_to_be64w((uint64_t *)(buf + tmplen), start_list[t]);
+        stq_be_p(buf + tmplen, start_list[t]);
         tmplen += 8;
-        cpu_to_be64w((uint64_t *)(buf + tmplen), length_list[t]);
+        stq_be_p(buf + tmplen, length_list[t]);
         tmplen += 8;
     }
     qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf);

From 023ad1a6e9296470d8fd81db3bf0379057fc8994 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Date: Tue, 14 Jun 2016 10:36:26 +0100
Subject: [PATCH 02/13] migration: Trace improvements

A couple of improvements to tracing that have come out of helping
people with migration problems:
  * vmstate_n_elems: trace the count/name - for when you have problems
    getting array counts right
  * vmstate_subsection_load_bad: add the idstr, for when you receive a
    subsection you weren't expecting.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <1465896986-16132-1-git-send-email-dgilbert@redhat.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/vmstate.c | 13 +++++++------
 trace-events        |  3 ++-
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/migration/vmstate.c b/migration/vmstate.c
index 46dc55ea40..fc29acf74d 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -32,6 +32,7 @@ static int vmstate_n_elems(void *opaque, VMStateField *field)
         n_elems *= field->num;
     }
 
+    trace_vmstate_n_elems(field->name, n_elems);
     return n_elems;
 }
 
@@ -381,25 +382,25 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
         len = qemu_peek_byte(f, 1);
         if (len < strlen(vmsd->name) + 1) {
             /* subsection name has be be "section_name/a" */
-            trace_vmstate_subsection_load_bad(vmsd->name, "(short)");
+            trace_vmstate_subsection_load_bad(vmsd->name, "(short)", "");
             return 0;
         }
         size = qemu_peek_buffer(f, (uint8_t **)&idstr_ret, len, 2);
         if (size != len) {
-            trace_vmstate_subsection_load_bad(vmsd->name, "(peek fail)");
+            trace_vmstate_subsection_load_bad(vmsd->name, "(peek fail)", "");
             return 0;
         }
         memcpy(idstr, idstr_ret, size);
         idstr[size] = 0;
 
         if (strncmp(vmsd->name, idstr, strlen(vmsd->name)) != 0) {
-            trace_vmstate_subsection_load_bad(vmsd->name, idstr);
-            /* it don't have a valid subsection name */
+            trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(prefix)");
+            /* it doesn't have a valid subsection name */
             return 0;
         }
         sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr);
         if (sub_vmsd == NULL) {
-            trace_vmstate_subsection_load_bad(vmsd->name, "(lookup)");
+            trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(lookup)");
             return -ENOENT;
         }
         qemu_file_skip(f, 1); /* subsection */
@@ -409,7 +410,7 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd,
 
         ret = vmstate_load_state(f, sub_vmsd, opaque, version_id);
         if (ret) {
-            trace_vmstate_subsection_load_bad(vmsd->name, "(child)");
+            trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(child)");
             return ret;
         }
     }
diff --git a/trace-events b/trace-events
index 104b64fae1..17e901bdc3 100644
--- a/trace-events
+++ b/trace-events
@@ -1271,8 +1271,9 @@ vmstate_load_field_error(const char *field, int ret) "field \"%s\" load failed,
 vmstate_load_state(const char *name, int version_id) "%s v%d"
 vmstate_load_state_end(const char *name, const char *reason, int val) "%s %s/%d"
 vmstate_load_state_field(const char *name, const char *field) "%s:%s"
+vmstate_n_elems(const char *name, int n_elems) "%s: %d"
 vmstate_subsection_load(const char *parent) "%s"
-vmstate_subsection_load_bad(const char *parent,  const char *sub) "%s: %s"
+vmstate_subsection_load_bad(const char *parent,  const char *sub, const char *sub2) "%s: %s/%s"
 vmstate_subsection_load_good(const char *parent) "%s"
 
 # qemu-file.c

From 6dcf66681aea2a371ddd63770bf80615652f5c94 Mon Sep 17 00:00:00 2001
From: "Denis V. Lunev" <den@openvz.org>
Date: Wed, 15 Jun 2016 18:06:43 +0300
Subject: [PATCH 03/13] migration: fix inability to save VM after snapshot

The following sequence of operations fails:
    virsh start vm
    virsh snapshot-create vm
    virsh save vm --file file
with the following error
    error: Failed to save domain vm to file
    error: internal error: unable to execute QEMU command 'migrate':
    There's a migration process in progress

The problem is that qemu_savevm_state() calls migrate_init(), which
sets the migration state to MIGRATION_STATUS_SETUP but never cleans
it up afterwards.  This patch does that cleanup.

Signed-off-by: Denis V. Lunev <den@openvz.org>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Amit Shah <amit.shah@redhat.com>
Message-Id: <1466003203-26263-1-git-send-email-den@openvz.org>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/savevm.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/migration/savevm.c b/migration/savevm.c
index 6da084c2cf..38b85ee77b 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1150,10 +1150,12 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
         .shared = 0
     };
     MigrationState *ms = migrate_init(&params);
+    MigrationStatus status;
     ms->to_dst_file = f;
 
     if (migration_is_blocked(errp)) {
-        return -EINVAL;
+        ret = -EINVAL;
+        goto done;
     }
 
     qemu_mutex_unlock_iothread();
@@ -1176,6 +1178,14 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
     if (ret != 0) {
         error_setg_errno(errp, -ret, "Error while writing VM state");
     }
+
+done:
+    if (ret != 0) {
+        status = MIGRATION_STATUS_FAILED;
+    } else {
+        status = MIGRATION_STATUS_COMPLETED;
+    }
+    migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status);
     return ret;
 }
 

From 73a8912b8aeed1c992e3f9cb4880e9d1edb935de Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:51 +0800
Subject: [PATCH 04/13] migration: Fix multi-thread compression bug

Recently, a bug related to the multi-thread compression feature for
live migration was reported.  The destination side can be blocked
during live migration if there is a heavy workload on the host and a
memory-intensive workload in the guest; this is most likely to happen
when there is only one decompression thread.

Some parts of the decompression code are incorrect:
1. The main thread, which receives data from the source side, enters
a busy loop while waiting for a free decompression thread.
2. A lock is needed to protect decomp_param[idx]->start, because it
is checked in the main thread and updated in the decompression
thread.

Fix these two issues by following the code pattern for compression.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Reported-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Daniel P. Berrange <berrange@redhat.com>

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-2-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
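A condensed sketch of the handshake this fix switches to (decomp_done_lock, decomp_done_cond and the per-thread done flag are names from the patch; find_free_thread() is only a placeholder for the scan over decomp_param[idx].done):

    /* receive (main) thread: sleep instead of busy-polling for a free thread */
    qemu_mutex_lock(&decomp_done_lock);
    while ((idx = find_free_thread()) < 0) {
        qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
    }
    /* hand the compressed page to decomp_param[idx] and wake it up */
    qemu_mutex_unlock(&decomp_done_lock);

    /* decompression thread, after finishing a page: mark itself free */
    qemu_mutex_lock(&decomp_done_lock);
    param->done = true;
    qemu_cond_signal(&decomp_done_cond);
    qemu_mutex_unlock(&decomp_done_lock);
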
 migration/ram.c | 40 ++++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 42fb8ac6d6..f3fe6c7aae 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -265,6 +265,7 @@ typedef struct CompressParam CompressParam;
 
 struct DecompressParam {
     bool start;
+    bool done;
     QemuMutex mutex;
     QemuCond cond;
     void *des;
@@ -289,6 +290,8 @@ static bool quit_comp_thread;
 static bool quit_decomp_thread;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
+static QemuMutex decomp_done_lock;
+static QemuCond decomp_done_cond;
 
 static int do_compress_ram_page(CompressParam *param);
 
@@ -834,6 +837,7 @@ static inline void start_compression(CompressParam *param)
 
 static inline void start_decompression(DecompressParam *param)
 {
+    param->done = false;
     qemu_mutex_lock(&param->mutex);
     param->start = true;
     qemu_cond_signal(&param->cond);
@@ -2196,19 +2200,24 @@ static void *do_data_decompress(void *opaque)
         qemu_mutex_lock(&param->mutex);
         while (!param->start && !quit_decomp_thread) {
             qemu_cond_wait(&param->cond, &param->mutex);
-            pagesize = TARGET_PAGE_SIZE;
-            if (!quit_decomp_thread) {
-                /* uncompress() will return failed in some case, especially
-                 * when the page is dirted when doing the compression, it's
-                 * not a problem because the dirty page will be retransferred
-                 * and uncompress() won't break the data in other pages.
-                 */
-                uncompress((Bytef *)param->des, &pagesize,
-                           (const Bytef *)param->compbuf, param->len);
-            }
-            param->start = false;
         }
+        if (!quit_decomp_thread) {
+            pagesize = TARGET_PAGE_SIZE;
+            /* uncompress() will return failed in some case, especially
+             * when the page is dirted when doing the compression, it's
+             * not a problem because the dirty page will be retransferred
+             * and uncompress() won't break the data in other pages.
+             */
+            uncompress((Bytef *)param->des, &pagesize,
+                       (const Bytef *)param->compbuf, param->len);
+        }
+        param->start = false;
         qemu_mutex_unlock(&param->mutex);
+
+        qemu_mutex_lock(&decomp_done_lock);
+        param->done = true;
+        qemu_cond_signal(&decomp_done_cond);
+        qemu_mutex_unlock(&decomp_done_lock);
     }
 
     return NULL;
@@ -2222,10 +2231,13 @@ void migrate_decompress_threads_create(void)
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
     quit_decomp_thread = false;
+    qemu_mutex_init(&decomp_done_lock);
+    qemu_cond_init(&decomp_done_cond);
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_init(&decomp_param[i].mutex);
         qemu_cond_init(&decomp_param[i].cond);
         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
+        decomp_param[i].done = true;
         qemu_thread_create(decompress_threads + i, "decompress",
                            do_data_decompress, decomp_param + i,
                            QEMU_THREAD_JOINABLE);
@@ -2261,9 +2273,10 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     int idx, thread_count;
 
     thread_count = migrate_decompress_threads();
+    qemu_mutex_lock(&decomp_done_lock);
     while (true) {
         for (idx = 0; idx < thread_count; idx++) {
-            if (!decomp_param[idx].start) {
+            if (decomp_param[idx].done) {
                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                 decomp_param[idx].des = host;
                 decomp_param[idx].len = len;
@@ -2273,8 +2286,11 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
         }
         if (idx < thread_count) {
             break;
+        } else {
+            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
         }
     }
+    qemu_mutex_unlock(&decomp_done_lock);
 }
 
 /*

From 5533b2e9bcd222e37ca6c2ff06e79adf9bf036bf Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:52 +0800
Subject: [PATCH 05/13] migration: Fix a potential issue

At the end of live migration, before vm_start() on the destination
side, we should make sure all the decompression tasks have finished.
If this cannot be guaranteed, the VM may see incorrect memory data,
or already-updated memory may be overwritten by a decompression
thread.  Add code to fix this potential issue.

Suggested-by: David Alan Gilbert <dgilbert@redhat.com>
Suggested-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-3-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/migration/ram.c b/migration/ram.c
index f3fe6c7aae..5ccc06840c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2223,6 +2223,24 @@ static void *do_data_decompress(void *opaque)
     return NULL;
 }
 
+static void wait_for_decompress_done(void)
+{
+    int idx, thread_count;
+
+    if (!migrate_use_compression()) {
+        return;
+    }
+
+    thread_count = migrate_decompress_threads();
+    qemu_mutex_lock(&decomp_done_lock);
+    for (idx = 0; idx < thread_count; idx++) {
+        while (!decomp_param[idx].done) {
+            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
+        }
+    }
+    qemu_mutex_unlock(&decomp_done_lock);
+}
+
 void migrate_decompress_threads_create(void)
 {
     int i, thread_count;
@@ -2557,6 +2575,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         }
     }
 
+    wait_for_decompress_done();
     rcu_read_unlock();
     DPRINTF("Completed load of VM with exit code %d seq iteration "
             "%" PRIu64 "\n", ret, seq_iter);

From e7bb92e21a47c97dc477dc0dd49acf8ee0e633ad Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:53 +0800
Subject: [PATCH 06/13] migration: remove useless code

page_buffer is set twice in a row; remove the earlier, redundant assignment.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Message-Id: <1462433579-13691-4-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/migration/ram.c b/migration/ram.c
index 5ccc06840c..6416e60412 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2359,7 +2359,6 @@ static int ram_load_postcopy(QEMUFile *f)
                 ret = -EINVAL;
                 break;
             }
-            page_buffer = host;
             /*
              * Postcopy requires that we place whole host pages atomically.
              * To make it atomic, the data is read into a temporary page

From b3be28969b797b27d7f7f806827e9898e4ee08f0 Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:54 +0800
Subject: [PATCH 07/13] qemu-file: Fix qemu_put_compression_data flaw

The current qemu_put_compression_data() can only work with a
non-writable QEMUFile and cannot handle a writable one, but it does
not provide any measure to prevent users from calling it with a
writable QEMUFile.

We should fix this flaw so that it also works with a writable QEMUFile.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Suggested-by: Juan Quintela <quintela@redhat.com>
Message-Id: <1462433579-13691-5-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
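With this change the no-space case is reported as a negative return value instead of 0, so a caller can fail the migration stream rather than silently accounting zero bytes.  A sketch of the caller-side handling, mirroring the do_compress_ram_page() hunk below:

    blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        /* mark the outgoing stream as failed so migration aborts cleanly */
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
    }
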
 migration/qemu-file.c | 23 +++++++++++++++++++++--
 migration/ram.c       | 18 +++++++++++++-----
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index 8aea1c7094..bbc565eb53 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -615,8 +615,14 @@ uint64_t qemu_get_be64(QEMUFile *f)
     return v;
 }
 
-/* compress size bytes of data start at p with specific compression
+/* Compress size bytes of data start at p with specific compression
  * level and store the compressed data to the buffer of f.
+ *
+ * When f is not writable, return -1 if f has no space to save the
+ * compressed data.
+ * When f is writable and it has no space to save the compressed
+ * do fflush first, if f still has no space to save the compressed
+ * data, return -1.
  */
 
 ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size,
@@ -625,7 +631,14 @@ ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size,
     ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t);
 
     if (blen < compressBound(size)) {
-        return 0;
+        if (!qemu_file_is_writable(f)) {
+            return -1;
+        }
+        qemu_fflush(f);
+        blen = IO_BUF_SIZE - sizeof(int32_t);
+        if (blen < compressBound(size)) {
+            return -1;
+        }
     }
     if (compress2(f->buf + f->buf_index + sizeof(int32_t), (uLongf *)&blen,
                   (Bytef *)p, size, level) != Z_OK) {
@@ -633,7 +646,13 @@ ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size,
         return 0;
     }
     qemu_put_be32(f, blen);
+    if (f->ops->writev_buffer) {
+        add_to_iovec(f, f->buf + f->buf_index, blen);
+    }
     f->buf_index += blen;
+    if (f->buf_index == IO_BUF_SIZE) {
+        qemu_fflush(f);
+    }
     return blen + sizeof(int32_t);
 }
 
diff --git a/migration/ram.c b/migration/ram.c
index 6416e60412..d30c923195 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -821,7 +821,13 @@ static int do_compress_ram_page(CompressParam *param)
                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
     blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
                                      migrate_compress_level());
-    bytes_sent += blen;
+    if (blen < 0) {
+        bytes_sent = 0;
+        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
+        error_report("compressed data failed!");
+    } else {
+        bytes_sent += blen;
+    }
 
     return bytes_sent;
 }
@@ -965,10 +971,12 @@ static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
                  * first page is sent out before other pages
                  */
                 bytes_xmit = do_compress_ram_page(&comp_param[0]);
-                acct_info.norm_pages++;
-                qemu_put_qemu_file(f, comp_param[0].file);
-                *bytes_transferred += bytes_xmit;
-                pages = 1;
+                if (bytes_xmit > 0) {
+                    acct_info.norm_pages++;
+                    qemu_put_qemu_file(f, comp_param[0].file);
+                    *bytes_transferred += bytes_xmit;
+                    pages = 1;
+                }
             }
         } else {
             pages = save_zero_page(f, block, offset, p, bytes_transferred);

From fc50438ed0b7106542048d70686ee4b1c340ea49 Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:55 +0800
Subject: [PATCH 08/13] migration: refine ram_save_compressed_page

Use qemu_put_compression_data() to do the compression directly
instead of going through do_compress_ram_page(), which avoids a data
copy.  It is a very small improvement; at the same time, add code to
check whether the compression was successful.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-6-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index d30c923195..9e4f5e5efc 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -929,24 +929,20 @@ static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
                                     uint64_t *bytes_transferred)
 {
     int pages = -1;
-    uint64_t bytes_xmit;
+    uint64_t bytes_xmit = 0;
     uint8_t *p;
-    int ret;
+    int ret, blen;
     RAMBlock *block = pss->block;
     ram_addr_t offset = pss->offset;
 
     p = block->host + offset;
 
-    bytes_xmit = 0;
     ret = ram_control_save_page(f, block->offset,
                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
         *bytes_transferred += bytes_xmit;
         pages = 1;
     }
-    if (block == last_sent_block) {
-        offset |= RAM_SAVE_FLAG_CONTINUE;
-    }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
@@ -966,19 +962,22 @@ static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
             flush_compressed_data(f);
             pages = save_zero_page(f, block, offset, p, bytes_transferred);
             if (pages == -1) {
-                set_compress_params(&comp_param[0], block, offset);
-                /* Use the qemu thread to compress the data to make sure the
-                 * first page is sent out before other pages
-                 */
-                bytes_xmit = do_compress_ram_page(&comp_param[0]);
-                if (bytes_xmit > 0) {
+                /* Make sure the first page is sent out before other pages */
+                bytes_xmit = save_page_header(f, block, offset |
+                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
+                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
+                                                 migrate_compress_level());
+                if (blen > 0) {
+                    *bytes_transferred += bytes_xmit + blen;
                     acct_info.norm_pages++;
-                    qemu_put_qemu_file(f, comp_param[0].file);
-                    *bytes_transferred += bytes_xmit;
                     pages = 1;
+                } else {
+                    qemu_file_set_error(f, blen);
+                    error_report("compressed data failed!");
                 }
             }
         } else {
+            offset |= RAM_SAVE_FLAG_CONTINUE;
             pages = save_zero_page(f, block, offset, p, bytes_transferred);
             if (pages == -1) {
                 pages = compress_page_with_multi_thread(f, block, offset,

From 90e56fb46d0a7add88ed463efa4e723a6238f692 Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:56 +0800
Subject: [PATCH 09/13] migration: protect the quit flag by lock

quit_comp_thread and quit_decomp_thread are accessed by several
threads; it's better to protect them with locks.  Replace the global
flags with a per-thread flag, protected by the per-thread lock.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-7-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
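A condensed sketch of the resulting pattern (param stands for one comp_param[i]/decomp_param[i] entry; the rest of the worker loop is omitted):

    /* terminating side: set the per-thread flag under that thread's lock */
    qemu_mutex_lock(&param->mutex);
    param->quit = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);

    /* worker side: the flag is only tested with the same lock held, so
     * the wake-up cannot be missed between the test and the cond_wait */
    while (!param->start && !param->quit) {
        qemu_cond_wait(&param->cond, &param->mutex);
    }
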
 migration/ram.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 9e4f5e5efc..a5ed21b92a 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -255,6 +255,7 @@ static struct BitmapRcu {
 struct CompressParam {
     bool start;
     bool done;
+    bool quit;
     QEMUFile *file;
     QemuMutex mutex;
     QemuCond cond;
@@ -266,6 +267,7 @@ typedef struct CompressParam CompressParam;
 struct DecompressParam {
     bool start;
     bool done;
+    bool quit;
     QemuMutex mutex;
     QemuCond cond;
     void *des;
@@ -286,8 +288,6 @@ static QemuCond *comp_done_cond;
 static const QEMUFileOps empty_ops = { };
 
 static bool compression_switch;
-static bool quit_comp_thread;
-static bool quit_decomp_thread;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
 static QemuMutex decomp_done_lock;
@@ -299,18 +299,18 @@ static void *do_data_compress(void *opaque)
 {
     CompressParam *param = opaque;
 
-    while (!quit_comp_thread) {
+    while (!param->quit) {
         qemu_mutex_lock(&param->mutex);
-        /* Re-check the quit_comp_thread in case of
+        /* Re-check the quit flag in case of
          * terminate_compression_threads is called just before
          * qemu_mutex_lock(&param->mutex) and after
-         * while(!quit_comp_thread), re-check it here can make
+         * while(!param->quit), re-check it here can make
          * sure the compression thread terminate as expected.
          */
-        while (!param->start && !quit_comp_thread) {
+        while (!param->start && !param->quit) {
             qemu_cond_wait(&param->cond, &param->mutex);
         }
-        if (!quit_comp_thread) {
+        if (!param->quit) {
             do_compress_ram_page(param);
         }
         param->start = false;
@@ -330,9 +330,9 @@ static inline void terminate_compression_threads(void)
     int idx, thread_count;
 
     thread_count = migrate_compress_threads();
-    quit_comp_thread = true;
     for (idx = 0; idx < thread_count; idx++) {
         qemu_mutex_lock(&comp_param[idx].mutex);
+        comp_param[idx].quit = true;
         qemu_cond_signal(&comp_param[idx].cond);
         qemu_mutex_unlock(&comp_param[idx].mutex);
     }
@@ -372,7 +372,6 @@ void migrate_compress_threads_create(void)
     if (!migrate_use_compression()) {
         return;
     }
-    quit_comp_thread = false;
     compression_switch = true;
     thread_count = migrate_compress_threads();
     compress_threads = g_new0(QemuThread, thread_count);
@@ -387,6 +386,7 @@ void migrate_compress_threads_create(void)
          */
         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
         comp_param[i].done = true;
+        comp_param[i].quit = false;
         qemu_mutex_init(&comp_param[i].mutex);
         qemu_cond_init(&comp_param[i].cond);
         qemu_thread_create(compress_threads + i, "compress",
@@ -863,12 +863,12 @@ static void flush_compressed_data(QEMUFile *f)
     for (idx = 0; idx < thread_count; idx++) {
         if (!comp_param[idx].done) {
             qemu_mutex_lock(comp_done_lock);
-            while (!comp_param[idx].done && !quit_comp_thread) {
+            while (!comp_param[idx].done && !comp_param[idx].quit) {
                 qemu_cond_wait(comp_done_cond, comp_done_lock);
             }
             qemu_mutex_unlock(comp_done_lock);
         }
-        if (!quit_comp_thread) {
+        if (!comp_param[idx].quit) {
             len = qemu_put_qemu_file(f, comp_param[idx].file);
             bytes_transferred += len;
         }
@@ -2203,12 +2203,12 @@ static void *do_data_decompress(void *opaque)
     DecompressParam *param = opaque;
     unsigned long pagesize;
 
-    while (!quit_decomp_thread) {
+    while (!param->quit) {
         qemu_mutex_lock(&param->mutex);
-        while (!param->start && !quit_decomp_thread) {
+        while (!param->start && !param->quit) {
             qemu_cond_wait(&param->cond, &param->mutex);
         }
-        if (!quit_decomp_thread) {
+        if (!param->quit) {
             pagesize = TARGET_PAGE_SIZE;
             /* uncompress() will return failed in some case, especially
              * when the page is dirted when doing the compression, it's
@@ -2255,7 +2255,6 @@ void migrate_decompress_threads_create(void)
     thread_count = migrate_decompress_threads();
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
-    quit_decomp_thread = false;
     qemu_mutex_init(&decomp_done_lock);
     qemu_cond_init(&decomp_done_cond);
     for (i = 0; i < thread_count; i++) {
@@ -2263,6 +2262,7 @@ void migrate_decompress_threads_create(void)
         qemu_cond_init(&decomp_param[i].cond);
         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
         decomp_param[i].done = true;
+        decomp_param[i].quit = false;
         qemu_thread_create(decompress_threads + i, "decompress",
                            do_data_decompress, decomp_param + i,
                            QEMU_THREAD_JOINABLE);
@@ -2273,10 +2273,10 @@ void migrate_decompress_threads_join(void)
 {
     int i, thread_count;
 
-    quit_decomp_thread = true;
     thread_count = migrate_decompress_threads();
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_lock(&decomp_param[i].mutex);
+        decomp_param[i].quit = true;
         qemu_cond_signal(&decomp_param[i].cond);
         qemu_mutex_unlock(&decomp_param[i].mutex);
     }

From a7a9a88f9d29da125b0958f3bd1b15182dc94f5f Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:57 +0800
Subject: [PATCH 10/13] migration: refine the compression code

The current code for multi-thread compression is not clear,
especially in its use of locking.  Refine the code to make it
clearer.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-8-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 84 +++++++++++++++++++++++--------------------------
 1 file changed, 40 insertions(+), 44 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index a5ed21b92a..59473d98c7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -253,7 +253,6 @@ static struct BitmapRcu {
 } *migration_bitmap_rcu;
 
 struct CompressParam {
-    bool start;
     bool done;
     bool quit;
     QEMUFile *file;
@@ -293,34 +292,36 @@ static QemuThread *decompress_threads;
 static QemuMutex decomp_done_lock;
 static QemuCond decomp_done_cond;
 
-static int do_compress_ram_page(CompressParam *param);
+static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
+                                ram_addr_t offset);
 
 static void *do_data_compress(void *opaque)
 {
     CompressParam *param = opaque;
+    RAMBlock *block;
+    ram_addr_t offset;
 
+    qemu_mutex_lock(&param->mutex);
     while (!param->quit) {
-        qemu_mutex_lock(&param->mutex);
-        /* Re-check the quit flag in case of
-         * terminate_compression_threads is called just before
-         * qemu_mutex_lock(&param->mutex) and after
-         * while(!param->quit), re-check it here can make
-         * sure the compression thread terminate as expected.
-         */
-        while (!param->start && !param->quit) {
+        if (param->block) {
+            block = param->block;
+            offset = param->offset;
+            param->block = NULL;
+            qemu_mutex_unlock(&param->mutex);
+
+            do_compress_ram_page(param->file, block, offset);
+
+            qemu_mutex_lock(comp_done_lock);
+            param->done = true;
+            qemu_cond_signal(comp_done_cond);
+            qemu_mutex_unlock(comp_done_lock);
+
+            qemu_mutex_lock(&param->mutex);
+        } else {
             qemu_cond_wait(&param->cond, &param->mutex);
         }
-        if (!param->quit) {
-            do_compress_ram_page(param);
-        }
-        param->start = false;
-        qemu_mutex_unlock(&param->mutex);
-
-        qemu_mutex_lock(comp_done_lock);
-        param->done = true;
-        qemu_cond_signal(comp_done_cond);
-        qemu_mutex_unlock(comp_done_lock);
     }
+    qemu_mutex_unlock(&param->mutex);
 
     return NULL;
 }
@@ -808,18 +809,15 @@ static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
     return pages;
 }
 
-static int do_compress_ram_page(CompressParam *param)
+static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
+                                ram_addr_t offset)
 {
     int bytes_sent, blen;
-    uint8_t *p;
-    RAMBlock *block = param->block;
-    ram_addr_t offset = param->offset;
+    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
 
-    p = block->host + (offset & TARGET_PAGE_MASK);
-
-    bytes_sent = save_page_header(param->file, block, offset |
+    bytes_sent = save_page_header(f, block, offset |
                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
-    blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
+    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                      migrate_compress_level());
     if (blen < 0) {
         bytes_sent = 0;
@@ -832,15 +830,6 @@ static int do_compress_ram_page(CompressParam *param)
     return bytes_sent;
 }
 
-static inline void start_compression(CompressParam *param)
-{
-    param->done = false;
-    qemu_mutex_lock(&param->mutex);
-    param->start = true;
-    qemu_cond_signal(&param->cond);
-    qemu_mutex_unlock(&param->mutex);
-}
-
 static inline void start_decompression(DecompressParam *param)
 {
     param->done = false;
@@ -860,18 +849,22 @@ static void flush_compressed_data(QEMUFile *f)
         return;
     }
     thread_count = migrate_compress_threads();
+
+    qemu_mutex_lock(comp_done_lock);
     for (idx = 0; idx < thread_count; idx++) {
-        if (!comp_param[idx].done) {
-            qemu_mutex_lock(comp_done_lock);
-            while (!comp_param[idx].done && !comp_param[idx].quit) {
-                qemu_cond_wait(comp_done_cond, comp_done_lock);
-            }
-            qemu_mutex_unlock(comp_done_lock);
+        while (!comp_param[idx].done) {
+            qemu_cond_wait(comp_done_cond, comp_done_lock);
         }
+    }
+    qemu_mutex_unlock(comp_done_lock);
+
+    for (idx = 0; idx < thread_count; idx++) {
+        qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             len = qemu_put_qemu_file(f, comp_param[idx].file);
             bytes_transferred += len;
         }
+        qemu_mutex_unlock(&comp_param[idx].mutex);
     }
 }
 
@@ -893,9 +886,12 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
     while (true) {
         for (idx = 0; idx < thread_count; idx++) {
             if (comp_param[idx].done) {
+                comp_param[idx].done = false;
                 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
+                qemu_mutex_lock(&comp_param[idx].mutex);
                 set_compress_params(&comp_param[idx], block, offset);
-                start_compression(&comp_param[idx]);
+                qemu_cond_signal(&comp_param[idx].cond);
+                qemu_mutex_unlock(&comp_param[idx].mutex);
                 pages = 1;
                 acct_info.norm_pages++;
                 *bytes_transferred += bytes_xmit;

From 33d151f4188a40faee224aba1c7b9ad7b1568eb4 Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:58 +0800
Subject: [PATCH 11/13] migration: refine the decompression code

The current code for multi-thread decompression is not clear,
especially in its use of locking.  Refine the code to make it
clearer.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-9-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 50 ++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 59473d98c7..a44b4f0091 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -264,7 +264,6 @@ struct CompressParam {
 typedef struct CompressParam CompressParam;
 
 struct DecompressParam {
-    bool start;
     bool done;
     bool quit;
     QemuMutex mutex;
@@ -830,15 +829,6 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
     return bytes_sent;
 }
 
-static inline void start_decompression(DecompressParam *param)
-{
-    param->done = false;
-    qemu_mutex_lock(&param->mutex);
-    param->start = true;
-    qemu_cond_signal(&param->cond);
-    qemu_mutex_unlock(&param->mutex);
-}
-
 static uint64_t bytes_transferred;
 
 static void flush_compressed_data(QEMUFile *f)
@@ -2198,30 +2188,37 @@ static void *do_data_decompress(void *opaque)
 {
     DecompressParam *param = opaque;
     unsigned long pagesize;
+    uint8_t *des;
+    int len;
 
+    qemu_mutex_lock(&param->mutex);
     while (!param->quit) {
-        qemu_mutex_lock(&param->mutex);
-        while (!param->start && !param->quit) {
-            qemu_cond_wait(&param->cond, &param->mutex);
-        }
-        if (!param->quit) {
+        if (param->des) {
+            des = param->des;
+            len = param->len;
+            param->des = 0;
+            qemu_mutex_unlock(&param->mutex);
+
             pagesize = TARGET_PAGE_SIZE;
             /* uncompress() will return failed in some case, especially
              * when the page is dirted when doing the compression, it's
              * not a problem because the dirty page will be retransferred
              * and uncompress() won't break the data in other pages.
              */
-            uncompress((Bytef *)param->des, &pagesize,
-                       (const Bytef *)param->compbuf, param->len);
-        }
-        param->start = false;
-        qemu_mutex_unlock(&param->mutex);
+            uncompress((Bytef *)des, &pagesize,
+                       (const Bytef *)param->compbuf, len);
 
-        qemu_mutex_lock(&decomp_done_lock);
-        param->done = true;
-        qemu_cond_signal(&decomp_done_cond);
-        qemu_mutex_unlock(&decomp_done_lock);
+            qemu_mutex_lock(&decomp_done_lock);
+            param->done = true;
+            qemu_cond_signal(&decomp_done_cond);
+            qemu_mutex_unlock(&decomp_done_lock);
+
+            qemu_mutex_lock(&param->mutex);
+        } else {
+            qemu_cond_wait(&param->cond, &param->mutex);
+        }
     }
+    qemu_mutex_unlock(&param->mutex);
 
     return NULL;
 }
@@ -2298,10 +2295,13 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     while (true) {
         for (idx = 0; idx < thread_count; idx++) {
             if (decomp_param[idx].done) {
+                decomp_param[idx].done = false;
+                qemu_mutex_lock(&decomp_param[idx].mutex);
                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                 decomp_param[idx].des = host;
                 decomp_param[idx].len = len;
-                start_decompression(&decomp_param[idx]);
+                qemu_cond_signal(&decomp_param[idx].cond);
+                qemu_mutex_unlock(&decomp_param[idx].mutex);
                 break;
             }
         }

From 0d9f9a5c5237c7c9241b38769a2d06959c943f8b Mon Sep 17 00:00:00 2001
From: Liang Li <liang.z.li@intel.com>
Date: Thu, 5 May 2016 15:32:59 +0800
Subject: [PATCH 12/13] migration: code clean up

Use 'QemuMutex comp_done_lock' and 'QemuCond comp_done_cond' instead
of 'QemuMutex *comp_done_lock' and 'QemuCond *comp_done_cond', to
keep them consistent with 'QemuMutex decomp_done_lock' and
'QemuCond decomp_done_cond'.

Signed-off-by: Liang Li <liang.z.li@intel.com>
Message-Id: <1462433579-13691-10-git-send-email-liang.z.li@intel.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 migration/ram.c | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index a44b4f0091..815bc0e11a 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -280,8 +280,8 @@ static QemuThread *compress_threads;
  * one of the compression threads has finished the compression.
  * comp_done_lock is used to co-work with comp_done_cond.
  */
-static QemuMutex *comp_done_lock;
-static QemuCond *comp_done_cond;
+static QemuMutex comp_done_lock;
+static QemuCond comp_done_cond;
 /* The empty QEMUFileOps will be used by file in CompressParam */
 static const QEMUFileOps empty_ops = { };
 
@@ -310,10 +310,10 @@ static void *do_data_compress(void *opaque)
 
             do_compress_ram_page(param->file, block, offset);
 
-            qemu_mutex_lock(comp_done_lock);
+            qemu_mutex_lock(&comp_done_lock);
             param->done = true;
-            qemu_cond_signal(comp_done_cond);
-            qemu_mutex_unlock(comp_done_lock);
+            qemu_cond_signal(&comp_done_cond);
+            qemu_mutex_unlock(&comp_done_lock);
 
             qemu_mutex_lock(&param->mutex);
         } else {
@@ -353,16 +353,12 @@ void migrate_compress_threads_join(void)
         qemu_mutex_destroy(&comp_param[i].mutex);
         qemu_cond_destroy(&comp_param[i].cond);
     }
-    qemu_mutex_destroy(comp_done_lock);
-    qemu_cond_destroy(comp_done_cond);
+    qemu_mutex_destroy(&comp_done_lock);
+    qemu_cond_destroy(&comp_done_cond);
     g_free(compress_threads);
     g_free(comp_param);
-    g_free(comp_done_cond);
-    g_free(comp_done_lock);
     compress_threads = NULL;
     comp_param = NULL;
-    comp_done_cond = NULL;
-    comp_done_lock = NULL;
 }
 
 void migrate_compress_threads_create(void)
@@ -376,10 +372,8 @@ void migrate_compress_threads_create(void)
     thread_count = migrate_compress_threads();
     compress_threads = g_new0(QemuThread, thread_count);
     comp_param = g_new0(CompressParam, thread_count);
-    comp_done_cond = g_new0(QemuCond, 1);
-    comp_done_lock = g_new0(QemuMutex, 1);
-    qemu_cond_init(comp_done_cond);
-    qemu_mutex_init(comp_done_lock);
+    qemu_cond_init(&comp_done_cond);
+    qemu_mutex_init(&comp_done_lock);
     for (i = 0; i < thread_count; i++) {
         /* com_param[i].file is just used as a dummy buffer to save data, set
          * it's ops to empty.
@@ -840,13 +834,13 @@ static void flush_compressed_data(QEMUFile *f)
     }
     thread_count = migrate_compress_threads();
 
-    qemu_mutex_lock(comp_done_lock);
+    qemu_mutex_lock(&comp_done_lock);
     for (idx = 0; idx < thread_count; idx++) {
         while (!comp_param[idx].done) {
-            qemu_cond_wait(comp_done_cond, comp_done_lock);
+            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
         }
     }
-    qemu_mutex_unlock(comp_done_lock);
+    qemu_mutex_unlock(&comp_done_lock);
 
     for (idx = 0; idx < thread_count; idx++) {
         qemu_mutex_lock(&comp_param[idx].mutex);
@@ -872,7 +866,7 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
     int idx, thread_count, bytes_xmit = -1, pages = -1;
 
     thread_count = migrate_compress_threads();
-    qemu_mutex_lock(comp_done_lock);
+    qemu_mutex_lock(&comp_done_lock);
     while (true) {
         for (idx = 0; idx < thread_count; idx++) {
             if (comp_param[idx].done) {
@@ -891,10 +885,10 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
         if (pages > 0) {
             break;
         } else {
-            qemu_cond_wait(comp_done_cond, comp_done_lock);
+            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
         }
     }
-    qemu_mutex_unlock(comp_done_lock);
+    qemu_mutex_unlock(&comp_done_lock);
 
     return pages;
 }

From 0794d8895ef8d1bd6db528e5d9ca19b86297803b Mon Sep 17 00:00:00 2001
From: Amit Shah <amit.shah@redhat.com>
Date: Fri, 17 Jun 2016 17:46:39 +0530
Subject: [PATCH 13/13] vmstate-static-checker: fix size mismatch detection in
 unused fields

If a field changed from something to unused, the checker wasn't
flagging a field size mismatch.  This was noticed in:

http://thread.gmane.org/gmane.comp.emulators.qemu/419802

where the 4->1 size change, together with the field name change to
'unused', wasn't being flagged.  Fix this.

Signed-off-by: Amit Shah <amit.shah@redhat.com>
Message-Id: <d7ec03a9b2edfa0616764887a51ba8f64fdd3f68.1466165736.git.amit.shah@redhat.com>
Signed-off-by: Amit Shah <amit.shah@redhat.com>
---
 scripts/vmstate-static-checker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py
index b5ecaf644d..14a27e7f6a 100755
--- a/scripts/vmstate-static-checker.py
+++ b/scripts/vmstate-static-checker.py
@@ -185,7 +185,7 @@ def check_fields(src_fields, dest_fields, desc, sec):
             if unused_count == 0:
                 advance_dest = True
 
-        if unused_count > 0:
+        if unused_count != 0:
             if advance_dest == False:
                 unused_count = unused_count - s_item["size"]
                 if unused_count == 0: