From d861714097da3915493277a7ea39704a2b3f12ba Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Wed, 30 Aug 2017 15:54:59 -0700 Subject: [PATCH] Skip checking files that have fewer blocks in block map. When creating ext4 images with mke2fs, it may skip allocating some blocks if they contain all zeros. As a result, there could be fewer blocks listed in the block map than the actual file length implies. For example, for a file with a length of 112200 bytes (27+ blocks), the listed blocks in block.map could be '43665-43688' (24 blocks), because some all-zero blocks do not take actual space. The generated ext4 images are perfectly valid - the kernel will figure out that the data block is not allocated and write all zeros into the user buffer. However, we can't fully reconstruct a file from its block list in our Python script. Ideally this can be avoided by mounting or parsing an ext4 image directly, which is yet to be supported in our script. This CL skips checking such files to avoid failing validate_target_files.py. Bug: 65213616 Test: validate_target_files.py passes on targets with mke2fs-generated images (e.g. marlin). 
Change-Id: Id9cc59e345b9283844044ef94ceb5702f0ca0526 (cherry picked from commit b418c30e3a2a1cfc07bc17577fad29bdbc3301a5) --- tools/releasetools/validate_target_files.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py index 1dd31591b..0ad079841 100755 --- a/tools/releasetools/validate_target_files.py +++ b/tools/releasetools/validate_target_files.py @@ -71,8 +71,20 @@ def ValidateFileConsistency(input_zip, input_tmp): file_size = len(file_data) file_size_rounded_up = RoundUpTo4K(file_size) file_data += '\0' * (file_size_rounded_up - file_size) - file_sha1 = common.File(entry, file_data).sha1 + unpacked_file = common.File(entry, file_data) + file_size = unpacked_file.size + # block.map may contain less blocks, because mke2fs may skip allocating + # blocks if they contain all zeros. We can't reconstruct such a file from + # its block list. (Bug: 65213616) + if file_size > ranges.size() * 4096: + logging.warning( + 'Skipping %s that has less blocks: file size %d-byte,' + ' ranges %s (%d-byte)', entry, file_size, ranges, + ranges.size() * 4096) + continue + + file_sha1 = unpacked_file.sha1 assert blocks_sha1 == file_sha1, \ 'file: %s, range: %s, blocks_sha1: %s, file_sha1: %s' % ( entry, ranges, blocks_sha1, file_sha1)