From b418c30e3a2a1cfc07bc17577fad29bdbc3301a5 Mon Sep 17 00:00:00 2001 From: Tao Bao Date: Wed, 30 Aug 2017 15:54:59 -0700 Subject: [PATCH] Skip checking files that have fewer blocks in block map. When creating ext4 images with mke2fs, it may skip allocating some blocks if they contain all zeros. As a result, there could be fewer blocks listed in the block map than the actual file length. For example, for a file with a length of 112200-byte (27+ blocks), the listed blocks in block.map could be '43665-43688' (24 blocks). Because some all-zero blocks are not taking actual space. The generated ext4 images are perfectly valid - kernel will figure out that data block is not allocated and writes all zeros into user buffer. However, we can't fully reconstruct a file from its block list in our Python script. Ideally this can be avoided by mounting or parsing an ext4 image directly, which is yet to be supported in our script. This CL skips checking for such files to avoid failing validate_target_files.py. Bug: 65213616 Test: validate_target_files.py passes on targets with mke2fs generated images (e.g. marlin). Change-Id: Id9cc59e345b9283844044ef94ceb5702f0ca0526 --- tools/releasetools/validate_target_files.py | 22 ++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py index 8ac3322c6..4b34820c8 100755 --- a/tools/releasetools/validate_target_files.py +++ b/tools/releasetools/validate_target_files.py @@ -44,8 +44,8 @@ def _GetImage(which, tmpdir): return sparse_img.SparseImage(path, mappath, clobbered_blocks) -def _CalculateFileSha1(file_name, unpacked_name, round_up=False): - """Calculate the SHA-1 for a given file. Round up its size to 4K if needed.""" +def _ReadFile(file_name, unpacked_name, round_up=False): + """Constructs and returns a File object. 
Rounds up its size if needed.""" def RoundUpTo4K(value): rounded_up = value + 4095 @@ -58,7 +58,7 @@ def _CalculateFileSha1(file_name, unpacked_name, round_up=False): if round_up: file_size_rounded_up = RoundUpTo4K(file_size) file_data += '\0' * (file_size_rounded_up - file_size) - return common.File(file_name, file_data).sha1 + return common.File(file_name, file_data) def ValidateFileAgainstSha1(input_tmp, file_name, file_path, expected_sha1): @@ -67,7 +67,7 @@ def ValidateFileAgainstSha1(input_tmp, file_name, file_path, expected_sha1): logging.info('Validating the SHA-1 of {}'.format(file_name)) unpacked_name = os.path.join(input_tmp, file_path) assert os.path.exists(unpacked_name) - actual_sha1 = _CalculateFileSha1(file_name, unpacked_name, False) + actual_sha1 = _ReadFile(file_name, unpacked_name, False).sha1 assert actual_sha1 == expected_sha1, \ 'SHA-1 mismatches for {}. actual {}, expected {}'.format( file_name, actual_sha1, expected_sha1) @@ -92,8 +92,20 @@ def ValidateFileConsistency(input_zip, input_tmp): # The filename under unpacked directory, such as SYSTEM/bin/sh. unpacked_name = os.path.join( input_tmp, which.upper(), entry[(len(prefix) + 1):]) - file_sha1 = _CalculateFileSha1(entry, unpacked_name, True) + unpacked_file = _ReadFile(entry, unpacked_name, True) + file_size = unpacked_file.size + # block.map may contain less blocks, because mke2fs may skip allocating + # blocks if they contain all zeros. We can't reconstruct such a file from + # its block list. (Bug: 65213616) + if file_size > ranges.size() * 4096: + logging.warning( + 'Skipping %s that has less blocks: file size %d-byte,' + ' ranges %s (%d-byte)', entry, file_size, ranges, + ranges.size() * 4096) + continue + + file_sha1 = unpacked_file.sha1 assert blocks_sha1 == file_sha1, \ 'file: %s, range: %s, blocks_sha1: %s, file_sha1: %s' % ( entry, ranges, blocks_sha1, file_sha1)