Merge "Zero out blocks that may be touched by dm-verity." into mnc-dev

Author: Tao Bao; committed by Android (Google) Code Review on 2015-07-10 20:44:24 +00:00
commit 7f8ecb7f5c
4 changed files with 73 additions and 14 deletions

tools/releasetools/blockimgdiff.py

@@ -83,6 +83,7 @@ class EmptyImage(Image):
   blocksize = 4096
   care_map = RangeSet()
   clobbered_blocks = RangeSet()
+  extended = RangeSet()
   total_blocks = 0
   file_map = {}
   def ReadRangeSet(self, ranges):
@@ -119,6 +120,7 @@ class DataImage(Image):
     self.total_blocks = len(self.data) / self.blocksize
     self.care_map = RangeSet(data=(0, self.total_blocks))
     self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()

     zero_blocks = []
     nonzero_blocks = []
@@ -411,7 +413,7 @@ class BlockImageDiff(object):
         elif self.version >= 3:
           # take into account automatic stashing of overlapping blocks
           if xf.src_ranges.overlaps(xf.tgt_ranges):
-            temp_stash_usage = stashed_blocks + xf.src_ranges.size();
+            temp_stash_usage = stashed_blocks + xf.src_ranges.size()
             if temp_stash_usage > max_stashed_blocks:
               max_stashed_blocks = temp_stash_usage
@@ -435,7 +437,7 @@ class BlockImageDiff(object):
         elif self.version >= 3:
           # take into account automatic stashing of overlapping blocks
           if xf.src_ranges.overlaps(xf.tgt_ranges):
-            temp_stash_usage = stashed_blocks + xf.src_ranges.size();
+            temp_stash_usage = stashed_blocks + xf.src_ranges.size()
             if temp_stash_usage > max_stashed_blocks:
               max_stashed_blocks = temp_stash_usage
@@ -462,18 +464,17 @@ class BlockImageDiff(object):
     # stash space
     assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)

+    # Zero out extended blocks as a workaround for bug 20881595.
+    if self.tgt.extended:
+      out.append("zero %s\n" % (self.tgt.extended.to_string_raw(),))
+
+    # We erase all the blocks on the partition that a) don't contain useful
+    # data in the new image and b) will not be touched by dm-verity.
     all_tgt = RangeSet(data=(0, self.tgt.total_blocks))
-    if performs_read:
-      # if some of the original data is used, then at the end we'll
-      # erase all the blocks on the partition that don't contain data
-      # in the new image.
-      new_dontcare = all_tgt.subtract(self.tgt.care_map)
-      if new_dontcare:
-        out.append("erase %s\n" % (new_dontcare.to_string_raw(),))
-    else:
-      # if nothing is read (ie, this is a full OTA), then we can start
-      # by erasing the entire partition.
-      out.insert(0, "erase %s\n" % (all_tgt.to_string_raw(),))
+    all_tgt_minus_extended = all_tgt.subtract(self.tgt.extended)
+    new_dontcare = all_tgt_minus_extended.subtract(self.tgt.care_map)
+    if new_dontcare:
+      out.append("erase %s\n" % (new_dontcare.to_string_raw(),))

     out.insert(0, "%d\n" % (self.version,))  # format version number
     out.insert(1, str(total) + "\n")
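
For context on the new trailing commands, here is a minimal standalone sketch of the same bookkeeping, using plain Python sets of block numbers in place of RangeSet; the function names and the human-readable range formatting are illustrative only and not part of releasetools:

# Sketch only: plan the trailing "zero"/"erase" transfer-list commands with
# plain Python sets standing in for RangeSet. Helper names are illustrative.

def plan_trailing_commands(total_blocks, care_map, extended):
    """Return the lines appended after all transfers.

    care_map: blocks holding useful data in the new image.
    extended: blocks outside care_map that dm-verity read-ahead may touch.
    """
    out = []
    # Extended blocks must read back as zeroes, so emit an explicit "zero".
    if extended:
        out.append("zero %s\n" % format_ranges(extended))
    # Blocks that are neither useful data nor extended can simply be erased.
    all_tgt = set(range(total_blocks))
    new_dontcare = all_tgt - extended - care_map
    if new_dontcare:
        out.append("erase %s\n" % format_ranges(new_dontcare))
    return out

def format_ranges(blocks):
    """Render block numbers as human-readable 'start-end' runs (a
    simplification; the real tool uses RangeSet.to_string_raw())."""
    runs = []
    blocks = sorted(blocks)
    start = prev = blocks[0]
    for b in blocks[1:]:
        if b != prev + 1:
            runs.append((start, prev))
            start = b
        prev = b
    runs.append((start, prev))
    return " ".join("%d-%d" % r if r[0] != r[1] else "%d" % r[0] for r in runs)

if __name__ == "__main__":
    # 16-block partition: blocks 0-9 hold data, 10-11 are "extended".
    print(plan_trailing_commands(16, set(range(10)), {10, 11}))
    # -> ['zero 10-11\n', 'erase 12-15\n']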

tools/releasetools/common.py

@@ -1253,7 +1253,23 @@ class BlockDifference(object):
     script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
         self.device, ranges_str,
         self.tgt.TotalSha1(include_clobbered_blocks=True)))
-    script.Print('Verified the updated %s image.' % (partition,))
+
+    # Bug: 20881595
+    # Verify that extended blocks are really zeroed out.
+    if self.tgt.extended:
+      ranges_str = self.tgt.extended.to_string_raw()
+      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
+          self.device, ranges_str,
+          self._HashZeroBlocks(self.tgt.extended.size())))
+      script.Print('Verified the updated %s image.' % (partition,))
+      script.AppendExtra(
+          'else\n'
+          '  abort("%s partition has unexpected non-zero contents after OTA '
+          'update");\n'
+          'endif;' % (partition,))
+    else:
+      script.Print('Verified the updated %s image.' % (partition,))
+
     script.AppendExtra(
         'else\n'
         '  abort("%s partition has unexpected contents after OTA update");\n'
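
For reference, this is roughly the shape of the updater-script fragment the block above emits when self.tgt.extended is non-empty. The partition name ("system"), device path and digests below are made-up placeholders; the exact text comes from the edify generator (script.Print() renders as ui_print(), AppendExtra() is passed through verbatim):

# Illustrative only: approximate generated updater-script fragment with
# placeholder values, shown here as a Python string.
EXPECTED_EDIFY_SHAPE = '''\
if range_sha1("<device>", "<care_map ranges>") == "<sha1 of new image>" then
if range_sha1("<device>", "<extended ranges>") == "<sha1 of all-zero blocks>" then
ui_print("Verified the updated system image.");
else
  abort("system partition has unexpected non-zero contents after OTA update");
endif;
else
  abort("system partition has unexpected contents after OTA update");
endif;
'''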
@@ -1286,6 +1302,15 @@
     return ctx.hexdigest()

+  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
+    """Return the hash value for all zero blocks."""
+    zero_block = '\x00' * 4096
+    ctx = sha1()
+    for _ in range(num_blocks):
+      ctx.update(zero_block)
+
+    return ctx.hexdigest()
+
   # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
   # remounting R/W. Will change the checking to a finer-grained way to
   # mask off those bits.
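
A standalone sketch of the zero-block digest idea: the expected value is simply the SHA-1 over num_blocks 4096-byte blocks of zeroes, streamed block by block. Only hashlib is needed; the helper name is illustrative:

# Sketch only: the expected digest for the extended region is the SHA-1 of
# num_blocks all-zero 4096-byte blocks, streamed one block at a time.
from hashlib import sha1

BLOCK_SIZE = 4096

def hash_zero_blocks(num_blocks):
    """Return the hex SHA-1 of num_blocks all-zero blocks."""
    zero_block = b'\x00' * BLOCK_SIZE
    ctx = sha1()
    for _ in range(num_blocks):
        ctx.update(zero_block)
    return ctx.hexdigest()

if __name__ == "__main__":
    # The recovery-side range_sha1() over the zeroed extended blocks must
    # match this value, or the updater aborts.
    print(hash_zero_blocks(512))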

tools/releasetools/rangelib.py

@@ -238,6 +238,28 @@ class RangeSet(object):
         out.append(offset + p - start)
     return RangeSet(data=out)

+  def extend(self, n):
+    """Extend the RangeSet by 'n' blocks.
+
+    The lower bound is guaranteed to be non-negative.
+
+    >>> RangeSet("0-9").extend(1)
+    <RangeSet("0-10")>
+    >>> RangeSet("10-19").extend(15)
+    <RangeSet("0-34")>
+    >>> RangeSet("10-19 30-39").extend(4)
+    <RangeSet("6-23 26-43")>
+    >>> RangeSet("10-19 30-39").extend(10)
+    <RangeSet("0-49")>
+    """
+    out = self
+    for i in range(0, len(self.data), 2):
+      s, e = self.data[i:i+2]
+      s1 = max(0, s - n)
+      e1 = e + n
+      out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
+    return out
+

 if __name__ == "__main__":
   import doctest
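
A standalone sketch of the same grow-and-merge behaviour on plain (start, end) half-open intervals, mirroring the doctests above; RangeSet itself works on its flattened start/end pairs and merges via union(), and the helper below is illustrative only:

# Sketch only: grow every interval by n blocks on both sides, clamp at zero,
# and merge anything that now overlaps or touches.

def extend_intervals(intervals, n):
    """Return the merged list of [start, end) intervals after growing each
    one by n blocks on both sides (lower bound clamped at zero)."""
    grown = sorted((max(0, s - n), e + n) for s, e in intervals)
    merged = []
    for s, e in grown:
        if merged and s <= merged[-1][1]:   # overlaps or touches the previous run
            merged[-1] = (merged[-1][0], max(merged[-1][1], e))
        else:
            merged.append((s, e))
    return merged

if __name__ == "__main__":
    # Mirrors the doctests above ("10-19 30-39" is [(10, 20), (30, 40)] here).
    print(extend_intervals([(10, 20), (30, 40)], 4))    # [(6, 24), (26, 44)]
    print(extend_intervals([(10, 20), (30, 40)], 10))   # [(0, 50)]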

tools/releasetools/sparse_img.py

@@ -110,6 +110,17 @@ class SparseImage(object):
     self.care_map = rangelib.RangeSet(care_data)
     self.offset_index = [i[0] for i in offset_map]

+    # Bug: 20881595
+    # Introduce extended blocks as a workaround for the bug. dm-verity may
+    # touch blocks that are not in the care_map due to block device
+    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
+    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
+    # are the maximum read-ahead we configure for dm-verity block devices.
+    extended = self.care_map.extend(512)
+    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
+    extended = extended.intersect(all_blocks).subtract(self.care_map)
+    self.extended = extended
+
     if file_map_fn:
       self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
     else:
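
Finally, a standalone sketch of how the extended set is derived, using plain Python sets of block numbers instead of RangeSet; READ_AHEAD_BLOCKS mirrors the 512-block read-ahead mentioned in the comment above, and everything else is illustrative:

# Sketch only: grow each run of care_map by the read-ahead, clip to the
# partition, then drop the care_map blocks themselves.

READ_AHEAD_BLOCKS = 512

def compute_extended(care_map, total_blocks, pad=READ_AHEAD_BLOCKS):
    """Blocks outside care_map that read-ahead may pull in and that must
    therefore be zeroed."""
    grown = set()
    for start, end in runs(sorted(care_map)):
        grown.update(range(max(0, start - pad), min(total_blocks, end + pad)))
    return grown - care_map

def runs(blocks):
    """Yield (start, end) half-open runs of consecutive block numbers."""
    start = prev = blocks[0]
    for b in blocks[1:]:
        if b != prev + 1:
            yield (start, prev + 1)
            start = b
        prev = b
    yield (start, prev + 1)

if __name__ == "__main__":
    # 4096-block partition whose data occupies blocks 0-999: the next 512
    # blocks (1000-1511) become "extended"; they get zeroed by the transfer
    # list and verified against the all-zero hash after the update.
    ext = compute_extended(set(range(1000)), 4096)
    print(min(ext), max(ext), len(ext))   # 1000 1511 512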