forked from openkylin/platform_build

Merge "Generate care map after merging target_files" am: f92961248b

Original change: https://android-review.googlesource.com/c/platform/build/+/1649488
Change-Id: Ifac4a30a9fcf69a58bbe75e04089de3974906e71

commit 79c96bf3a9
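In short, this change moves the GetCareMap and AddCareMapForAbOta helpers out of add_img_to_target_files.py into common.py, and has merge_target_files.py call the shared helper so that META/care_map.pb is generated after the framework and vendor target_files halves have been merged. The snippet below is only an illustrative sketch of that call pattern, not code from the change itself; the helper name, the 'a' zip mode, and the assumption that common.OPTIONS.info_dict and common.OPTIONS.input_tmp are already populated (for example via common.LoadInfoDict) are mine.

# Illustrative sketch (not part of this change): generating the care map for an
# already-merged target_files package via the helper that now lives in common.py.
# Assumes common.OPTIONS.info_dict and common.OPTIONS.input_tmp were populated
# beforehand, and that partition_map maps partition names such as "system" to
# their image paths, as in merge_target_files.py.
import zipfile

import common
from common import AddCareMapForAbOta, PARTITIONS_WITH_CARE_MAP


def add_care_map_to_merged_package(merged_zip_path, partition_map):
  # Mode 'a' is an assumption here so the care map can be written straight into
  # the zip; AddCareMapForAbOta otherwise falls back to writing care_map.pb
  # under OPTIONS.input_tmp/META/ and queueing it for replacement in the zip.
  with zipfile.ZipFile(merged_zip_path, 'a', allowZip64=True) as zfp:
    AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)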
tools/releasetools/add_img_to_target_files.py

@@ -59,12 +59,11 @@ import zipfile
 import build_image
 import build_super_image
 import common
-import rangelib
-import sparse_img
 import verity_utils
 import ota_metadata_pb2
 
 from apex_utils import GetSystemApexInfoFromTargetFiles
+from common import AddCareMapForAbOta
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -110,45 +109,6 @@ class OutputFile(object):
       common.ZipWrite(self._output_zip, self.name, self._zip_name)
 
 
-def GetCareMap(which, imgname):
-  """Returns the care_map string for the given partition.
-
-  Args:
-    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
-    imgname: The filename of the image.
-
-  Returns:
-    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
-    RangeSet; or None.
-  """
-  assert which in common.PARTITIONS_WITH_CARE_MAP
-
-  # which + "_image_size" contains the size that the actual filesystem image
-  # resides in, which is all that needs to be verified. The additional blocks in
-  # the image file contain verity metadata, by reading which would trigger
-  # invalid reads.
-  image_size = OPTIONS.info_dict.get(which + "_image_size")
-  if not image_size:
-    return None
-
-  image_blocks = int(image_size) // 4096 - 1
-  assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
-  # For sparse images, we will only check the blocks that are listed in the care
-  # map, i.e. the ones with meaningful data.
-  if "extfs_sparse_flag" in OPTIONS.info_dict:
-    simg = sparse_img.SparseImage(imgname)
-    care_map_ranges = simg.care_map.intersect(
-        rangelib.RangeSet("0-{}".format(image_blocks)))
-
-  # Otherwise for non-sparse images, we read all the blocks in the filesystem
-  # image.
-  else:
-    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
-  return [which, care_map_ranges.to_string_raw()]
-
-
 def AddSystem(output_zip, recovery_img=None, boot_img=None):
   """Turn the contents of SYSTEM into a system image and store it in
   output_zip. Returns the name of the system image file."""
@@ -179,7 +139,8 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None):
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
+                          "IMAGES", "system.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
               block_list=block_list)
   return img.name
@@ -227,7 +188,8 @@ def AddVendor(output_zip, recovery_img=None, boot_img=None):
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
+                          "IMAGES", "vendor.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
               block_list=block_list)
   return img.name
@@ -299,6 +261,7 @@ def AddVendorDlkm(output_zip):
               block_list=block_list)
   return img.name
 
+
 def AddOdmDlkm(output_zip):
   """Turn the contents of OdmDlkm into an odm_dlkm image and store it in output_zip."""
 
@@ -350,6 +313,7 @@ def AddDtbo(output_zip):
   img.Write()
   return img.name
 
+
 def AddPvmfw(output_zip):
   """Adds the pvmfw image.
 
@@ -385,6 +349,7 @@ def AddPvmfw(output_zip):
   img.Write()
   return img.name
 
+
 def AddCustomImages(output_zip, partition_name):
   """Adds and signs custom images in IMAGES/.
 
@@ -414,7 +379,8 @@ def AddCustomImages(output_zip, partition_name):
 
   for img_name in OPTIONS.info_dict.get(
       "avb_{}_image_list".format(partition_name)).split():
-    custom_image = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
+    custom_image = OutputFile(
+        output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
     if os.path.exists(custom_image.name):
       continue
 
@@ -644,72 +610,6 @@ def CheckAbOtaImages(output_zip, ab_partitions):
     assert available, "Failed to find " + img_name
 
 
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
-  """Generates and adds care_map.pb for a/b partition that has care_map.
-
-  Args:
-    output_zip: The output zip file (needs to be already open), or None to
-        write care_map.pb to OPTIONS.input_tmp/.
-    ab_partitions: The list of A/B partitions.
-    image_paths: A map from the partition name to the image path.
-  """
-  care_map_list = []
-  for partition in ab_partitions:
-    partition = partition.strip()
-    if partition not in common.PARTITIONS_WITH_CARE_MAP:
-      continue
-
-    verity_block_device = "{}_verity_block_device".format(partition)
-    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
-    if (verity_block_device in OPTIONS.info_dict or
-        OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
-      image_path = image_paths[partition]
-      assert os.path.exists(image_path)
-
-      care_map = GetCareMap(partition, image_path)
-      if not care_map:
-        continue
-      care_map_list += care_map
-
-      # adds fingerprint field to the care_map
-      # TODO(xunchang) revisit the fingerprint calculation for care_map.
-      partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
-      prop_name_list = ["ro.{}.build.fingerprint".format(partition),
-                        "ro.{}.build.thumbprint".format(partition)]
-
-      present_props = [x for x in prop_name_list if
-                       partition_props and partition_props.GetProp(x)]
-      if not present_props:
-        logger.warning("fingerprint is not present for partition %s", partition)
-        property_id, fingerprint = "unknown", "unknown"
-      else:
-        property_id = present_props[0]
-        fingerprint = partition_props.GetProp(property_id)
-      care_map_list += [property_id, fingerprint]
-
-  if not care_map_list:
-    return
-
-  # Converts the list into proto buf message by calling care_map_generator; and
-  # writes the result to a temp file.
-  temp_care_map_text = common.MakeTempFile(prefix="caremap_text-",
-                                           suffix=".txt")
-  with open(temp_care_map_text, 'w') as text_file:
-    text_file.write('\n'.join(care_map_list))
-
-  temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
-  care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
-  common.RunAndCheckOutput(care_map_gen_cmd)
-
-  care_map_path = "META/care_map.pb"
-  if output_zip and care_map_path not in output_zip.namelist():
-    common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
-  else:
-    shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
-    if output_zip:
-      OPTIONS.replace_updated_files_list.append(care_map_path)
-
-
 def AddPackRadioImages(output_zip, images):
   """Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/.
 
@@ -791,6 +691,7 @@ def HasPartition(partition_name):
           os.path.join(OPTIONS.input_tmp, "IMAGES",
                        "{}.img".format(partition_name))))
 
+
 def AddApexInfo(output_zip):
   apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp)
   apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
@@ -1099,6 +1000,7 @@ def main(argv):
   AddImagesToTargetFiles(args[0])
   logger.info("done.")
 
+
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
tools/releasetools/common.py

@@ -41,6 +41,7 @@ import zipfile
 from hashlib import sha1, sha256
 
 import images
+import rangelib
 import sparse_img
 from blockimgdiff import BlockImageDiff
 
@@ -137,6 +138,7 @@ PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']
 # existing search paths.
 RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
 
+
 class ErrorCode(object):
   """Define error_codes for failures that happen during the actual
   update package installation.
@@ -225,6 +227,7 @@ def InitLogging():
 def SetHostToolLocation(tool_name, location):
   OPTIONS.host_tools[tool_name] = location
 
+
 def FindHostToolPath(tool_name):
   """Finds the path to the host tool.
 
@@ -245,6 +248,7 @@ def FindHostToolPath(tool_name):
 
   return tool_name
 
+
 def Run(args, verbose=None, **kwargs):
   """Creates and returns a subprocess.Popen object.
 
@@ -776,7 +780,8 @@ def LoadInfoDict(input_file, repacking=False):
   for partition in PARTITIONS_WITH_BUILD_PROP:
     fingerprint = build_info.GetPartitionFingerprint(partition)
     if fingerprint:
-      d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
+      d["avb_{}_salt".format(partition)] = sha256(
+          fingerprint.encode()).hexdigest()
   try:
     d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
   except KeyError:
@@ -784,7 +789,6 @@ def LoadInfoDict(input_file, repacking=False):
   return d
 
 
-
 def LoadListFromFile(file_path):
   with open(file_path) as f:
     return f.read().splitlines()
@@ -1744,15 +1748,19 @@ def _BuildVendorBootImage(sourcedir, info_dict=None):
   if os.access(fn, os.F_OK):
     ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
     for ramdisk_fragment in ramdisk_fragments:
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "mkbootimg_args")
       cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "prebuilt_ramdisk")
       # Use prebuilt image if found, else create ramdisk from supplied files.
       if os.access(fn, os.F_OK):
         ramdisk_fragment_pathname = fn
       else:
-        ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
-        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+        ramdisk_fragment_root = os.path.join(
+            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+        ramdisk_fragment_img = _MakeRamdisk(
+            ramdisk_fragment_root, lz4_ramdisks=use_lz4)
         ramdisk_fragment_imgs.append(ramdisk_fragment_img)
         ramdisk_fragment_pathname = ramdisk_fragment_img.name
       cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
@@ -3683,7 +3691,8 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
   """
   tmp_dir = MakeTempDir('boot_', suffix='.img')
   try:
-    RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
+    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
+                       boot_img, '--out', tmp_dir])
     ramdisk = os.path.join(tmp_dir, 'ramdisk')
     if not os.path.isfile(ramdisk):
       logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
@@ -3711,7 +3720,8 @@ def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
       prop_file = os.path.join(extracted_ramdisk, search_path)
       if os.path.isfile(prop_file):
         return prop_file
-      logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
+      logger.warning(
+          'Unable to get boot image timestamp: no %s in ramdisk', search_path)
 
     return None
 
@@ -3744,9 +3754,116 @@ def GetBootImageTimestamp(boot_img):
     timestamp = props.GetProp('ro.bootimage.build.date.utc')
     if timestamp:
       return int(timestamp)
-    logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+    logger.warning(
+        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
     return None
 
   except ExternalError as e:
     logger.warning('Unable to get boot image timestamp: %s', e)
     return None
+
+
+def GetCareMap(which, imgname):
+  """Returns the care_map string for the given partition.
+
+  Args:
+    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+    imgname: The filename of the image.
+
+  Returns:
+    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+    RangeSet; or None.
+  """
+  assert which in PARTITIONS_WITH_CARE_MAP
+
+  # which + "_image_size" contains the size that the actual filesystem image
+  # resides in, which is all that needs to be verified. The additional blocks in
+  # the image file contain verity metadata, by reading which would trigger
+  # invalid reads.
+  image_size = OPTIONS.info_dict.get(which + "_image_size")
+  if not image_size:
+    return None
+
+  image_blocks = int(image_size) // 4096 - 1
+  assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+  # For sparse images, we will only check the blocks that are listed in the care
+  # map, i.e. the ones with meaningful data.
+  if "extfs_sparse_flag" in OPTIONS.info_dict:
+    simg = sparse_img.SparseImage(imgname)
+    care_map_ranges = simg.care_map.intersect(
+        rangelib.RangeSet("0-{}".format(image_blocks)))
+
+  # Otherwise for non-sparse images, we read all the blocks in the filesystem
+  # image.
+  else:
+    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+  return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
+  """Generates and adds care_map.pb for a/b partition that has care_map.
+
+  Args:
+    output_zip: The output zip file (needs to be already open), or None to
+        write care_map.pb to OPTIONS.input_tmp/.
+    ab_partitions: The list of A/B partitions.
+    image_paths: A map from the partition name to the image path.
+  """
+  care_map_list = []
+  for partition in ab_partitions:
+    partition = partition.strip()
+    if partition not in PARTITIONS_WITH_CARE_MAP:
+      continue
+
+    verity_block_device = "{}_verity_block_device".format(partition)
+    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+    if (verity_block_device in OPTIONS.info_dict or
+        OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+      image_path = image_paths[partition]
+      assert os.path.exists(image_path)
+
+      care_map = GetCareMap(partition, image_path)
+      if not care_map:
+        continue
+      care_map_list += care_map
+
+      # adds fingerprint field to the care_map
+      # TODO(xunchang) revisit the fingerprint calculation for care_map.
+      partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+      prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+                        "ro.{}.build.thumbprint".format(partition)]
+
+      present_props = [x for x in prop_name_list if
+                       partition_props and partition_props.GetProp(x)]
+      if not present_props:
+        logger.warning(
+            "fingerprint is not present for partition %s", partition)
+        property_id, fingerprint = "unknown", "unknown"
+      else:
+        property_id = present_props[0]
+        fingerprint = partition_props.GetProp(property_id)
+      care_map_list += [property_id, fingerprint]
+
+  if not care_map_list:
+    return
+
+  # Converts the list into proto buf message by calling care_map_generator; and
+  # writes the result to a temp file.
+  temp_care_map_text = MakeTempFile(prefix="caremap_text-",
+                                    suffix=".txt")
+  with open(temp_care_map_text, 'w') as text_file:
+    text_file.write('\n'.join(care_map_list))
+
+  temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
+  care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+  RunAndCheckOutput(care_map_gen_cmd)
+
+  care_map_path = "META/care_map.pb"
+  if output_zip and care_map_path not in output_zip.namelist():
+    ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
+  else:
+    shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+    if output_zip:
+      OPTIONS.replace_updated_files_list.append(care_map_path)
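A reading note on the AddCareMapForAbOta body added above: the care map is first assembled as a flat list of strings, four entries per qualifying partition (partition name, raw RangeSet string from GetCareMap, fingerprint property name, fingerprint value, with "unknown" substituted when no fingerprint property is set), written newline-separated to a temporary text file, and only then converted to META/care_map.pb by the care_map_generator tool. The sketch below illustrates that intermediate layout; the range strings and file name are made up for illustration.

# Sketch of the intermediate care_map text fed to care_map_generator.
# The range values are illustrative; the four-entries-per-partition layout
# follows the AddCareMapForAbOta body above and the expectations in
# test_add_img_to_target_files.py.
care_map_list = [
    "system",
    "4,0,6,10,16",   # raw RangeSet string for the cared-about blocks (illustrative)
    "unknown",       # fingerprint property name ("unknown" when none is present)
    "unknown",       # fingerprint value
    "vendor",
    "2,0,10",        # illustrative raw RangeSet string
    "unknown",
    "unknown",
]
with open("caremap_text.txt", "w") as text_file:  # stand-in for the temp file
  text_file.write("\n".join(care_map_list))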
tools/releasetools/merge_target_files.py

@@ -103,6 +103,8 @@ import img_from_target_files
 import find_shareduid_violation
 import ota_from_target_files
 
+from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
+
 logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
@@ -1092,6 +1094,8 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
                        temp_dir)
 
   # Create the IMG package from the merged target files package.
+  with zipfile.ZipFile(output_zip, allowZip64=True) as zfp:
+    AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)
 
   if output_img:
     img_from_target_files.main([output_zip, output_img])
@@ -1164,7 +1168,8 @@ def main():
     elif o == '--vendor-target-files':
       OPTIONS.vendor_target_files = a
     elif o == '--other-item-list':
-      logger.warning('--other-item-list has been renamed to --vendor-item-list')
+      logger.warning(
+          '--other-item-list has been renamed to --vendor-item-list')
       OPTIONS.vendor_item_list = a
     elif o == '--vendor-item-list':
       OPTIONS.vendor_item_list = a
tools/releasetools/test_add_img_to_target_files.py

@@ -21,9 +21,10 @@ import zipfile
 import common
 import test_utils
 from add_img_to_target_files import (
-    AddCareMapForAbOta, AddPackRadioImages,
+    AddPackRadioImages,
     CheckAbOtaImages, GetCareMap)
 from rangelib import RangeSet
+from common import AddCareMapForAbOta
 
 
 OPTIONS = common.OPTIONS
@@ -254,7 +255,8 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase):
 
     care_map_file = os.path.join(OPTIONS.input_tmp, 'META', 'care_map.pb')
     expected = ['system', RangeSet("0-5 10-15").to_string_raw(), "unknown",
-                "unknown", 'vendor', RangeSet("0-9").to_string_raw(), "unknown",
+                "unknown", 'vendor', RangeSet(
+                    "0-9").to_string_raw(), "unknown",
                 "unknown"]
 
     self._verifyCareMap(expected, care_map_file)
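For completeness, a hedged sketch of exercising GetCareMap directly from its new home in common.py; the info_dict contents and image path below are placeholders rather than values taken from the change or the test.

# Placeholder example (not from the change): GetCareMap looks up
# "<partition>_image_size" in OPTIONS.info_dict and, for a non-sparse image
# (no "extfs_sparse_flag" in info_dict, so the image file is never read),
# returns the partition name plus the raw string of a RangeSet covering only
# the blocks inside that size; trailing verity metadata blocks are left out.
import common
from common import GetCareMap

common.OPTIONS.info_dict = {
    "system_image_size": 4096 * 1025,  # placeholder filesystem size in bytes
}
care_map = GetCareMap("system", "/path/to/system.img")
# Expected shape: ["system", "<raw RangeSet string>"]; None is returned when
# the "<partition>_image_size" entry is missing from info_dict.
print(care_map)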