diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 4fe10c6ca..d7de85b90 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -59,12 +59,11 @@ import zipfile
 import build_image
 import build_super_image
 import common
-import rangelib
-import sparse_img
 import verity_utils
 import ota_metadata_pb2
 
 from apex_utils import GetSystemApexInfoFromTargetFiles
+from common import AddCareMapForAbOta
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -110,45 +109,6 @@ class OutputFile(object):
       common.ZipWrite(self._output_zip, self.name, self._zip_name)
 
 
-def GetCareMap(which, imgname):
-  """Returns the care_map string for the given partition.
-
-  Args:
-    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
-    imgname: The filename of the image.
-
-  Returns:
-    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
-    RangeSet; or None.
-  """
-  assert which in common.PARTITIONS_WITH_CARE_MAP
-
-  # which + "_image_size" contains the size that the actual filesystem image
-  # resides in, which is all that needs to be verified. The additional blocks in
-  # the image file contain verity metadata, by reading which would trigger
-  # invalid reads.
-  image_size = OPTIONS.info_dict.get(which + "_image_size")
-  if not image_size:
-    return None
-
-  image_blocks = int(image_size) // 4096 - 1
-  assert image_blocks > 0, "blocks for {} must be positive".format(which)
-
-  # For sparse images, we will only check the blocks that are listed in the care
-  # map, i.e. the ones with meaningful data.
-  if "extfs_sparse_flag" in OPTIONS.info_dict:
-    simg = sparse_img.SparseImage(imgname)
-    care_map_ranges = simg.care_map.intersect(
-        rangelib.RangeSet("0-{}".format(image_blocks)))
-
-  # Otherwise for non-sparse images, we read all the blocks in the filesystem
-  # image.
-  else:
-    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
-
-  return [which, care_map_ranges.to_string_raw()]
-
-
 def AddSystem(output_zip, recovery_img=None, boot_img=None):
   """Turn the contents of SYSTEM into a system image and store it in
   output_zip. Returns the name of the system image file."""
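Note that GetCareMap is not dropped outright: an identical implementation is added to common.py later in this patch, with the module-level references adjusted. For orientation, a minimal sketch of the block arithmetic it performs on the non-sparse path, assuming a hypothetical 64 KiB image size (the values are illustrative, not taken from this change):

    # Non-sparse branch of GetCareMap(), sketched with an assumed
    # system_image_size of 65536 bytes (16 blocks of 4096 bytes each).
    import rangelib

    image_size = 65536
    # RangeSet text ranges are inclusive, so "count - 1" is the last block index.
    image_blocks = int(image_size) // 4096 - 1          # 15
    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
    care_map = ["system", care_map_ranges.to_string_raw()]

On the sparse path the same "0-{last block}" range is additionally intersected with the sparse image's own care map, so only blocks that carry data are verified.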
@@ -174,12 +134,13 @@ def AddSystem(output_zip, recovery_img=None, boot_img=None):
       "board_uses_vendorimage") == "true"
 
   if (OPTIONS.rebuild_recovery and not board_uses_vendorimage and
-      recovery_img is not None and boot_img is not None):
+          recovery_img is not None and boot_img is not None):
     logger.info("Building new recovery patch on system at system/vendor")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
+                          "IMAGES", "system.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
               block_list=block_list)
   return img.name
@@ -222,12 +183,13 @@ def AddVendor(output_zip, recovery_img=None, boot_img=None):
       "board_uses_vendorimage") == "true"
 
   if (OPTIONS.rebuild_recovery and board_uses_vendorimage and
-      recovery_img is not None and boot_img is not None):
+          recovery_img is not None and boot_img is not None):
    logger.info("Building new recovery patch on vendor")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "vendor.map")
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp,
+                          "IMAGES", "vendor.map")
   CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
               block_list=block_list)
   return img.name
@@ -299,6 +261,7 @@ def AddVendorDlkm(output_zip):
               block_list=block_list)
   return img.name
 
+
 def AddOdmDlkm(output_zip):
   """Turn the contents of OdmDlkm into an odm_dlkm image and store it in
   output_zip."""
@@ -350,6 +313,7 @@ def AddDtbo(output_zip):
   img.Write()
   return img.name
 
+
 def AddPvmfw(output_zip):
   """Adds the pvmfw image.
 
@@ -385,6 +349,7 @@ def AddPvmfw(output_zip):
   img.Write()
   return img.name
 
+
 def AddCustomImages(output_zip, partition_name):
   """Adds and signs custom images in IMAGES/.
 
@@ -413,15 +378,16 @@
       key_path, algorithm, extra_args)
 
   for img_name in OPTIONS.info_dict.get(
-      "avb_{}_image_list".format(partition_name)).split():
-    custom_image = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
+          "avb_{}_image_list".format(partition_name)).split():
+    custom_image = OutputFile(
+        output_zip, OPTIONS.input_tmp, "IMAGES", img_name)
     if os.path.exists(custom_image.name):
       continue
 
     custom_image_prebuilt_path = os.path.join(
         OPTIONS.input_tmp, "PREBUILT_IMAGES", img_name)
     assert os.path.exists(custom_image_prebuilt_path), \
-      "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
+        "Failed to find %s at %s" % (img_name, custom_image_prebuilt_path)
     shutil.copy(custom_image_prebuilt_path, custom_image.name)
 
@@ -644,72 +610,6 @@ def CheckAbOtaImages(output_zip, ab_partitions):
     assert available, "Failed to find " + img_name
 
 
-def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
-  """Generates and adds care_map.pb for a/b partition that has care_map.
-
-  Args:
-    output_zip: The output zip file (needs to be already open), or None to
-      write care_map.pb to OPTIONS.input_tmp/.
-    ab_partitions: The list of A/B partitions.
-    image_paths: A map from the partition name to the image path.
- """ - care_map_list = [] - for partition in ab_partitions: - partition = partition.strip() - if partition not in common.PARTITIONS_WITH_CARE_MAP: - continue - - verity_block_device = "{}_verity_block_device".format(partition) - avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition) - if (verity_block_device in OPTIONS.info_dict or - OPTIONS.info_dict.get(avb_hashtree_enable) == "true"): - image_path = image_paths[partition] - assert os.path.exists(image_path) - - care_map = GetCareMap(partition, image_path) - if not care_map: - continue - care_map_list += care_map - - # adds fingerprint field to the care_map - # TODO(xunchang) revisit the fingerprint calculation for care_map. - partition_props = OPTIONS.info_dict.get(partition + ".build.prop") - prop_name_list = ["ro.{}.build.fingerprint".format(partition), - "ro.{}.build.thumbprint".format(partition)] - - present_props = [x for x in prop_name_list if - partition_props and partition_props.GetProp(x)] - if not present_props: - logger.warning("fingerprint is not present for partition %s", partition) - property_id, fingerprint = "unknown", "unknown" - else: - property_id = present_props[0] - fingerprint = partition_props.GetProp(property_id) - care_map_list += [property_id, fingerprint] - - if not care_map_list: - return - - # Converts the list into proto buf message by calling care_map_generator; and - # writes the result to a temp file. - temp_care_map_text = common.MakeTempFile(prefix="caremap_text-", - suffix=".txt") - with open(temp_care_map_text, 'w') as text_file: - text_file.write('\n'.join(care_map_list)) - - temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb") - care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map] - common.RunAndCheckOutput(care_map_gen_cmd) - - care_map_path = "META/care_map.pb" - if output_zip and care_map_path not in output_zip.namelist(): - common.ZipWrite(output_zip, temp_care_map, arcname=care_map_path) - else: - shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path)) - if output_zip: - OPTIONS.replace_updated_files_list.append(care_map_path) - - def AddPackRadioImages(output_zip, images): """Copies images listed in META/pack_radioimages.txt from RADIO/ to IMAGES/. @@ -785,11 +685,12 @@ def HasPartition(partition_name): return ((os.path.isdir( os.path.join(OPTIONS.input_tmp, partition_name.upper())) and - OPTIONS.info_dict.get( - "building_{}_image".format(partition_name)) == "true") or - os.path.exists( - os.path.join(OPTIONS.input_tmp, "IMAGES", - "{}.img".format(partition_name)))) + OPTIONS.info_dict.get( + "building_{}_image".format(partition_name)) == "true") or + os.path.exists( + os.path.join(OPTIONS.input_tmp, "IMAGES", + "{}.img".format(partition_name)))) + def AddApexInfo(output_zip): apex_infos = GetSystemApexInfoFromTargetFiles(OPTIONS.input_tmp) @@ -878,7 +779,7 @@ def AddImagesToTargetFiles(filename): boot_images = OPTIONS.info_dict.get("boot_images") if boot_images is None: boot_images = "boot.img" - for index,b in enumerate(boot_images.split()): + for index, b in enumerate(boot_images.split()): # common.GetBootableImage() returns the image directly if present. 
      boot_image = common.GetBootableImage(
           "IMAGES/" + b, b, OPTIONS.input_tmp, "BOOT")
@@ -1033,7 +934,7 @@
 
   if OPTIONS.info_dict.get("build_super_partition") == "true":
     if OPTIONS.info_dict.get(
-        "build_retrofit_dynamic_partitions_ota_package") == "true":
+            "build_retrofit_dynamic_partitions_ota_package") == "true":
       banner("super split images")
       AddSuperSplit(output_zip)
 
@@ -1099,6 +1000,7 @@ def main(argv):
   AddImagesToTargetFiles(args[0])
   logger.info("done.")
 
+
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
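With the helpers relocated, add_img_to_target_files.py keeps calling AddCareMapForAbOta by its unqualified name through the new `from common import AddCareMapForAbOta` line; only the definition moves. A hedged sketch of the call shape the function expects, mirroring its docstring rather than an exact call site from this patch (the paths are made up):

    # Illustrative only; per the docstring, passing None as output_zip writes
    # care_map.pb under OPTIONS.input_tmp/ instead of into a zip.
    from common import AddCareMapForAbOta

    image_paths = {
        "system": "/tmp/targetfiles/IMAGES/system.img",   # assumed paths
        "vendor": "/tmp/targetfiles/IMAGES/vendor.img",
    }
    AddCareMapForAbOta(None, ["system", "vendor"], image_paths)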
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 414ab9748..c5e1adea1 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -41,6 +41,7 @@ import zipfile
 from hashlib import sha1, sha256
 
 import images
+import rangelib
 import sparse_img
 
 from blockimgdiff import BlockImageDiff
@@ -137,6 +138,7 @@ PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']
 # existing search paths.
 RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
 
+
 class ErrorCode(object):
   """Define error_codes for failures that happen during the actual
   update package installation.
@@ -225,6 +227,7 @@ def InitLogging():
 def SetHostToolLocation(tool_name, location):
   OPTIONS.host_tools[tool_name] = location
 
+
 def FindHostToolPath(tool_name):
   """Finds the path to the host tool.
 
@@ -245,6 +248,7 @@ def FindHostToolPath(tool_name):
 
   return tool_name
 
+
 def Run(args, verbose=None, **kwargs):
   """Creates and returns a subprocess.Popen object.
 
@@ -460,7 +464,7 @@ class BuildInfo(object):
     """Returns the inquired build property for the provided partition."""
 
     # Boot image uses ro.[product.]bootimage instead of boot.
-    prop_partition = "bootimage" if partition == "boot" else partition
+    prop_partition = "bootimage" if partition == "boot" else partition
 
     # If provided a partition for this property, only look within that
     # partition's build.prop.
@@ -769,7 +773,8 @@ def LoadInfoDict(input_file, repacking=False):
   for partition in PARTITIONS_WITH_BUILD_PROP:
     fingerprint = build_info.GetPartitionFingerprint(partition)
     if fingerprint:
-      d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
+      d["avb_{}_salt".format(partition)] = sha256(
+          fingerprint.encode()).hexdigest()
   try:
     d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
   except KeyError:
@@ -777,7 +782,6 @@
   return d
 
 
-
 def LoadListFromFile(file_path):
   with open(file_path) as f:
     return f.read().splitlines()
@@ -1091,7 +1095,7 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
     return " ".join(sorted(combined))
 
   if (framework_dict.get("use_dynamic_partitions") !=
-      "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
     raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
 
   merged_dict = {"use_dynamic_partitions": "true"}
@@ -1569,7 +1573,7 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
     RunAndCheckOutput(cmd)
 
   if (info_dict.get("boot_signer") == "true" and
-      info_dict.get("verity_key")):
+          info_dict.get("verity_key")):
     # Hard-code the path as "/boot" for two-step special recovery image (which
     # will be loaded into /boot during the two-step OTA).
     if two_step_image:
@@ -1734,15 +1738,19 @@ def _BuildVendorBootImage(sourcedir, info_dict=None):
   if os.access(fn, os.F_OK):
     ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
     for ramdisk_fragment in ramdisk_fragments:
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "mkbootimg_args")
       cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
-      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
+      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
+                        ramdisk_fragment, "prebuilt_ramdisk")
       # Use prebuilt image if found, else create ramdisk from supplied files.
       if os.access(fn, os.F_OK):
         ramdisk_fragment_pathname = fn
       else:
-        ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
-        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+        ramdisk_fragment_root = os.path.join(
+            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+        ramdisk_fragment_img = _MakeRamdisk(
+            ramdisk_fragment_root, lz4_ramdisks=use_lz4)
         ramdisk_fragment_imgs.append(ramdisk_fragment_img)
         ramdisk_fragment_pathname = ramdisk_fragment_img.name
       cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
@@ -3513,7 +3521,7 @@
 
     for g in tgt_groups:
       for p in shlex.split(info_dict.get(
-          "super_%s_partition_list" % g, "").strip()):
+              "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in target super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3521,7 +3529,7 @@
 
     for g in src_groups:
       for p in shlex.split(source_info_dict.get(
-          "super_%s_partition_list" % g, "").strip()):
+              "super_%s_partition_list" % g, "").strip()):
         assert p in self._partition_updates, \
             "{} is in source super_{}_partition_list but no BlockDifference " \
             "object is provided.".format(p, g)
@@ -3630,7 +3638,7 @@
 
       if u.src_size is not None and u.tgt_size is None:
         append('remove_group %s' % g)
      if (u.src_size is not None and u.tgt_size is not None and
-          u.src_size > u.tgt_size):
+              u.src_size > u.tgt_size):
         comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))
@@ -3639,7 +3647,7 @@
         comment('Add group %s with maximum size %d' % (g, u.tgt_size))
         append('add_group %s %d' % (g, u.tgt_size))
       if (u.src_size is not None and u.tgt_size is not None and
-          u.src_size < u.tgt_size):
+              u.src_size < u.tgt_size):
         comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
         append('resize_group %s %d' % (g, u.tgt_size))
 
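The two reindented conditionals above only change line wrapping, but they sit in the logic that turns source/target group sizes into dynamic-partition op-list lines. A rough standalone sketch of that decision, not code from this change, with the op strings taken from the append() calls shown above:

    # Hedged sketch of the group-size handling visible in the hunks above.
    def group_ops(g, src_size, tgt_size):
      ops = []
      if src_size is not None and tgt_size is None:
        ops.append('remove_group %s' % g)             # group disappears
      if (src_size is not None and tgt_size is not None and
              src_size > tgt_size):
        ops.append('resize_group %s %d' % (g, tgt_size))  # shrink
      if src_size is None and tgt_size is not None:
        ops.append('add_group %s %d' % (g, tgt_size))     # new group
      if (src_size is not None and tgt_size is not None and
              src_size < tgt_size):
        ops.append('resize_group %s %d' % (g, tgt_size))  # grow
      return ops

    # e.g. group_ops('group_foo', 4 << 30, 2 << 30)
    #   -> ['resize_group group_foo 2147483648']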
@@ -3673,7 +3681,8 @@ def GetBootImageBuildProp(boot_img):
   """
   tmp_dir = MakeTempDir('boot_', suffix='.img')
   try:
-    RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
+    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
+                       boot_img, '--out', tmp_dir])
     ramdisk = os.path.join(tmp_dir, 'ramdisk')
     if not os.path.isfile(ramdisk):
       logger.warning('Unable to get boot image timestamp: no ramdisk in boot')
@@ -3686,13 +3695,14 @@ def GetBootImageBuildProp(boot_img):
     # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
     # the host environment.
     RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
-                     cwd=extracted_ramdisk)
+                      cwd=extracted_ramdisk)
 
     for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
       prop_file = os.path.join(extracted_ramdisk, search_path)
       if os.path.isfile(prop_file):
         return prop_file
-      logger.warning('Unable to get boot image timestamp: no %s in ramdisk', search_path)
+      logger.warning(
+          'Unable to get boot image timestamp: no %s in ramdisk', search_path)
 
     return None
 
@@ -3725,9 +3735,116 @@ def GetBootImageTimestamp(boot_img):
     timestamp = props.GetProp('ro.bootimage.build.date.utc')
     if timestamp:
       return int(timestamp)
-    logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+    logger.warning(
+        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
     return None
   except ExternalError as e:
     logger.warning('Unable to get boot image timestamp: %s', e)
     return None
+
+
+def GetCareMap(which, imgname):
+  """Returns the care_map string for the given partition.
+
+  Args:
+    which: The partition name, must be listed in PARTITIONS_WITH_CARE_MAP.
+    imgname: The filename of the image.
+
+  Returns:
+    (which, care_map_ranges): care_map_ranges is the raw string of the care_map
+    RangeSet; or None.
+  """
+  assert which in PARTITIONS_WITH_CARE_MAP
+
+  # which + "_image_size" contains the size that the actual filesystem image
+  # resides in, which is all that needs to be verified. The additional blocks in
+  # the image file contain verity metadata, by reading which would trigger
+  # invalid reads.
+  image_size = OPTIONS.info_dict.get(which + "_image_size")
+  if not image_size:
+    return None
+
+  image_blocks = int(image_size) // 4096 - 1
+  assert image_blocks > 0, "blocks for {} must be positive".format(which)
+
+  # For sparse images, we will only check the blocks that are listed in the care
+  # map, i.e. the ones with meaningful data.
+  if "extfs_sparse_flag" in OPTIONS.info_dict:
+    simg = sparse_img.SparseImage(imgname)
+    care_map_ranges = simg.care_map.intersect(
+        rangelib.RangeSet("0-{}".format(image_blocks)))
+
+  # Otherwise for non-sparse images, we read all the blocks in the filesystem
+  # image.
+  else:
+    care_map_ranges = rangelib.RangeSet("0-{}".format(image_blocks))
+
+  return [which, care_map_ranges.to_string_raw()]
+
+
+def AddCareMapForAbOta(output_zip, ab_partitions, image_paths):
+  """Generates and adds care_map.pb for a/b partition that has care_map.
+
+  Args:
+    output_zip: The output zip file (needs to be already open), or None to
+      write care_map.pb to OPTIONS.input_tmp/.
+    ab_partitions: The list of A/B partitions.
+    image_paths: A map from the partition name to the image path.
+  """
+  care_map_list = []
+  for partition in ab_partitions:
+    partition = partition.strip()
+    if partition not in PARTITIONS_WITH_CARE_MAP:
+      continue
+
+    verity_block_device = "{}_verity_block_device".format(partition)
+    avb_hashtree_enable = "avb_{}_hashtree_enable".format(partition)
+    if (verity_block_device in OPTIONS.info_dict or
+        OPTIONS.info_dict.get(avb_hashtree_enable) == "true"):
+      image_path = image_paths[partition]
+      assert os.path.exists(image_path)
+
+      care_map = GetCareMap(partition, image_path)
+      if not care_map:
+        continue
+      care_map_list += care_map
+
+      # adds fingerprint field to the care_map
+      # TODO(xunchang) revisit the fingerprint calculation for care_map.
+      partition_props = OPTIONS.info_dict.get(partition + ".build.prop")
+      prop_name_list = ["ro.{}.build.fingerprint".format(partition),
+                        "ro.{}.build.thumbprint".format(partition)]
+
+      present_props = [x for x in prop_name_list if
+                       partition_props and partition_props.GetProp(x)]
+      if not present_props:
+        logger.warning(
+            "fingerprint is not present for partition %s", partition)
+        property_id, fingerprint = "unknown", "unknown"
+      else:
+        property_id = present_props[0]
+        fingerprint = partition_props.GetProp(property_id)
+      care_map_list += [property_id, fingerprint]
+
+  if not care_map_list:
+    return
+
+  # Converts the list into proto buf message by calling care_map_generator; and
+  # writes the result to a temp file.
+  temp_care_map_text = MakeTempFile(prefix="caremap_text-",
+                                    suffix=".txt")
+  with open(temp_care_map_text, 'w') as text_file:
+    text_file.write('\n'.join(care_map_list))
+
+  temp_care_map = MakeTempFile(prefix="caremap-", suffix=".pb")
+  care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
+  RunAndCheckOutput(care_map_gen_cmd)
+
+  care_map_path = "META/care_map.pb"
+  if output_zip and care_map_path not in output_zip.namelist():
+    ZipWrite(output_zip, temp_care_map, arcname=care_map_path)
+  else:
+    shutil.copy(temp_care_map, os.path.join(OPTIONS.input_tmp, care_map_path))
+    if output_zip:
+      OPTIONS.replace_updated_files_list.append(care_map_path)
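The relocated AddCareMapForAbOta keeps the same two-stage flow: it first writes the flat text form of the care map (partition name, raw RangeSet string, fingerprint property name, fingerprint value, repeated per qualifying partition), then shells out to care_map_generator to turn that text into META/care_map.pb. Note that the body now calls MakeTempFile, RunAndCheckOutput, and ZipWrite without the `common.` prefix and checks PARTITIONS_WITH_CARE_MAP directly, since the code lives inside common.py itself. A hedged sketch of the intermediate text for one partition, with entirely made-up values:

    # Hypothetical care_map text fed to care_map_generator (one field per line).
    care_map_list = [
        "system",                           # partition name
        "4,0,6,10,16",                      # raw form of RangeSet("0-5 10-15")
        "ro.system.build.fingerprint",      # property the fingerprint came from
        "generic/aosp_arm64/generic:12/SP1A/0000000:userdebug/test-keys",
    ]
    # The function writes '\n'.join(care_map_list) to a temp file and then runs:
    #   care_map_generator <text_file> <output.pb>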
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index 16cab4fbd..c2fd45073 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -103,6 +105,8 @@ import img_from_target_files
 import find_shareduid_violation
 import ota_from_target_files
 
+from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
+
 logger = logging.getLogger(__name__)
 
 OPTIONS = common.OPTIONS
@@ -356,7 +358,7 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys,
     has_error = True
 
   if ('dynamic_partition_list' in framework_misc_info_keys) or (
-      'super_partition_groups' in framework_misc_info_keys):
+          'super_partition_groups' in framework_misc_info_keys):
     logger.error('Dynamic partition misc info keys should come from '
                  'the vendor instance of META/misc_info.txt.')
     has_error = True
@@ -448,7 +450,7 @@ def process_misc_info_txt(framework_target_files_temp_dir,
 
   # Merge misc info keys used for Dynamic Partitions.
   if (merged_dict.get('use_dynamic_partitions') == 'true') and (
-      framework_dict.get('use_dynamic_partitions') == 'true'):
+          framework_dict.get('use_dynamic_partitions') == 'true'):
     merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
         framework_dict=framework_dict, vendor_dict=merged_dict)
     merged_dict.update(merged_dynamic_partitions_dict)
@@ -693,7 +695,7 @@ def compile_split_sepolicy(product_out, partition_map, output_policy):
   vendor_plat_version_file = get_file('vendor',
                                       'etc/selinux/plat_sepolicy_vers.txt')
   if not vendor_plat_version_file or not os.path.exists(
-      vendor_plat_version_file):
+          vendor_plat_version_file):
     raise ExternalError('Missing required sepolicy file %s',
                         vendor_plat_version_file)
   with open(vendor_plat_version_file) as f:
@@ -1092,6 +1094,8 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
                            temp_dir)
 
   # Create the IMG package from the merged target files package.
+  with zipfile.ZipFile(output_zip, allowZip64=True) as zfp:
+    AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)
   if output_img:
     img_from_target_files.main([output_zip, output_img])
 
@@ -1164,7 +1168,8 @@
     elif o == '--vendor-target-files':
       OPTIONS.vendor_target_files = a
     elif o == '--other-item-list':
-      logger.warning('--other-item-list has been renamed to --vendor-item-list')
+      logger.warning(
+          '--other-item-list has been renamed to --vendor-item-list')
       OPTIONS.vendor_item_list = a
     elif o == '--vendor-item-list':
       OPTIONS.vendor_item_list = a
@@ -1220,7 +1225,7 @@
 
  if (args or OPTIONS.framework_target_files is None or
       OPTIONS.vendor_target_files is None or
       (OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
-      (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
+          (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
     common.Usage(__doc__)
     sys.exit(1)
@@ -1246,9 +1251,9 @@
     output_item_list = None
 
   if not validate_config_lists(
-      framework_item_list=framework_item_list,
-      framework_misc_info_keys=framework_misc_info_keys,
-      vendor_item_list=vendor_item_list):
+          framework_item_list=framework_item_list,
+          framework_misc_info_keys=framework_misc_info_keys,
+          vendor_item_list=vendor_item_list):
     sys.exit(1)
 
   call_func_with_temp_dir(
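In the merge flow, care_map.pb is now regenerated against the merged target-files package right before the optional IMG package is built. A hedged, self-contained sketch of that step under assumed names (output_zip is the merged zip and partition_map maps partition names to extracted image paths, as in the call added above; the zip is opened writable here so the generated care_map.pb can actually be added):

    import zipfile
    from common import AddCareMapForAbOta, PARTITIONS_WITH_CARE_MAP

    # Assumes common.OPTIONS.info_dict has already been loaded from the merged
    # META/misc_info.txt; the paths below are illustrative.
    output_zip = "/tmp/merged-target_files.zip"
    partition_map = {
        "system": "/tmp/merged/IMAGES/system.img",
        "vendor": "/tmp/merged/IMAGES/vendor.img",
    }

    with zipfile.ZipFile(output_zip, "a", allowZip64=True) as zfp:
      AddCareMapForAbOta(zfp, PARTITIONS_WITH_CARE_MAP, partition_map)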
"0-9").to_string_raw(), "unknown", + "unknown"] self._verifyCareMap(expected, care_map_file) @@ -395,8 +397,8 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): (0xCAC3, 4), (0xCAC1, 6)]) OPTIONS.info_dict = { - 'extfs_sparse_flag' : '-s', - 'system_image_size' : 53248, + 'extfs_sparse_flag': '-s', + 'system_image_size': 53248, } name, care_map = GetCareMap('system', sparse_image) self.assertEqual('system', name) @@ -411,14 +413,14 @@ class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase): (0xCAC3, 4), (0xCAC1, 6)]) OPTIONS.info_dict = { - 'extfs_sparse_flag' : '-s', - 'system_image_size' : -45056, + 'extfs_sparse_flag': '-s', + 'system_image_size': -45056, } self.assertRaises(AssertionError, GetCareMap, 'system', sparse_image) def test_GetCareMap_nonSparseImage(self): OPTIONS.info_dict = { - 'system_image_size' : 53248, + 'system_image_size': 53248, } # 'foo' is the image filename, which is expected to be not used by # GetCareMap().