Make merge_target_files more lenient, needed for cross-release merging.
These changes were necessary to begin merging a new cross-release target
whose vendor half is frozen.

- MergeDynamicPartitionInfoDicts
  - Filters combined fields to remove duplicates.
  - Merges `super_block_devices` as well as other keys that were not
    previously used by other targets consuming this tool.
- Introduces --allow-duplicate-apkapex-keys, which emits a warning rather
  than a fatal error on duplicate apk/apex keys. This flag is needed for
  targets that cannot update a frozen half.
- (Formats merge_target_files.py)

Bug: 170683837
Test: Use merge_target_files to merge an S+R build, and boot.
Change-Id: Id5f787e730de8f8ef697e1f2f29ac6514221e58d
commit b0c75911b8 (parent cfa39194ad)
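The "filters combined fields to remove duplicates" behavior boils down to splitting both space-separated value lists, taking the union, and emitting a sorted string. A minimal standalone sketch of that helper (it mirrors the uniq_concat added to common.py in the diff below):

  def uniq_concat(a, b):
    """Merge two space-separated value lists, dropping duplicates and sorting."""
    combined = set(a.split(" "))
    combined.update(set(b.split(" ")))
    combined = [item.strip() for item in combined if item.strip()]
    return " ".join(sorted(combined))

  # Example: both halves list "product", but it appears only once in the result.
  print(uniq_concat("system product", "vendor product"))  # -> "product system vendor"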
common.py

@@ -1003,15 +1003,35 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
   Returns:
     The merged dynamic partition info dictionary.
   """
-  merged_dict = {}
+
+  def uniq_concat(a, b):
+    combined = set(a.split(" "))
+    combined.update(set(b.split(" ")))
+    combined = [item.strip() for item in combined if item.strip()]
+    return " ".join(sorted(combined))
+
+  if (framework_dict.get("use_dynamic_partitions") !=
+      "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
+
+  merged_dict = {"use_dynamic_partitions": "true"}
+
+  merged_dict["dynamic_partition_list"] = uniq_concat(
+      framework_dict.get("dynamic_partition_list", ""),
+      vendor_dict.get("dynamic_partition_list", ""))
+
+  # Super block devices are defined by the vendor dict.
+  if "super_block_devices" in vendor_dict:
+    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
+    for block_device in merged_dict["super_block_devices"].split(" "):
+      key = "super_%s_device_size" % block_device
+      if key not in vendor_dict:
+        raise ValueError("Vendor dict does not contain required key %s." % key)
+      merged_dict[key] = vendor_dict[key]
+
   # Partition groups and group sizes are defined by the vendor dict because
   # these values may vary for each board that uses a shared system image.
   merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
-  framework_dynamic_partition_list = framework_dict.get(
-      "dynamic_partition_list", "")
-  vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list", "")
-  merged_dict["dynamic_partition_list"] = ("%s %s" % (
-      framework_dynamic_partition_list, vendor_dynamic_partition_list)).strip()
   for partition_group in merged_dict["super_partition_groups"].split(" "):
     # Set the partition group's size using the value from the vendor dict.
     key = "super_%s_group_size" % partition_group

@@ -1022,15 +1042,16 @@ def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
     # Set the partition group's partition list using a concatenation of the
     # framework and vendor partition lists.
     key = "super_%s_partition_list" % partition_group
-    merged_dict[key] = (
-        "%s %s" %
-        (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
+    merged_dict[key] = uniq_concat(
+        framework_dict.get(key, ""), vendor_dict.get(key, ""))

-  # Pick virtual ab related flags from vendor dict, if defined.
-  if "virtual_ab" in vendor_dict.keys():
-    merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
-  if "virtual_ab_retrofit" in vendor_dict.keys():
-    merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
+  # Various other flags should be copied from the vendor dict, if defined.
+  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
+              "super_metadata_device", "super_partition_error_limit",
+              "super_partition_size"):
+    if key in vendor_dict.keys():
+      merged_dict[key] = vendor_dict[key]
+
   return merged_dict
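A quick usage sketch of the merged behavior (hypothetical sizes and partition lists; assumes this runs from the releasetools directory so that common.py is importable):

  import common  # assumption: running inside tools/releasetools

  framework_dict = {
      "use_dynamic_partitions": "true",
      "dynamic_partition_list": "system product",
  }
  vendor_dict = {
      "use_dynamic_partitions": "true",
      "dynamic_partition_list": "vendor product",
      "super_partition_groups": "group_a",
      "super_group_a_group_size": "1000",
      "super_group_a_partition_list": "vendor",
      "super_block_devices": "super",
      "super_super_device_size": "3000",
  }

  merged = common.MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict)
  # "product" appears once even though both halves list it, and lists are sorted.
  assert merged["dynamic_partition_list"] == "product system vendor"
  # super_block_devices and its size key now come from the frozen vendor half.
  assert merged["super_block_devices"] == "super"
  assert merged["super_super_device_size"] == "3000"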
merge_target_files.py

@@ -70,6 +70,10 @@ Usage: merge_target_files.py [args]
   --rebuild_recovery
       Deprecated; does nothing.

+  --allow-duplicate-apkapex-keys
+      If provided, duplicate APK/APEX keys are ignored and the value from the
+      framework is used.
+
   --keep-tmp
       Keep temporary files for debugging purposes.
 """

@@ -110,6 +114,8 @@ OPTIONS.output_img = None
 OPTIONS.output_super_empty = None
 # TODO(b/132730255): Remove this option.
 OPTIONS.rebuild_recovery = False
+# TODO(b/150582573): Remove this option.
+OPTIONS.allow_duplicate_apkapex_keys = False
 OPTIONS.keep_tmp = False

 # In an item list (framework or vendor), we may see entries that select whole

@@ -526,6 +532,7 @@ def item_list_to_partition_set(item_list):
   Args:
     item_list: A list of items in a target files package.

   Returns:
     A set of partitions extracted from the list of items.
   """

@@ -547,7 +554,6 @@ def process_apex_keys_apk_certs_common(framework_target_files_dir,
                                        output_target_files_dir,
                                        framework_partition_set,
                                        vendor_partition_set, file_name):
   """Performs special processing for META/apexkeys.txt or META/apkcerts.txt.

   This function merges the contents of the META/apexkeys.txt or

@@ -597,7 +603,12 @@ def process_apex_keys_apk_certs_common(framework_target_files_dir,
     if partition_tag in partition_set:
       if key in merged_dict:
-        raise ValueError('Duplicate key %s' % key)
+        if OPTIONS.allow_duplicate_apkapex_keys:
+          # TODO(b/150582573) Always raise on duplicates.
+          logger.warning('Duplicate key %s' % key)
+          continue
+        else:
+          raise ValueError('Duplicate key %s' % key)

       merged_dict[key] = value
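The effect of the new flag on duplicate handling, as a simplified standalone sketch (merge_cert_entries and allow_duplicates are illustrative stand-ins, not names from the tool; on a duplicate the framework value wins, matching the help text above):

  import logging

  logger = logging.getLogger(__name__)

  def merge_cert_entries(framework_entries, vendor_entries, allow_duplicates):
    """Merge two {package: cert-line} dicts, preferring the framework on clashes."""
    merged = dict(framework_entries)
    for key, value in vendor_entries.items():
      if key in merged:
        if allow_duplicates:
          # Keep the framework value and only warn, so a frozen vendor half
          # cannot break the merge.
          logger.warning('Duplicate key %s', key)
          continue
        raise ValueError('Duplicate key %s' % key)
      merged[key] = value
    return merged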
@@ -647,8 +658,7 @@ def copy_file_contexts(framework_target_files_dir, vendor_target_files_dir,
 def process_special_cases(framework_target_files_temp_dir,
                           vendor_target_files_temp_dir,
                           output_target_files_temp_dir,
-                          framework_misc_info_keys,
-                          framework_partition_set,
+                          framework_misc_info_keys, framework_partition_set,
                           vendor_partition_set):
   """Performs special-case processing for certain target files items.

@@ -967,7 +977,7 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
                          rebuild_recovery)

   if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
-    raise RuntimeError("Incompatible VINTF metadata")
+    raise RuntimeError('Incompatible VINTF metadata')

   generate_images(output_target_files_temp_dir, rebuild_recovery)

@@ -1075,8 +1085,10 @@ def main():
       OPTIONS.output_img = a
     elif o == '--output-super-empty':
       OPTIONS.output_super_empty = a
-    elif o == '--rebuild_recovery': # TODO(b/132730255): Warn
+    elif o == '--rebuild_recovery':  # TODO(b/132730255): Warn
       OPTIONS.rebuild_recovery = True
+    elif o == '--allow-duplicate-apkapex-keys':
+      OPTIONS.allow_duplicate_apkapex_keys = True
     elif o == '--keep-tmp':
       OPTIONS.keep_tmp = True
     else:

@@ -1104,6 +1116,7 @@ def main():
           'output-img=',
           'output-super-empty=',
           'rebuild_recovery',
+          'allow-duplicate-apkapex-keys',
           'keep-tmp',
       ],
       extra_option_handler=option_handler)
test_common.py

@@ -1418,13 +1418,17 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):

   def test_MergeDynamicPartitionInfoDicts_ReturnsMergedDict(self):
     framework_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a',
         'dynamic_partition_list': 'system',
         'super_group_a_partition_list': 'system',
     }
     vendor_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a group_b',
         'dynamic_partition_list': 'vendor product',
+        'super_block_devices': 'super',
+        'super_super_device_size': '3000',
         'super_group_a_partition_list': 'vendor',
         'super_group_a_group_size': '1000',
         'super_group_b_partition_list': 'product',

@@ -1434,8 +1438,11 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
         framework_dict=framework_dict,
         vendor_dict=vendor_dict)
     expected_merged_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'system vendor product',
+        'dynamic_partition_list': 'product system vendor',
+        'super_block_devices': 'super',
+        'super_super_device_size': '3000',
         'super_group_a_partition_list': 'system vendor',
         'super_group_a_group_size': '1000',
         'super_group_b_partition_list': 'product',

@@ -1445,12 +1452,14 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):

   def test_MergeDynamicPartitionInfoDicts_IgnoringFrameworkGroupSize(self):
     framework_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a',
         'dynamic_partition_list': 'system',
         'super_group_a_partition_list': 'system',
         'super_group_a_group_size': '5000',
     }
     vendor_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a group_b',
         'dynamic_partition_list': 'vendor product',
         'super_group_a_partition_list': 'vendor',

@@ -1462,8 +1471,9 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
         framework_dict=framework_dict,
         vendor_dict=vendor_dict)
     expected_merged_dict = {
+        'use_dynamic_partitions': 'true',
         'super_partition_groups': 'group_a group_b',
-        'dynamic_partition_list': 'system vendor product',
+        'dynamic_partition_list': 'product system vendor',
         'super_group_a_partition_list': 'system vendor',
         'super_group_a_group_size': '1000',
         'super_group_b_partition_list': 'product',
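The updated expected values in these tests follow from the sorted, de-duplicated concatenation; for example:

  # Framework lists "system", vendor lists "vendor product"; the merge unions
  # the tokens and sorts them, so the expectation changes from the old
  # concatenation order "system vendor product" to "product system vendor".
  tokens = set("system".split(" ")) | set("vendor product".split(" "))
  print(" ".join(sorted(tokens)))  # -> "product system vendor"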