releasetools: Make scripts pylint clean.
Clean up the following scripts:

  blockimgdiff.py
  common.py
  edify_generator.py
  img_from_target_files.py
  ota_from_target_files.py

Seems we don't have a way to force pylint-ing the scripts using
per-directory pylintrc files (and we don't have pylint tool in AOSP
repo), per
https://android.googlesource.com/platform/tools/repohooks/#todo_limitations.

Test: `m dist`
Test: pylint --rcfile=pylintrc <script.py>
Change-Id: Ia6fd1ddc86f4d84c68e500f225d4a89d0fea8ec7
parent 401fada3f5
commit 76def243ec
blockimgdiff.py
@@ -163,7 +163,7 @@ class DataImage(Image):
 
   def RangeSha1(self, ranges):
     h = sha1()
-    for data in self._GetRangeData(ranges):
+    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
       h.update(data)
     return h.hexdigest()
 
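DataImage._GetRangeData() yields its data, but pylint's inference apparently cannot tell, so it reports not-an-iterable on these loops; the trailing comment disables only that one message on that one line rather than relaxing the whole checker. A minimal sketch of the same line-scoped suppression, using hypothetical names rather than the real blockimgdiff helpers:

    def _load_rows(source):
      # pylint may not be able to infer that this returns something iterable,
      # e.g. when the value comes back through getattr().
      return getattr(source, "rows", [])

    def total_length(source):
      total = 0
      for row in _load_rows(source):  # pylint: disable=not-an-iterable
        total += len(row)
      return total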
@@ -177,7 +177,7 @@ class DataImage(Image):
     return sha1(self.data).hexdigest()
 
   def WriteRangeDataToFd(self, ranges, fd):
-    for data in self._GetRangeData(ranges):
+    for data in self._GetRangeData(ranges):  # pylint: disable=not-an-iterable
       fd.write(data)
 
 
@@ -320,46 +320,45 @@ class ImgdiffStats(object):
     print(''.join(['  {}\n'.format(name) for name in values]))
 
 
-# BlockImageDiff works on two image objects.  An image object is
-# anything that provides the following attributes:
-#
-#    blocksize: the size in bytes of a block, currently must be 4096.
-#
-#    total_blocks: the total size of the partition/image, in blocks.
-#
-#    care_map: a RangeSet containing which blocks (in the range [0,
-#      total_blocks) we actually care about; i.e. which blocks contain
-#      data.
-#
-#    file_map: a dict that partitions the blocks contained in care_map
-#      into smaller domains that are useful for doing diffs on.
-#      (Typically a domain is a file, and the key in file_map is the
-#      pathname.)
-#
-#    clobbered_blocks: a RangeSet containing which blocks contain data
-#      but may be altered by the FS. They need to be excluded when
-#      verifying the partition integrity.
-#
-#    ReadRangeSet(): a function that takes a RangeSet and returns the
-#      data contained in the image blocks of that RangeSet.  The data
-#      is returned as a list or tuple of strings; concatenating the
-#      elements together should produce the requested data.
-#      Implementations are free to break up the data into list/tuple
-#      elements in any way that is convenient.
-#
-#    RangeSha1(): a function that returns (as a hex string) the SHA-1
-#      hash of all the data in the specified range.
-#
-#    TotalSha1(): a function that returns (as a hex string) the SHA-1
-#      hash of all the data in the image (ie, all the blocks in the
-#      care_map minus clobbered_blocks, or including the clobbered
-#      blocks if include_clobbered_blocks is True).
-#
-# When creating a BlockImageDiff, the src image may be None, in which
-# case the list of transfers produced will never read from the
-# original image.
-
 class BlockImageDiff(object):
+  """Generates the diff of two block image objects.
+
+  BlockImageDiff works on two image objects. An image object is anything that
+  provides the following attributes:
+
+     blocksize: the size in bytes of a block, currently must be 4096.
+
+     total_blocks: the total size of the partition/image, in blocks.
+
+     care_map: a RangeSet containing which blocks (in the range [0,
+       total_blocks) we actually care about; i.e. which blocks contain data.
+
+     file_map: a dict that partitions the blocks contained in care_map into
+       smaller domains that are useful for doing diffs on. (Typically a domain
+       is a file, and the key in file_map is the pathname.)
+
+     clobbered_blocks: a RangeSet containing which blocks contain data but may
+       be altered by the FS. They need to be excluded when verifying the
+       partition integrity.
+
+     ReadRangeSet(): a function that takes a RangeSet and returns the data
+       contained in the image blocks of that RangeSet. The data is returned as
+       a list or tuple of strings; concatenating the elements together should
+       produce the requested data. Implementations are free to break up the
+       data into list/tuple elements in any way that is convenient.
+
+     RangeSha1(): a function that returns (as a hex string) the SHA-1 hash of
+       all the data in the specified range.
+
+     TotalSha1(): a function that returns (as a hex string) the SHA-1 hash of
+       all the data in the image (ie, all the blocks in the care_map minus
+       clobbered_blocks, or including the clobbered blocks if
+       include_clobbered_blocks is True).
+
+  When creating a BlockImageDiff, the src image may be None, in which case the
+  list of transfers produced will never read from the original image.
+  """
+
   def __init__(self, tgt, src=None, threads=None, version=4,
                disable_imgdiff=False):
     if threads is None:
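Folding the long leading comment block into the class docstring is mostly a readability change, but it also attaches the description to the object itself: help() and pydoc pick it up, and anything reading BlockImageDiff.__doc__ sees the same text. A tiny illustration with a made-up class:

    class Transfer(object):
      """Represents one read/write step of a block-based update."""

    # The description now travels with the class object.
    print(Transfer.__doc__)
    help(Transfer)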
common.py
@@ -44,7 +44,7 @@ class Options(object):
         "darwin": "out/host/darwin-x86",
     }
 
-    self.search_path = platform_search_path.get(sys.platform, None)
+    self.search_path = platform_search_path.get(sys.platform)
     self.signapk_path = "framework/signapk.jar"  # Relative to search_path
     self.signapk_shared_library_path = "lib64"   # Relative to search_path
     self.extra_signapk_args = []
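dict.get() already returns None when the key is absent, so passing None as an explicit default adds nothing; this change simply drops the second argument, and the same simplification repeats below for info_dict.get(), kwargs.get() and os.getenv(). A quick check of the equivalence, with a hypothetical dict:

    platform_search_path = {"linux2": "out/host/linux-x86"}

    assert platform_search_path.get("darwin", None) is None
    assert platform_search_path.get("darwin") is None
    assert platform_search_path.get("linux2") == "out/host/linux-x86"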
@@ -236,15 +236,15 @@ def LoadInfoDict(input_file, input_dir=None):
   makeint("boot_size")
   makeint("fstab_version")
 
-  system_root_image = d.get("system_root_image", None) == "true"
-  if d.get("no_recovery", None) != "true":
+  system_root_image = d.get("system_root_image") == "true"
+  if d.get("no_recovery") != "true":
     recovery_fstab_path = "RECOVERY/RAMDISK/etc/recovery.fstab"
-    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
-                                   recovery_fstab_path, system_root_image)
-  elif d.get("recovery_as_boot", None) == "true":
+    d["fstab"] = LoadRecoveryFSTab(
+        read_helper, d["fstab_version"], recovery_fstab_path, system_root_image)
+  elif d.get("recovery_as_boot") == "true":
     recovery_fstab_path = "BOOT/RAMDISK/etc/recovery.fstab"
-    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
-                                   recovery_fstab_path, system_root_image)
+    d["fstab"] = LoadRecoveryFSTab(
+        read_helper, d["fstab_version"], recovery_fstab_path, system_root_image)
   else:
     d["fstab"] = None
 
@@ -440,11 +440,11 @@ def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
     cmd.append("--pagesize")
     cmd.append(open(fn).read().rstrip("\n"))
 
-  args = info_dict.get("mkbootimg_args", None)
+  args = info_dict.get("mkbootimg_args")
   if args and args.strip():
     cmd.extend(shlex.split(args))
 
-  args = info_dict.get("mkbootimg_version_args", None)
+  args = info_dict.get("mkbootimg_version_args")
   if args and args.strip():
     cmd.extend(shlex.split(args))
 
@@ -452,7 +452,7 @@ def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
     cmd.extend(["--ramdisk", ramdisk_img.name])
 
   img_unsigned = None
-  if info_dict.get("vboot", None):
+  if info_dict.get("vboot"):
     img_unsigned = tempfile.NamedTemporaryFile()
     cmd.extend(["--output", img_unsigned.name])
   else:
@@ -470,8 +470,8 @@ def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
   p.communicate()
   assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)
 
-  if (info_dict.get("boot_signer", None) == "true" and
-      info_dict.get("verity_key", None)):
+  if (info_dict.get("boot_signer") == "true" and
+      info_dict.get("verity_key")):
     # Hard-code the path as "/boot" for two-step special recovery image (which
     # will be loaded into /boot during the two-step OTA).
     if two_step_image:
@@ -488,7 +488,7 @@ def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
     assert p.returncode == 0, "boot_signer of %s image failed" % path
 
   # Sign the image if vboot is non-empty.
-  elif info_dict.get("vboot", None):
+  elif info_dict.get("vboot"):
     path = "/" + partition_name
     img_keyblock = tempfile.NamedTemporaryFile()
     # We have switched from the prebuilt futility binary to using the tool
@@ -577,9 +577,9 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
 
 
 def Gunzip(in_filename, out_filename):
-  """Gunzip the given gzip compressed file to a given output file.
-  """
-  with gzip.open(in_filename, "rb") as in_file, open(out_filename, "wb") as out_file:
+  """Gunzips the given gzip compressed file to a given output file."""
+  with gzip.open(in_filename, "rb") as in_file, \
+      open(out_filename, "wb") as out_file:
     shutil.copyfileobj(in_file, out_file)
 
 
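The Gunzip() rewrite keeps both files under one with statement but wraps the header to fit the column limit; a backslash continuation is used because these scripts still target Python 2, where a with header holding multiple context managers cannot be wrapped in parentheses. A self-contained version of the same pattern, with file names chosen by the caller:

    import gzip
    import shutil

    def gunzip(in_filename, out_filename):
      """Decompresses the gzip file in_filename into out_filename."""
      with gzip.open(in_filename, "rb") as in_file, \
          open(out_filename, "wb") as out_file:
        shutil.copyfileobj(in_file, out_file)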
@@ -733,7 +733,7 @@ def GetKeyPasswords(keylist):
   devnull.close()
 
   key_passwords.update(PasswordManager().GetPasswords(need_passwords))
-  key_passwords.update(dict.fromkeys(no_passwords, None))
+  key_passwords.update(dict.fromkeys(no_passwords))
   return key_passwords
 
 
@@ -798,8 +798,7 @@ def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
 
 
 def SignFile(input_name, output_name, key, password, min_api_level=None,
-             codename_to_api_level_map=dict(),
-             whole_file=False):
+             codename_to_api_level_map=None, whole_file=False):
   """Sign the input_name zip/jar/apk, producing output_name.  Use the
   given key and password (the latter may be None if the key does not
   have a password.
@@ -815,6 +814,8 @@ def SignFile(input_name, output_name, key, password, min_api_level=None,
   codename_to_api_level_map is needed to translate the codename which may be
   encountered as the APK's minSdkVersion.
   """
+  if codename_to_api_level_map is None:
+    codename_to_api_level_map = {}
 
   java_library_path = os.path.join(
       OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
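codename_to_api_level_map=dict() is the classic mutable-default trap: the dict is created once, when the def statement runs, so every caller that omits the argument shares the same object, and pylint reports it as dangerous-default-value. The fix, as above, is a None default resolved at the top of the function body. A minimal sketch of why the old form is risky (hypothetical function, not SignFile itself):

    def remember(item, seen={}):          # BAD: one dict shared by every call
      seen[item] = True
      return seen

    def remember_fixed(item, seen=None):  # GOOD: fresh dict per call
      if seen is None:
        seen = {}
      seen[item] = True
      return seen

    assert remember("a") == {"a": True}
    assert remember("b") == {"a": True, "b": True}  # state leaked from the first call
    assert remember_fixed("a") == {"a": True}
    assert remember_fixed("b") == {"b": True}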
@@ -876,7 +877,7 @@ def CheckSize(data, target, info_dict):
     device = p.device
     if "/" in device:
       device = device[device.rfind("/")+1:]
-    limit = info_dict.get(device + "_size", None)
+    limit = info_dict.get(device + "_size")
   if not fs_type or not limit:
     return
 
@@ -1106,8 +1107,8 @@ def Cleanup():
 
 class PasswordManager(object):
   def __init__(self):
-    self.editor = os.getenv("EDITOR", None)
-    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
+    self.editor = os.getenv("EDITOR")
+    self.pwfile = os.getenv("ANDROID_PW_FILE")
 
   def GetPasswords(self, items):
     """Get passwords corresponding to each string in 'items',
@@ -1357,7 +1358,7 @@ class DeviceSpecificParams(object):
     module does not define the function, return the value of the
     'default' kwarg (which itself defaults to None)."""
     if self.module is None or not hasattr(self.module, function_name):
-      return kwargs.get("default", None)
+      return kwargs.get("default")
     return getattr(self.module, function_name)(*((self,) + args), **kwargs)
 
   def FullOTA_Assertions(self):
@@ -1407,8 +1408,9 @@ class DeviceSpecificParams(object):
   def VerifyOTA_Assertions(self):
     return self._DoCall("VerifyOTA_Assertions")
 
+
 class File(object):
-  def __init__(self, name, data, compress_size = None):
+  def __init__(self, name, data, compress_size=None):
     self.name = name
     self.data = data
     self.size = len(data)
@@ -1435,6 +1437,7 @@ class File(object):
   def AddToZip(self, z, compression=None):
     ZipWriteStr(z, self.name, self.data, compress_type=compression)
 
+
 DIFF_PROGRAM_BY_EXT = {
     ".gz" : "imgdiff",
     ".zip" : ["imgdiff", "-z"],
@@ -1443,6 +1446,7 @@ DIFF_PROGRAM_BY_EXT = {
     ".img" : "imgdiff",
 }
 
+
 class Difference(object):
   def __init__(self, tf, sf, diff_program=None):
     self.tf = tf
@@ -1510,9 +1514,11 @@ class Difference(object):
 
 
   def GetPatch(self):
-    """Return a tuple (target_file, source_file, patch_data).
+    """Returns a tuple of (target_file, source_file, patch_data).
 
     patch_data may be None if ComputePatch hasn't been called, or if
-    computing the patch failed."""
+    computing the patch failed.
+    """
     return self.tf, self.sf, self.patch
 
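The GetPatch() change is purely about docstring shape: a one-line summary, a blank line, the details, and the closing quotes on their own line, which is the layout the other docstrings in these scripts are being nudged toward. The shape on a stub function:

    def get_patch():
      """Returns a tuple of (target_file, source_file, patch_data).

      patch_data may be None if the patch has not been computed yet.
      """
      return None, None, None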
@@ -1544,7 +1550,8 @@ def ComputeDifferences(diffs):
       else:
         name = "%s (%s)" % (tf.name, sf.name)
       if patch is None:
-        print("patching failed! %s" % (name,))
+        print(
+            "patching failed! %s" % (name,))
       else:
         print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
             dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
@@ -1597,7 +1604,8 @@ class BlockDifference(object):
   def required_cache(self):
     return self._required_cache
 
-  def WriteScript(self, script, output_zip, progress=None):
+  def WriteScript(self, script, output_zip, progress=None,
+                  write_verify_script=False):
     if not self.src:
       # write the output unconditionally
       script.Print("Patching %s image unconditionally..." % (self.partition,))
@@ -1607,7 +1615,8 @@ class BlockDifference(object):
     if progress:
       script.ShowProgress(progress, 0)
     self._WriteUpdate(script, output_zip)
-    if OPTIONS.verify:
+
+    if write_verify_script:
       self._WritePostInstallVerifyScript(script)
 
   def WriteStrictVerifyScript(self, script):
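Instead of reaching for the module-level OPTIONS.verify inside WriteScript(), the decision now arrives as the write_verify_script keyword argument, and the ota_from_target_files call sites further down pass write_verify_script=OPTIONS.verify explicitly. That keeps the behavior visible at the call site and testable without patching globals. A hedged sketch of the same refactoring on a made-up class:

    class Step(object):
      def write(self, out, write_verify_script=False):
        out.append("patch")
        if write_verify_script:      # caller decides; no global lookup
          out.append("verify")

    lines = []
    Step().write(lines, write_verify_script=True)
    assert lines == ["patch", "verify"]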
@@ -1621,12 +1630,12 @@ class BlockDifference(object):
     script.Print("Verifying %s..." % (partition,))
     ranges = self.tgt.care_map
     ranges_str = ranges.to_string_raw()
-    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
-                       'ui_print("    Verified.") || '
+    script.AppendExtra(
+        'range_sha1("%s", "%s") == "%s" && ui_print("    Verified.") || '
         'ui_print("\\"%s\\" has unexpected contents.");' % (
         self.device, ranges_str,
         self.tgt.TotalSha1(include_clobbered_blocks=True),
         self.device))
     script.AppendExtra("")
 
   def WriteVerifyScript(self, script, touched_blocks_only=False):
@@ -1650,12 +1659,12 @@ class BlockDifference(object):
       return
 
     ranges_str = ranges.to_string_raw()
-    script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
-                        'block_image_verify("%s", '
-                        'package_extract_file("%s.transfer.list"), '
-                        '"%s.new.dat", "%s.patch.dat")) then') % (
+    script.AppendExtra(
+        'if (range_sha1("%s", "%s") == "%s" || block_image_verify("%s", '
+        'package_extract_file("%s.transfer.list"), "%s.new.dat", '
+        '"%s.patch.dat")) then' % (
         self.device, ranges_str, expected_sha1,
         self.device, partition, partition, partition))
     script.Print('Verified %s image...' % (partition,))
     script.AppendExtra('else')
@@ -1705,17 +1714,19 @@ class BlockDifference(object):
     # Unlike pre-install verification, clobbered_blocks should not be ignored.
     ranges = self.tgt.care_map
     ranges_str = ranges.to_string_raw()
-    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
-        self.device, ranges_str,
-        self.tgt.TotalSha1(include_clobbered_blocks=True)))
+    script.AppendExtra(
+        'if range_sha1("%s", "%s") == "%s" then' % (
+            self.device, ranges_str,
+            self.tgt.TotalSha1(include_clobbered_blocks=True)))
 
     # Bug: 20881595
     # Verify that extended blocks are really zeroed out.
     if self.tgt.extended:
       ranges_str = self.tgt.extended.to_string_raw()
-      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
-          self.device, ranges_str,
-          self._HashZeroBlocks(self.tgt.extended.size())))
+      script.AppendExtra(
+          'if range_sha1("%s", "%s") == "%s" then' % (
+              self.device, ranges_str,
+              self._HashZeroBlocks(self.tgt.extended.size())))
       script.Print('Verified the updated %s image.' % (partition,))
       if partition == "system":
         code = ErrorCode.SYSTEM_NONZERO_CONTENTS
@@ -1745,9 +1756,9 @@ class BlockDifference(object):
           '{}.transfer.list'.format(self.path),
           '{}.transfer.list'.format(self.partition))
 
-      # For full OTA, compress the new.dat with brotli with quality 6 to reduce its size. Quailty 9
-      # almost triples the compression time but doesn't further reduce the size too much.
-      # For a typical 1.8G system.new.dat
+      # For full OTA, compress the new.dat with brotli with quality 6 to reduce
+      # its size. Quailty 9 almost triples the compression time but doesn't
+      # further reduce the size too much. For a typical 1.8G system.new.dat
       #   zip  | brotli(quality 6)  | brotli(quality 9)
       # compressed_size: 942M | 869M (~8% reduced) | 854M
       # compression_time: 75s  | 265s  | 719s
@@ -1812,6 +1823,7 @@ class BlockDifference(object):
 
 DataImage = blockimgdiff.DataImage
 
+
 # map recovery.fstab's fs_types to mount/format "partition types"
 PARTITION_TYPES = {
     "ext4": "EMMC",
@@ -1820,6 +1832,7 @@ PARTITION_TYPES = {
     "squashfs": "EMMC"
 }
 
+
 def GetTypeAndDevice(mount_point, info):
   fstab = info["fstab"]
   if fstab:
edify_generator.py
@@ -132,8 +132,8 @@ class EdifyGenerator(object):
       self.script.append(
           ('(!less_than_int(%s, getprop("ro.build.date.utc"))) || '
            'abort("E%d: Can\'t install this package (%s) over newer '
-           'build (" + getprop("ro.build.date") + ").");') % (timestamp,
-           common.ErrorCode.OLDER_BUILD, timestamp_text))
+           'build (" + getprop("ro.build.date") + ").");') % (
+               timestamp, common.ErrorCode.OLDER_BUILD, timestamp_text))
 
   def AssertDevice(self, device):
     """Assert that the device identifier is the given string."""
@@ -260,8 +260,8 @@ class EdifyGenerator(object):
       cmd.append(',\0%s,\0package_extract_file("%s")' % patchpairs[i:i+2])
     cmd.append(') ||\n    abort("E%d: Failed to apply patch to %s");' % (
         common.ErrorCode.APPLY_PATCH_FAILURE, srcfile))
-    cmd = "".join(cmd)
-    self.script.append(self.WordWrap(cmd))
+    cmd_str = "".join(cmd)
+    self.script.append(self.WordWrap(cmd_str))
 
   def WriteRawImage(self, mount_point, fn, mapfn=None):
     """Write the given package file into the partition for the given
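cmd starts out as a list of fragments and was then rebound to the joined string; rebinding a name to a value of a different type is easy to misread and is the kind of thing pylint's inference-based checks object to, so the joined result now gets its own name, cmd_str. A minimal sketch with a hypothetical command builder:

    def build_command(parts):
      cmd = ["apply_patch("]      # cmd stays a list for its whole life
      cmd.extend(parts)
      cmd.append(")")
      cmd_str = "".join(cmd)      # the string gets a separate name
      return cmd_str

    assert build_command(["src", ", ", "tgt"]) == "apply_patch(src, tgt)"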
img_from_target_files.py
@@ -28,17 +28,17 @@ Usage: img_from_target_files [flags] input_target_files output_image_zip
 
 from __future__ import print_function
 
+import os
+import shutil
 import sys
+import zipfile
+
+import common
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
-import os
-import shutil
-import zipfile
-
-import common
-
 OPTIONS = common.OPTIONS
 
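Moving the os/shutil/zipfile/common imports above the Python-version check fixes pylint's wrong-import-position (imports after executable code) and groups them conventionally: standard library first, then the project-local common module in its own block. The check still works because sys is imported before it runs. A sketch of the same layout for a hypothetical script (the project-local block is only indicated by a comment so the snippet stays self-contained):

    from __future__ import print_function

    import os        # standard-library imports, alphabetized
    import sys
    # project-local imports (e.g. `import common`) would follow in their own block

    if sys.hexversion < 0x02070000:
      print("Python 2.7 or newer is required.", file=sys.stderr)
      sys.exit(1)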
@@ -51,11 +51,12 @@ def CopyInfo(output_zip):
 
 
 def main(argv):
-  bootable_only = [False]
+  # This allows modifying the value from inner function.
+  bootable_only_array = [False]
 
   def option_handler(o, _):
     if o in ("-z", "--bootable_zip"):
-      bootable_only[0] = True
+      bootable_only_array[0] = True
     else:
       return False
     return True
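option_handler() needs to flip a flag that lives in main(), but a plain assignment inside the nested function would only create a new local, and these scripts run on Python 2, which has no nonlocal. Keeping the flag in a one-element list lets the closure mutate it in place; renaming it to bootable_only_array also leaves the plain name free for the unwrapped value a few lines later. The idiom in isolation, with a hypothetical parser:

    def parse(args):
      verbose_array = [False]          # wrapper the closure can mutate

      def handle(opt):
        if opt == "-v":
          verbose_array[0] = True      # mutation, not rebinding
          return True
        return False

      for arg in args:
        handle(arg)
      verbose = verbose_array[0]       # unwrap once parsing is done
      return verbose

    assert parse(["-v"]) is True
    assert parse([]) is False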
@@ -65,7 +66,7 @@ def main(argv):
                              extra_long_opts=["bootable_zip"],
                              extra_option_handler=option_handler)
 
-  bootable_only = bootable_only[0]
+  bootable_only = bootable_only_array[0]
 
   if len(args) != 2:
     common.Usage(__doc__)
ota_from_target_files.py
@@ -833,7 +833,8 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
                                       allow_shared_blocks)
   system_tgt.ResetFileMap()
   system_diff = common.BlockDifference("system", system_tgt, src=None)
-  system_diff.WriteScript(script, output_zip)
+  system_diff.WriteScript(script, output_zip,
+                          write_verify_script=OPTIONS.verify)
 
   boot_img = common.GetBootableImage(
       "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
@@ -845,7 +846,8 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
                                         allow_shared_blocks)
     vendor_tgt.ResetFileMap()
     vendor_diff = common.BlockDifference("vendor", vendor_tgt)
-    vendor_diff.WriteScript(script, output_zip)
+    vendor_diff.WriteScript(script, output_zip,
+                            write_verify_script=OPTIONS.verify)
 
   AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)
 
@@ -1565,10 +1567,12 @@ else
   device_specific.IncrementalOTA_InstallBegin()
 
   system_diff.WriteScript(script, output_zip,
-                          progress=0.8 if vendor_diff else 0.9)
+                          progress=0.8 if vendor_diff else 0.9,
+                          write_verify_script=OPTIONS.verify)
 
   if vendor_diff:
-    vendor_diff.WriteScript(script, output_zip, progress=0.1)
+    vendor_diff.WriteScript(script, output_zip, progress=0.1,
+                            write_verify_script=OPTIONS.verify)
 
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
pylintrc
@@ -62,7 +62,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=invalid-name,missing-docstring,too-many-branches,too-many-locals,too-many-arguments,too-many-statements,duplicate-code,too-few-public-methods,too-many-instance-attributes,too-many-lines,too-many-public-methods,locally-disabled,fixme
+disable=invalid-name,missing-docstring,too-many-branches,too-many-locals,too-many-arguments,too-many-statements,duplicate-code,too-few-public-methods,too-many-instance-attributes,too-many-lines,too-many-public-methods,locally-disabled,fixme,not-callable
 
 
 [REPORTS]