2009-04-03 03:14:19 +08:00
|
|
|
#!/usr/bin/env python
|
|
|
|
#
|
|
|
|
# Copyright (C) 2008 The Android Open Source Project
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
|
|
|
"""
|
|
|
|
Given a target-files zipfile, produces an OTA package that installs
|
|
|
|
that build. An incremental OTA is produced if -i is given, otherwise
|
|
|
|
a full OTA is produced.
|
|
|
|
|
|
|
|
Usage: ota_from_target_files [flags] input_target_files output_ota_package
|
|
|
|
|
|
|
|
-b (--board_config) <file>
|
2009-08-04 08:27:48 +08:00
|
|
|
Deprecated.
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2011-09-23 01:28:04 +08:00
|
|
|
-k (--package_key) <key> Key to use to sign the package (default is
|
|
|
|
the value of default_system_dev_certificate from the input
|
|
|
|
target-files's META/misc_info.txt, or
|
|
|
|
"build/target/product/security/testkey" if that value is not
|
|
|
|
specified).
|
|
|
|
|
|
|
|
For incremental OTAs, the default value is based on the source
|
|
|
|
target-file, not the target build.
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
-i (--incremental_from) <file>
|
|
|
|
Generate an incremental OTA using the given target-files zip as
|
|
|
|
the starting build.
|
|
|
|
|
2009-04-22 08:12:54 +08:00
|
|
|
-w (--wipe_user_data)
|
|
|
|
Generate an OTA package that will wipe the user data partition
|
|
|
|
when installed.
|
|
|
|
|
2009-04-24 02:41:58 +08:00
|
|
|
-n (--no_prereq)
|
|
|
|
Omit the timestamp prereq check normally included at the top of
|
|
|
|
the build scripts (used for developer OTA packages which
|
|
|
|
legitimately need to go back and forth).
|
|
|
|
|
2009-05-15 10:06:36 +08:00
|
|
|
-e (--extra_script) <file>
|
|
|
|
Insert the contents of file at the end of the update script.
|
|
|
|
|
2010-08-27 05:35:16 +08:00
|
|
|
-a (--aslr_mode) <on|off>
|
|
|
|
Specify whether to turn on ASLR for the package (on by default).
|
2009-04-03 03:14:19 +08:00
|
|
|
"""
|
|
|
|
|
|
|
|
import sys
|
|
|
|
|
|
|
|
# Bail out early on interpreters older than 2.4; the rest of this
# script relies on 2.4+ features (e.g. decorators, generator exprs).
if sys.hexversion < 0x02040000:
  print >> sys.stderr, "Python 2.4 or newer is required."
  sys.exit(1)
|
|
|
|
|
|
|
|
import copy
|
2009-10-01 00:20:32 +08:00
|
|
|
import errno
|
2009-04-03 03:14:19 +08:00
|
|
|
import os
|
|
|
|
import re
|
|
|
|
import subprocess
|
|
|
|
import tempfile
|
|
|
|
import time
|
|
|
|
import zipfile
|
|
|
|
|
2011-03-15 22:21:38 +08:00
|
|
|
# hashlib appeared in Python 2.5; on 2.4 fall back to the older
# (since-deprecated) sha module so both give us a sha1 constructor.
try:
  from hashlib import sha1 as sha1
except ImportError:
  from sha import sha as sha1
|
|
|
|
|
2009-04-03 03:14:19 +08:00
|
|
|
import common
|
2009-06-18 23:43:44 +08:00
|
|
|
import edify_generator
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
# Shared option object from the common module; command-line flags
# (documented in the module docstring) override these defaults.
OPTIONS = common.OPTIONS
# -k: key used to sign the output package (None = pick the default key).
OPTIONS.package_key = None
# -i: source target-files zip when building an incremental OTA.
OPTIONS.incremental_source = None
# Files that must always be shipped as full copies, never as patches.
OPTIONS.require_verbatim = set()
# Files that must never be shipped verbatim (error if they would be).
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
# If a patch exceeds this fraction of the target file's size, ship the
# whole file instead of the patch.
OPTIONS.patch_threshold = 0.95
# -w: format the /data partition as part of the install.
OPTIONS.wipe_user_data = False
# -n: omit the "target build is newer" timestamp precondition.
OPTIONS.omit_prereq = False
# -e: extra script text appended to the end of the update script.
OPTIONS.extra_script = None
# -a: whether binaries are retouched for ASLR (on by default).
OPTIONS.aslr_mode = True
# Worker thread count — presumably consumed by the patch-computation
# machinery in common; confirm against common.ComputeDifferences.
OPTIONS.worker_threads = 3
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
def MostPopularKey(d, default):
  """Return the key of 'd' whose value is largest.

  Ties on the value are broken by choosing the largest key.  Returns
  'default' if the dict is empty.
  """
  if not d:
    return default
  # Comparing (value, key) tuples reproduces the historical behavior of
  # sorting the pairs and taking the last one: largest value wins, and
  # on equal values the largest key wins.  items() (not the Python-2-only
  # iteritems()) keeps this portable.
  return max((v, k) for (k, v) in d.items())[1]
|
|
|
|
|
|
|
|
|
|
|
|
def IsSymlink(info):
  """Return true if the zipfile.ZipInfo object passed in represents a
  symlink.

  The upper 16 bits of external_attr hold the Unix mode.  A symlink is
  stored as S_IFLNK with mode 0777, i.e. octal 0120777 == 0xA1FF; the
  hex form is used because the bare-0-prefix octal literal is
  Python-2-only syntax.
  """
  return (info.external_attr >> 16) == 0xA1FF  # 0120777: S_IFLNK | 0777
|
|
|
|
|
2010-08-03 01:26:17 +08:00
|
|
|
def IsRegular(info):
  """Return true if the zipfile.ZipInfo object passed in represents a
  regular file (the original docstring wrongly said "symlink").

  The top 4 bits of the Unix mode (stored in the upper 16 bits of
  external_attr, so bits 28-31 of the attribute word) are the
  file-type nibble; 8 (octal 010) is S_IFREG.
  """
  return (info.external_attr >> 28) == 8  # 010: S_IFREG type nibble
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
class Item:
  """Items represent the metadata (user, group, mode) of files and
  directories in the system image.

  Every Item ever created is interned in the class-level ITEMS dict,
  keyed by path; use Item.Get() rather than the constructor so that a
  given path maps to a single shared object.
  """
  # Registry of every Item, keyed by its "system/..." path.  The root
  # item has the empty-string name.
  ITEMS = {}

  def __init__(self, name, dir=False):
    # name: path within the image ("" for the root); dir: whether this
    # entry is a directory (directories additionally track children).
    self.name = name
    self.uid = None
    self.gid = None
    self.mode = None
    self.dir = dir

    if name:
      # Creating an item implicitly creates (and links to) its parent
      # directory chain via the interning Get().
      self.parent = Item.Get(os.path.dirname(name), dir=True)
      self.parent.children.append(self)
    else:
      self.parent = None
    if dir:
      self.children = []

  def Dump(self, indent=0):
    """Debugging aid: print this item's metadata (and, for
    directories, the descendant counts and chosen best_subtree) as an
    indented tree."""
    if self.uid is not None:
      print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
    else:
      # Metadata not yet filled in; print the raw (None) values.
      print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
    if self.dir:
      # descendants/best_subtree only exist after CountChildMetadata()
      # has run — NOTE(review): Dump on a fresh tree would raise
      # AttributeError here.
      print "%s%s" % (" "*indent, self.descendants)
      print "%s%s" % (" "*indent, self.best_subtree)
      for i in self.children:
        i.Dump(indent=indent+1)

  @classmethod
  def Get(cls, name, dir=False):
    """Return the interned Item for 'name', creating it on first use."""
    if name not in cls.ITEMS:
      cls.ITEMS[name] = Item(name, dir=dir)
    return cls.ITEMS[name]

  @classmethod
  def GetMetadata(cls, input_zip):
    """Fill in the uid/gid/mode of every registered Item.

    Prefers the record shipped inside the target-files zip
    (META/filesystem_config.txt); falls back to running the host
    'fs_config' tool over all registered paths.
    """
    try:
      # See if the target_files contains a record of what the uid,
      # gid, and mode is supposed to be.
      output = input_zip.read("META/filesystem_config.txt")
    except KeyError:
      # Run the external 'fs_config' program to determine the desired
      # uid, gid, and mode for every Item object.  Note this uses the
      # one in the client now, which might not be the same as the one
      # used when this target_files was built.
      p = common.Run(["fs_config"], stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      # fs_config wants directories marked with a trailing slash.
      suffix = { False: "", True: "/" }
      input = "".join(["%s%s\n" % (i.name, suffix[i.dir])
                       for i in cls.ITEMS.itervalues() if i.name])
      output, error = p.communicate(input)
      assert not error

    # Each line is "<name> <uid> <gid> <octal mode>".
    for line in output.split("\n"):
      if not line: continue
      name, uid, gid, mode = line.split()
      i = cls.ITEMS.get(name, None)
      if i is not None:
        i.uid = int(uid)
        i.gid = int(gid)
        i.mode = int(mode, 8)
        if i.dir:
          i.children.sort(key=lambda i: i.name)

    # set metadata for the files generated by this script.
    i = cls.ITEMS.get("system/recovery-from-boot.p", None)
    if i: i.uid, i.gid, i.mode = 0, 0, 0644
    i = cls.ITEMS.get("system/etc/install-recovery.sh", None)
    if i: i.uid, i.gid, i.mode = 0, 0, 0544

  def CountChildMetadata(self):
    """Count up the (uid, gid, mode) tuples for all children and
    determine the best strategy for using set_perm_recursive and
    set_perm to correctly chown/chmod all the files to their desired
    values.  Recursively calls itself for all descendants.

    Returns a dict of {(uid, gid, dmode, fmode): count} counting up
    all descendants of this node.  (dmode or fmode may be None.)  Also
    sets the best_subtree of each directory Item to the (uid, gid,
    dmode, fmode) tuple that will match the most descendants of that
    Item.
    """

    assert self.dir
    # Seed with this directory itself (fmode None for a directory).
    d = self.descendants = {(self.uid, self.gid, self.mode, None): 1}
    for i in self.children:
      if i.dir:
        # Fold the grandchild counts into ours.
        for k, v in i.CountChildMetadata().iteritems():
          d[k] = d.get(k, 0) + v
      else:
        k = (i.uid, i.gid, None, i.mode)
        d[k] = d.get(k, 0) + 1

    # Find the (uid, gid, dmode, fmode) tuple that matches the most
    # descendants.

    # First, find the (uid, gid) pair that matches the most
    # descendants.
    ug = {}
    for (uid, gid, _, _), count in d.iteritems():
      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
    ug = MostPopularKey(ug, (0, 0))

    # Now find the dmode and fmode that match the most descendants
    # with that (uid, gid), and choose those.
    best_dmode = (0, 0755)
    best_fmode = (0, 0644)
    for k, count in d.iteritems():
      if k[:2] != ug: continue
      if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2])
      if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3])
    self.best_subtree = ug + (best_dmode[1], best_fmode[1])

    return d

  def SetPermissions(self, script):
    """Append set_perm/set_perm_recursive commands to 'script' to
    set all permissions, users, and groups for the tree of files
    rooted at 'self'."""

    self.CountChildMetadata()

    def recurse(item, current):
      # current is the (uid, gid, dmode, fmode) tuple that the current
      # item (and all its children) have already been set to.  We only
      # need to issue set_perm/set_perm_recursive commands if we're
      # supposed to be something different.
      if item.dir:
        if current != item.best_subtree:
          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
          current = item.best_subtree

        # The recursive call above used the majority mode; fix up this
        # directory itself if it differs.
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[2]:
          script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)

        for i in item.children:
          recurse(i, current)
      else:
        if item.uid != current[0] or item.gid != current[1] or \
           item.mode != current[3]:
          script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)

    # (-1,...) guarantees the first comparison differs, so the root
    # always gets an explicit command.
    recurse(self, (-1, -1, -1, -1))
|
|
|
|
|
|
|
|
|
|
|
|
def CopySystemFiles(input_zip, output_zip=None,
                    substitute=None):
  """Copy everything under SYSTEM/ in input_zip into output_zip.

  Side effect: registers each copied path with the Item class so its
  metadata can be tracked later.  Returns a pair (symlinks,
  retouch_files): the sorted list of (link target, path) symlink
  entries, and the list of (path, sha1) entries for the regular files
  under SYSTEM/lib/ that may need retouching.

  output_zip may be None, in which case only the side effects happen.
  substitute optionally maps an output filename to replacement
  contents; a value of None means "omit this file entirely".
  """

  link_entries = []
  retouch_entries = []

  for entry in input_zip.infolist():
    if not entry.filename.startswith("SYSTEM/"):
      continue
    relpath = entry.filename[7:]
    if IsSymlink(entry):
      link_entries.append((input_zip.read(entry.filename),
                           "/system/" + relpath))
      continue

    out_info = copy.copy(entry)
    out_name = out_info.filename = "system/" + relpath
    if substitute and out_name in substitute and substitute[out_name] is None:
      # Caller asked for this file to be dropped entirely (it is not
      # even registered with Item).
      continue
    if output_zip is not None:
      if substitute and out_name in substitute:
        contents = substitute[out_name]
      else:
        contents = input_zip.read(entry.filename)
      if entry.filename.startswith("SYSTEM/lib/") and IsRegular(entry):
        retouch_entries.append(("/system/" + relpath,
                                common.sha1(contents).hexdigest()))
      output_zip.writestr(out_info, contents)
    if out_name.endswith("/"):
      Item.Get(out_name[:-1], dir=True)
    else:
      Item.Get(out_name, dir=False)

  link_entries.sort()
  return (link_entries, retouch_entries)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
|
|
|
def SignOutput(temp_zip_name, output_zip_name):
  """Sign the zip at temp_zip_name with OPTIONS.package_key, writing
  the whole-file-signed result to output_zip_name."""
  key = OPTIONS.package_key
  # GetKeyPasswords collects the passwords for all the keys it is
  # given; we only need the one for our package key.
  password = common.GetKeyPasswords([key])[key]
  common.SignFile(temp_zip_name, output_zip_name, key, password,
                  whole_file=True)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
2009-06-18 23:43:44 +08:00
|
|
|
def AppendAssertions(script, input_zip):
  """Emit an assertion into 'script' that the package is being
  installed on the device model it was built for."""
  expected_device = GetBuildProp("ro.product.device", input_zip)
  script.AssertDevice(expected_device)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
2010-09-17 05:01:56 +08:00
|
|
|
def MakeRecoveryPatch(output_zip, recovery_img, boot_img):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  Partition type/device information is taken
  from OPTIONS.info_dict (the dictionary returned by
  common.LoadInfoDict() on the input target_files).

  Returns an Item for the shell script, which must be made
  executable.
  """

  # Patch from boot -> recovery (not old recovery -> new recovery).
  d = common.Difference(recovery_img, boot_img)
  _, _, patch = d.ComputePatch()
  common.ZipWriteStr(output_zip, "recovery/recovery-from-boot.p", patch)
  # Register the generated file so Item.GetMetadata can assign it
  # ownership/mode later.
  Item.Get("system/recovery-from-boot.p", dir=False)

  boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict)
  recovery_type, recovery_device = common.GetTypeAndDevice("/recovery", OPTIONS.info_dict)

  # First-boot script: if the recovery partition doesn't already hold
  # the expected image (applypatch -c check fails), rebuild it by
  # patching the boot partition contents.
  sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  log -t recovery "Installing new recovery image"
  applypatch %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p
else
  log -t recovery "Recovery image already installed"
fi
""" % { 'boot_size': boot_img.size,
        'boot_sha1': boot_img.sha1,
        'recovery_size': recovery_img.size,
        'recovery_sha1': recovery_img.sha1,
        'boot_type': boot_type,
        'boot_device': boot_device,
        'recovery_type': recovery_type,
        'recovery_device': recovery_device,
        }
  common.ZipWriteStr(output_zip, "recovery/etc/install-recovery.sh", sh)
  return Item.Get("system/etc/install-recovery.sh", dir=False)
|
|
|
|
|
|
|
|
|
2010-09-17 02:28:43 +08:00
|
|
|
def WriteFullOTAPackage(input_zip, output_zip):
  """Write a full (non-incremental) OTA package into output_zip from
  the target-files in input_zip: an edify update script plus all the
  system files, boot image, and recovery patch it installs."""
  # TODO: how to determine this?  We don't know what version it will
  # be installed on top of.  For now, we expect the API just won't
  # change very often.
  script = edify_generator.EdifyGenerator(3, OPTIONS.info_dict)

  # Package metadata written out at the end by WriteMetadata.
  metadata = {"post-build": GetBuildProp("ro.build.fingerprint", input_zip),
              "pre-device": GetBuildProp("ro.product.device", input_zip),
              "post-timestamp": GetBuildProp("ro.build.date.utc", input_zip),
              }

  # Hooks for device-specific additions to the package.
  device_specific = common.DeviceSpecificParams(
      input_zip=input_zip,
      input_version=OPTIONS.info_dict["recovery_api_version"],
      output_zip=output_zip,
      script=script,
      input_tmp=OPTIONS.input_tmp,
      metadata=metadata,
      info_dict=OPTIONS.info_dict)

  # Refuse to downgrade unless -n was given.
  if not OPTIONS.omit_prereq:
    ts = GetBuildProp("ro.build.date.utc", input_zip)
    script.AssertOlderBuild(ts)

  AppendAssertions(script, input_zip)
  device_specific.FullOTA_Assertions()

  script.ShowProgress(0.5, 0)

  if OPTIONS.wipe_user_data:
    script.FormatPartition("/data")

  # Full OTA: wipe /system and unpack everything fresh.
  script.FormatPartition("/system")
  script.Mount("/system")
  script.UnpackPackageDir("recovery", "/system")
  script.UnpackPackageDir("system", "/system")

  (symlinks, retouch_files) = CopySystemFiles(input_zip, output_zip)
  script.MakeSymlinks(symlinks)
  if OPTIONS.aslr_mode:
    script.RetouchBinaries(retouch_files)
  else:
    script.UndoRetouchBinaries(retouch_files)

  boot_img = common.GetBootableImage("boot.img", "boot.img",
                                     OPTIONS.input_tmp, "BOOT")
  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
                                         OPTIONS.input_tmp, "RECOVERY")
  MakeRecoveryPatch(output_zip, recovery_img, boot_img)

  # Now that every file is registered as an Item, look up ownership
  # and modes and emit the chown/chmod commands.
  Item.GetMetadata(input_zip)
  Item.Get("system").SetPermissions(script)

  common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
  script.ShowProgress(0.2, 0)

  script.ShowProgress(0.2, 10)
  script.WriteRawImage("/boot", "boot.img")

  script.ShowProgress(0.1, 0)
  device_specific.FullOTA_InstallEnd()

  if OPTIONS.extra_script is not None:
    script.AppendExtra(OPTIONS.extra_script)

  script.UnmountAll()
  script.AddToZip(input_zip, output_zip)
  WriteMetadata(metadata, output_zip)
|
|
|
|
|
|
|
|
|
|
|
|
def WriteMetadata(metadata, output_zip):
  """Serialize the metadata dict as sorted key=value lines and store
  it in the package at META-INF/com/android/metadata."""
  lines = []
  for key in sorted(metadata):
    lines.append("%s=%s\n" % (key, metadata[key]))
  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
                     "".join(lines))
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def LoadSystemFiles(z):
  """Read every non-symlink entry under SYSTEM/ in the target-files
  ZipFile 'z'.

  Returns (files, retouch_files): a dict mapping "system/..." names to
  common.File objects, and a list of (path, sha1) pairs for the
  regular files under SYSTEM/lib/ that may need retouching."""
  files = {}
  retouch = []
  for entry in z.infolist():
    if not entry.filename.startswith("SYSTEM/"):
      continue
    if IsSymlink(entry):
      continue
    relpath = entry.filename[7:]
    name = "system/" + relpath
    files[name] = common.File(name, z.read(entry.filename))
    if entry.filename.startswith("SYSTEM/lib/") and IsRegular(entry):
      retouch.append(("/system/" + relpath, files[name].sha1))
  return (files, retouch)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
|
|
|
def GetBuildProp(property, z):
  """Return the value of the named build property from the
  SYSTEM/build.prop file inside the target-files ZipFile 'z'.

  If 'property' is empty, the entire build.prop contents are returned
  instead.  Raises common.ExternalError if the property is missing.
  (The original docstring claimed this returned "the fingerprint";
  it returns whatever property is requested.)
  """
  bp = z.read("SYSTEM/build.prop")
  if not property:
    return bp
  # Match "name=value" at a line boundary; escape the name since
  # property keys contain regex-special dots.
  m = re.search(re.escape(property) + r"=(.*)\n", bp)
  if not m:
    raise common.ExternalError("couldn't find %s in build.prop" % (property,))
  return m.group(1).strip()
|
|
|
|
|
|
|
|
|
2010-09-17 02:28:43 +08:00
|
|
|
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
|
2010-09-17 08:44:38 +08:00
|
|
|
source_version = OPTIONS.source_info_dict["recovery_api_version"]
|
|
|
|
target_version = OPTIONS.target_info_dict["recovery_api_version"]
|
2009-06-18 23:43:44 +08:00
|
|
|
|
2010-04-22 05:08:44 +08:00
|
|
|
if source_version == 0:
|
|
|
|
print ("WARNING: generating edify script for a source that "
|
|
|
|
"can't install it.")
|
2010-09-17 14:13:11 +08:00
|
|
|
script = edify_generator.EdifyGenerator(source_version, OPTIONS.target_info_dict)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2010-04-29 07:05:21 +08:00
|
|
|
metadata = {"pre-device": GetBuildProp("ro.product.device", source_zip),
|
2010-06-22 06:30:45 +08:00
|
|
|
"post-timestamp": GetBuildProp("ro.build.date.utc", target_zip),
|
2010-04-29 07:05:21 +08:00
|
|
|
}
|
|
|
|
|
2009-06-23 02:32:31 +08:00
|
|
|
device_specific = common.DeviceSpecificParams(
|
|
|
|
source_zip=source_zip,
|
2010-02-03 05:12:04 +08:00
|
|
|
source_version=source_version,
|
2009-06-23 02:32:31 +08:00
|
|
|
target_zip=target_zip,
|
2010-02-03 05:12:04 +08:00
|
|
|
target_version=target_version,
|
2009-06-23 02:32:31 +08:00
|
|
|
output_zip=output_zip,
|
2010-04-29 07:05:21 +08:00
|
|
|
script=script,
|
2010-09-27 05:57:41 +08:00
|
|
|
metadata=metadata,
|
|
|
|
info_dict=OPTIONS.info_dict)
|
2009-06-23 02:32:31 +08:00
|
|
|
|
2009-04-03 03:14:19 +08:00
|
|
|
print "Loading target..."
|
2010-08-03 01:26:17 +08:00
|
|
|
(target_data, target_retouch_files) = LoadSystemFiles(target_zip)
|
2009-04-03 03:14:19 +08:00
|
|
|
print "Loading source..."
|
2010-08-03 01:26:17 +08:00
|
|
|
(source_data, source_retouch_files) = LoadSystemFiles(source_zip)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
verbatim_targets = []
|
|
|
|
patch_list = []
|
2009-09-26 01:45:39 +08:00
|
|
|
diffs = []
|
2009-04-03 03:14:19 +08:00
|
|
|
largest_source_size = 0
|
|
|
|
for fn in sorted(target_data.keys()):
|
|
|
|
tf = target_data[fn]
|
2009-09-26 01:45:39 +08:00
|
|
|
assert fn == tf.name
|
2009-04-03 03:14:19 +08:00
|
|
|
sf = source_data.get(fn, None)
|
|
|
|
|
|
|
|
if sf is None or fn in OPTIONS.require_verbatim:
|
|
|
|
# This file should be included verbatim
|
|
|
|
if fn in OPTIONS.prohibit_verbatim:
|
2009-06-24 07:27:38 +08:00
|
|
|
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
|
2009-04-03 03:14:19 +08:00
|
|
|
print "send", fn, "verbatim"
|
|
|
|
tf.AddToZip(output_zip)
|
|
|
|
verbatim_targets.append((fn, tf.size))
|
|
|
|
elif tf.sha1 != sf.sha1:
|
|
|
|
# File is different; consider sending as a patch
|
2010-09-13 06:26:16 +08:00
|
|
|
diffs.append(common.Difference(tf, sf))
|
2009-04-03 03:14:19 +08:00
|
|
|
else:
|
|
|
|
# Target file identical to source.
|
|
|
|
pass
|
|
|
|
|
2010-09-13 06:26:16 +08:00
|
|
|
common.ComputeDifferences(diffs)
|
2009-09-26 01:45:39 +08:00
|
|
|
|
|
|
|
for diff in diffs:
|
|
|
|
tf, sf, d = diff.GetPatch()
|
|
|
|
if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
|
|
|
|
# patch is almost as big as the file; don't bother patching
|
|
|
|
tf.AddToZip(output_zip)
|
|
|
|
verbatim_targets.append((tf.name, tf.size))
|
|
|
|
else:
|
|
|
|
common.ZipWriteStr(output_zip, "patch/" + tf.name + ".p", d)
|
2011-01-26 09:03:34 +08:00
|
|
|
patch_list.append((tf.name, tf, sf, tf.size, common.sha1(d).hexdigest()))
|
2009-09-26 01:45:39 +08:00
|
|
|
largest_source_size = max(largest_source_size, sf.size)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
source_fp = GetBuildProp("ro.build.fingerprint", source_zip)
|
|
|
|
target_fp = GetBuildProp("ro.build.fingerprint", target_zip)
|
2010-04-29 07:05:21 +08:00
|
|
|
metadata["pre-build"] = source_fp
|
|
|
|
metadata["post-build"] = target_fp
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2010-09-21 09:04:41 +08:00
|
|
|
script.Mount("/system")
|
2009-06-18 23:43:44 +08:00
|
|
|
script.AssertSomeFingerprint(source_fp, target_fp)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2011-01-26 09:03:34 +08:00
|
|
|
source_boot = common.GetBootableImage(
|
|
|
|
"/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT")
|
|
|
|
target_boot = common.GetBootableImage(
|
|
|
|
"/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
|
2009-06-03 04:38:17 +08:00
|
|
|
updating_boot = (source_boot.data != target_boot.data)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2011-01-26 09:03:34 +08:00
|
|
|
source_recovery = common.GetBootableImage(
|
|
|
|
"/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY")
|
|
|
|
target_recovery = common.GetBootableImage(
|
|
|
|
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
|
2009-05-30 02:41:21 +08:00
|
|
|
updating_recovery = (source_recovery.data != target_recovery.data)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-09-21 05:03:55 +08:00
|
|
|
# Here's how we divide up the progress bar:
|
|
|
|
# 0.1 for verifying the start state (PatchCheck calls)
|
|
|
|
# 0.8 for applying patches (ApplyPatch calls)
|
|
|
|
# 0.1 for unpacking verbatim files, symlinking, and doing the
|
|
|
|
# device-specific commands.
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
AppendAssertions(script, target_zip)
|
2009-06-23 02:32:31 +08:00
|
|
|
device_specific.IncrementalOTA_Assertions()
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Print("Verifying current system...")
|
|
|
|
|
2009-09-21 05:03:55 +08:00
|
|
|
script.ShowProgress(0.1, 0)
|
|
|
|
total_verify_size = float(sum([i[2].size for i in patch_list]) + 1)
|
|
|
|
if updating_boot:
|
|
|
|
total_verify_size += source_boot.size
|
|
|
|
so_far = 0
|
2009-06-18 23:43:44 +08:00
|
|
|
|
2010-02-18 08:09:18 +08:00
|
|
|
for fn, tf, sf, size, patch_sha in patch_list:
|
2009-06-18 23:43:44 +08:00
|
|
|
script.PatchCheck("/"+fn, tf.sha1, sf.sha1)
|
2009-09-21 05:03:55 +08:00
|
|
|
so_far += sf.size
|
|
|
|
script.SetProgress(so_far / total_verify_size)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-06-03 04:38:17 +08:00
|
|
|
if updating_boot:
|
2010-09-13 06:26:16 +08:00
|
|
|
d = common.Difference(target_boot, source_boot)
|
2009-09-26 01:45:39 +08:00
|
|
|
_, _, d = d.ComputePatch()
|
2009-06-03 04:38:17 +08:00
|
|
|
print "boot target: %d source: %d diff: %d" % (
|
|
|
|
target_boot.size, source_boot.size, len(d))
|
|
|
|
|
2009-06-16 05:31:53 +08:00
|
|
|
common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
|
2009-06-03 04:38:17 +08:00
|
|
|
|
2010-09-27 05:57:41 +08:00
|
|
|
boot_type, boot_device = common.GetTypeAndDevice("/boot", OPTIONS.info_dict)
|
2010-09-23 01:12:54 +08:00
|
|
|
|
|
|
|
script.PatchCheck("%s:%s:%d:%s:%d:%s" %
|
|
|
|
(boot_type, boot_device,
|
2010-07-08 04:53:32 +08:00
|
|
|
source_boot.size, source_boot.sha1,
|
2009-06-18 23:43:44 +08:00
|
|
|
target_boot.size, target_boot.sha1))
|
2009-09-21 05:03:55 +08:00
|
|
|
so_far += source_boot.size
|
|
|
|
script.SetProgress(so_far / total_verify_size)
|
2009-06-03 04:38:17 +08:00
|
|
|
|
|
|
|
if patch_list or updating_recovery or updating_boot:
|
2009-06-18 23:43:44 +08:00
|
|
|
script.CacheFreeSpaceCheck(largest_source_size)
|
2010-02-18 08:09:18 +08:00
|
|
|
|
2009-06-23 02:32:31 +08:00
|
|
|
device_specific.IncrementalOTA_VerifyEnd()
|
|
|
|
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Comment("---- start making changes here ----")
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-04-22 08:12:54 +08:00
|
|
|
if OPTIONS.wipe_user_data:
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Print("Erasing user data...")
|
2010-09-21 09:04:41 +08:00
|
|
|
script.FormatPartition("/data")
|
2009-04-22 08:12:54 +08:00
|
|
|
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Print("Removing unneeded files...")
|
2009-06-30 23:16:58 +08:00
|
|
|
script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
|
|
|
|
["/"+i for i in sorted(source_data)
|
2009-08-25 01:24:32 +08:00
|
|
|
if i not in target_data] +
|
|
|
|
["/system/recovery.img"])
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-09-21 05:03:55 +08:00
|
|
|
script.ShowProgress(0.8, 0)
|
|
|
|
total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
|
|
|
|
if updating_boot:
|
|
|
|
total_patch_size += target_boot.size
|
|
|
|
so_far = 0
|
|
|
|
|
|
|
|
script.Print("Patching system files...")
|
2011-08-27 04:46:40 +08:00
|
|
|
deferred_patch_list = []
|
|
|
|
for item in patch_list:
|
|
|
|
fn, tf, sf, size, _ = item
|
|
|
|
if tf.name == "system/build.prop":
|
|
|
|
deferred_patch_list.append(item)
|
|
|
|
continue
|
2010-02-23 07:41:53 +08:00
|
|
|
script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1, sf.sha1, "patch/"+fn+".p")
|
2009-09-21 05:03:55 +08:00
|
|
|
so_far += tf.size
|
|
|
|
script.SetProgress(so_far / total_patch_size)
|
|
|
|
|
2009-04-03 03:14:19 +08:00
|
|
|
if updating_boot:
|
2009-06-03 04:38:17 +08:00
|
|
|
# Produce the boot image by applying a patch to the current
|
|
|
|
# contents of the boot partition, and write it back to the
|
|
|
|
# partition.
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Print("Patching boot image...")
|
2010-09-23 01:12:54 +08:00
|
|
|
script.ApplyPatch("%s:%s:%d:%s:%d:%s"
|
|
|
|
% (boot_type, boot_device,
|
2010-07-08 04:53:32 +08:00
|
|
|
source_boot.size, source_boot.sha1,
|
2009-06-18 23:43:44 +08:00
|
|
|
target_boot.size, target_boot.sha1),
|
|
|
|
"-",
|
|
|
|
target_boot.size, target_boot.sha1,
|
2010-02-23 07:41:53 +08:00
|
|
|
source_boot.sha1, "patch/boot.img.p")
|
2009-09-21 05:03:55 +08:00
|
|
|
so_far += target_boot.size
|
|
|
|
script.SetProgress(so_far / total_patch_size)
|
2009-04-03 03:14:19 +08:00
|
|
|
print "boot image changed; including."
|
|
|
|
else:
|
|
|
|
print "boot image unchanged; skipping."
|
|
|
|
|
|
|
|
if updating_recovery:
|
2009-07-24 06:12:53 +08:00
|
|
|
# Is it better to generate recovery as a patch from the current
|
|
|
|
# boot image, or from the previous recovery image? For large
|
|
|
|
# updates with significant kernel changes, probably the former.
|
|
|
|
# For small updates where the kernel hasn't changed, almost
|
|
|
|
# certainly the latter. We pick the first option. Future
|
|
|
|
# complicated schemes may let us effectively use both.
|
|
|
|
#
|
|
|
|
# A wacky possibility: as long as there is room in the boot
|
|
|
|
# partition, include the binaries and image files from recovery in
|
|
|
|
# the boot image (though not in the ramdisk) so they can be used
|
|
|
|
# as fodder for constructing the recovery image.
|
2010-09-17 05:01:56 +08:00
|
|
|
MakeRecoveryPatch(output_zip, target_recovery, target_boot)
|
2010-02-13 02:21:00 +08:00
|
|
|
script.DeleteFiles(["/system/recovery-from-boot.p",
|
|
|
|
"/system/etc/install-recovery.sh"])
|
2009-07-24 06:12:53 +08:00
|
|
|
print "recovery image changed; including as patch from boot."
|
2009-04-03 03:14:19 +08:00
|
|
|
else:
|
|
|
|
print "recovery image unchanged; skipping."
|
|
|
|
|
2009-09-21 05:03:55 +08:00
|
|
|
script.ShowProgress(0.1, 10)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2010-08-03 01:26:17 +08:00
|
|
|
(target_symlinks, target_retouch_dummies) = CopySystemFiles(target_zip, None)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
|
2009-06-18 23:43:44 +08:00
|
|
|
temp_script = script.MakeTemporary()
|
2010-03-16 08:52:32 +08:00
|
|
|
Item.GetMetadata(target_zip)
|
2009-07-24 06:12:53 +08:00
|
|
|
Item.Get("system").SetPermissions(temp_script)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
# Note that this call will mess up the tree of Items, so make sure
|
|
|
|
# we're done with it.
|
2010-08-03 01:26:17 +08:00
|
|
|
(source_symlinks, source_retouch_dummies) = CopySystemFiles(source_zip, None)
|
2009-04-03 03:14:19 +08:00
|
|
|
source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
|
|
|
|
|
|
|
|
# Delete all the symlinks in source that aren't in target. This
|
|
|
|
# needs to happen before verbatim files are unpacked, in case a
|
|
|
|
# symlink in the source is replaced by a real file in the target.
|
|
|
|
to_delete = []
|
|
|
|
for dest, link in source_symlinks:
|
|
|
|
if link not in target_symlinks_d:
|
|
|
|
to_delete.append(link)
|
2009-06-18 23:43:44 +08:00
|
|
|
script.DeleteFiles(to_delete)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
if verbatim_targets:
|
2009-06-18 23:43:44 +08:00
|
|
|
script.Print("Unpacking new files...")
|
|
|
|
script.UnpackPackageDir("system", "/system")
|
|
|
|
|
2010-02-13 02:21:00 +08:00
|
|
|
if updating_recovery:
|
|
|
|
script.Print("Unpacking new recovery...")
|
|
|
|
script.UnpackPackageDir("recovery", "/system")
|
|
|
|
|
2009-06-23 02:32:31 +08:00
|
|
|
script.Print("Symlinks and permissions...")
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
# Create all the symlinks that don't already exist, or point to
|
|
|
|
# somewhere different than what we want. Delete each symlink before
|
|
|
|
# creating it, since the 'symlink' command won't overwrite.
|
|
|
|
to_create = []
|
|
|
|
for dest, link in target_symlinks:
|
|
|
|
if link in source_symlinks_d:
|
|
|
|
if dest != source_symlinks_d[link]:
|
|
|
|
to_create.append((dest, link))
|
|
|
|
else:
|
|
|
|
to_create.append((dest, link))
|
2009-06-18 23:43:44 +08:00
|
|
|
script.DeleteFiles([i[1] for i in to_create])
|
|
|
|
script.MakeSymlinks(to_create)
|
2010-08-03 01:26:17 +08:00
|
|
|
if OPTIONS.aslr_mode:
|
|
|
|
script.RetouchBinaries(target_retouch_files)
|
|
|
|
else:
|
|
|
|
script.UndoRetouchBinaries(target_retouch_files)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
# Now that the symlinks are created, we can set all the
|
|
|
|
# permissions.
|
2009-06-18 23:43:44 +08:00
|
|
|
script.AppendScript(temp_script)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
2009-09-21 05:03:55 +08:00
|
|
|
# Do device-specific installation (eg, write radio image).
|
2009-06-23 02:32:31 +08:00
|
|
|
device_specific.IncrementalOTA_InstallEnd()
|
|
|
|
|
2009-05-15 10:06:36 +08:00
|
|
|
if OPTIONS.extra_script is not None:
|
2010-07-08 04:53:32 +08:00
|
|
|
script.AppendExtra(OPTIONS.extra_script)
|
2009-05-15 10:06:36 +08:00
|
|
|
|
2011-08-27 04:46:40 +08:00
|
|
|
# Patch the build.prop file last, so if something fails but the
|
|
|
|
# device can still come up, it appears to be the old build and will
|
|
|
|
# get set the OTA package again to retry.
|
|
|
|
script.Print("Patching remaining system files...")
|
|
|
|
for item in deferred_patch_list:
|
|
|
|
fn, tf, sf, size, _ = item
|
|
|
|
script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1, sf.sha1, "patch/"+fn+".p")
|
|
|
|
script.SetPermissions("/system/build.prop", 0, 0, 0644)
|
|
|
|
|
2009-06-18 23:43:44 +08:00
|
|
|
script.AddToZip(target_zip, output_zip)
|
2010-04-29 07:05:21 +08:00
|
|
|
WriteMetadata(metadata, output_zip)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
|
|
|
|
def main(argv):
|
|
|
|
|
|
|
|
def option_handler(o, a):
|
|
|
|
if o in ("-b", "--board_config"):
|
2009-08-04 08:27:48 +08:00
|
|
|
pass # deprecated
|
2009-04-03 03:14:19 +08:00
|
|
|
elif o in ("-k", "--package_key"):
|
|
|
|
OPTIONS.package_key = a
|
|
|
|
elif o in ("-i", "--incremental_from"):
|
|
|
|
OPTIONS.incremental_source = a
|
2009-04-22 08:12:54 +08:00
|
|
|
elif o in ("-w", "--wipe_user_data"):
|
|
|
|
OPTIONS.wipe_user_data = True
|
2009-04-24 02:41:58 +08:00
|
|
|
elif o in ("-n", "--no_prereq"):
|
|
|
|
OPTIONS.omit_prereq = True
|
2009-05-15 10:06:36 +08:00
|
|
|
elif o in ("-e", "--extra_script"):
|
|
|
|
OPTIONS.extra_script = a
|
2010-08-27 05:35:16 +08:00
|
|
|
elif o in ("-a", "--aslr_mode"):
|
|
|
|
if a in ("on", "On", "true", "True", "yes", "Yes"):
|
|
|
|
OPTIONS.aslr_mode = True
|
|
|
|
else:
|
|
|
|
OPTIONS.aslr_mode = False
|
2009-09-26 01:45:39 +08:00
|
|
|
elif o in ("--worker_threads"):
|
|
|
|
OPTIONS.worker_threads = int(a)
|
2009-04-03 03:14:19 +08:00
|
|
|
else:
|
|
|
|
return False
|
2009-04-22 08:12:54 +08:00
|
|
|
return True
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
args = common.ParseOptions(argv, __doc__,
|
2010-08-27 05:35:16 +08:00
|
|
|
extra_opts="b:k:i:d:wne:a:",
|
2009-04-03 03:14:19 +08:00
|
|
|
extra_long_opts=["board_config=",
|
|
|
|
"package_key=",
|
2009-04-22 08:12:54 +08:00
|
|
|
"incremental_from=",
|
2009-04-24 02:41:58 +08:00
|
|
|
"wipe_user_data",
|
2009-05-15 10:06:36 +08:00
|
|
|
"no_prereq",
|
2009-06-18 23:43:44 +08:00
|
|
|
"extra_script=",
|
2010-08-03 01:26:17 +08:00
|
|
|
"worker_threads=",
|
2010-09-04 04:22:38 +08:00
|
|
|
"aslr_mode=",
|
|
|
|
],
|
2009-04-03 03:14:19 +08:00
|
|
|
extra_option_handler=option_handler)
|
|
|
|
|
|
|
|
if len(args) != 2:
|
|
|
|
common.Usage(__doc__)
|
|
|
|
sys.exit(1)
|
|
|
|
|
2009-05-15 10:06:36 +08:00
|
|
|
if OPTIONS.extra_script is not None:
|
|
|
|
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
|
|
|
|
|
2009-04-03 03:14:19 +08:00
|
|
|
print "unzipping target target-files..."
|
2011-01-26 09:03:34 +08:00
|
|
|
OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
|
2009-08-04 08:27:48 +08:00
|
|
|
|
2009-04-03 03:14:19 +08:00
|
|
|
OPTIONS.target_tmp = OPTIONS.input_tmp
|
2010-09-17 08:44:38 +08:00
|
|
|
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
|
|
|
|
if OPTIONS.verbose:
|
|
|
|
print "--- target info ---"
|
|
|
|
common.DumpInfoDict(OPTIONS.info_dict)
|
|
|
|
|
|
|
|
if OPTIONS.device_specific is None:
|
|
|
|
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None)
|
|
|
|
if OPTIONS.device_specific is not None:
|
|
|
|
OPTIONS.device_specific = os.path.normpath(OPTIONS.device_specific)
|
|
|
|
print "using device-specific extensions in", OPTIONS.device_specific
|
|
|
|
|
2011-09-23 01:28:04 +08:00
|
|
|
temp_zip_file = tempfile.NamedTemporaryFile()
|
|
|
|
output_zip = zipfile.ZipFile(temp_zip_file, "w",
|
|
|
|
compression=zipfile.ZIP_DEFLATED)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
if OPTIONS.incremental_source is None:
|
2010-09-17 02:28:43 +08:00
|
|
|
WriteFullOTAPackage(input_zip, output_zip)
|
2011-09-23 01:28:04 +08:00
|
|
|
if OPTIONS.package_key is None:
|
|
|
|
OPTIONS.package_key = OPTIONS.info_dict.get(
|
|
|
|
"default_system_dev_certificate",
|
|
|
|
"build/target/product/security/testkey")
|
2009-04-03 03:14:19 +08:00
|
|
|
else:
|
|
|
|
print "unzipping source target-files..."
|
2011-01-26 09:03:34 +08:00
|
|
|
OPTIONS.source_tmp, source_zip = common.UnzipTemp(OPTIONS.incremental_source)
|
2010-09-17 08:44:38 +08:00
|
|
|
OPTIONS.target_info_dict = OPTIONS.info_dict
|
|
|
|
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
|
2011-09-23 01:28:04 +08:00
|
|
|
if OPTIONS.package_key is None:
|
2011-09-24 03:48:33 +08:00
|
|
|
OPTIONS.package_key = OPTIONS.source_info_dict.get(
|
2011-09-23 01:28:04 +08:00
|
|
|
"default_system_dev_certificate",
|
|
|
|
"build/target/product/security/testkey")
|
2010-09-17 08:44:38 +08:00
|
|
|
if OPTIONS.verbose:
|
|
|
|
print "--- source info ---"
|
|
|
|
common.DumpInfoDict(OPTIONS.source_info_dict)
|
2010-09-17 02:28:43 +08:00
|
|
|
WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
output_zip.close()
|
2011-09-23 01:28:04 +08:00
|
|
|
|
|
|
|
SignOutput(temp_zip_file.name, args[1])
|
|
|
|
temp_zip_file.close()
|
2009-04-03 03:14:19 +08:00
|
|
|
|
|
|
|
common.Cleanup()
|
|
|
|
|
|
|
|
print "done."
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
  try:
    # Presumably closes stray pipe fds inherited from the parent process
    # before doing any real work -- see common.CloseInheritedPipes for
    # the details (TODO confirm; definition is in common.py).
    common.CloseInheritedPipes()
    main(sys.argv[1:])
  except common.ExternalError, e:
    # A helper tool invoked by the release tools failed.  Print a short
    # error (no traceback) surrounded by blank lines, and exit nonzero
    # so calling build scripts can detect the failure.
    print
    print "   ERROR: %s" % (e,)
    print
    sys.exit(1)