Don't add data descriptors when merging uncompressed zip entries in merge_zips.

Also filter out the META-INF/TRANSITIVE dir, and report a warning when merge_zips sees duplicate entries with different CRC hashes.

Bug: b/65455145
Test: m clean && m -j java (locally)
Change-Id: I47172ffa27df71f3280f35f6b540a7b5a0c14550
parent 58aebd40d4
commit d5998cce7d
@@ -26,11 +26,28 @@ import (
    "android/soong/third_party/zip"
)

type strip struct{}

func (s *strip) String() string {
    return `""`
}

func (s *strip) Set(path_prefix string) error {
    strippings = append(strippings, path_prefix)

    return nil
}

var (
    sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
    emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
    strippings []string
)

func init() {
    flag.Var(&strip{}, "strip", "the prefix of file path to be excluded from the output zip")
}

func main() {
    flag.Usage = func() {
        fmt.Fprintln(os.Stderr, "usage: merge_zips [-j] output [inputs...]")
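Note: the strip type above follows the standard Go flag.Value pattern for a flag that may be given more than once; each -strip occurrence appends to the strippings slice. A minimal standalone sketch of the same pattern (the prefixList name and the demo main are illustrative, not part of merge_zips):

    package main

    import (
        "flag"
        "fmt"
        "strings"
    )

    // prefixList collects every value passed to a repeated flag.
    type prefixList []string

    func (p *prefixList) String() string { return strings.Join(*p, ",") }

    func (p *prefixList) Set(value string) error {
        *p = append(*p, value)
        return nil
    }

    func main() {
        var prefixes prefixList
        flag.Var(&prefixes, "strip", "path prefix to exclude (may be repeated)")
        flag.Parse()
        fmt.Println("stripping prefixes:", prefixes)
    }

Run as "go run . -strip META-INF/TRANSITIVE -strip foo/", both prefixes end up in the slice, which is how the -strip flag above accumulates into strippings.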
@@ -115,7 +132,13 @@ func mergeZips(readers []namedZipReader, writer *zip.Writer, sortEntries bool, e
    orderedMappings := []fileMapping{}

    for _, namedReader := range readers {
    FileLoop:
        for _, file := range namedReader.reader.File {
            for _, path_prefix := range strippings {
                if strings.HasPrefix(file.Name, path_prefix) {
                    continue FileLoop
                }
            }
            // check for other files or directories destined for the same path
            dest := file.Name
            mapKey := dest
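The FileLoop label is what lets the prefix check skip the current zip entry from inside the nested loop; a plain continue would only advance the inner prefix loop. A self-contained illustration (entry names and prefixes here are made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        entries := []string{"META-INF/TRANSITIVE/a.class", "com/example/Foo.class"}
        strippings := []string{"META-INF/TRANSITIVE"}

    EntryLoop:
        for _, name := range entries {
            for _, prefix := range strippings {
                if strings.HasPrefix(name, prefix) {
                    // Skip this entry entirely and move on to the next one.
                    continue EntryLoop
                }
            }
            fmt.Println("keeping", name)
        }
    }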
@@ -142,16 +165,22 @@ func mergeZips(readers []namedZipReader, writer *zip.Writer, sortEntries bool, e
                    continue
                }
                if !isDir {
                    if emulateJar {
                        if existingMapping.source.content.CRC32 != newMapping.source.content.CRC32 {
                            fmt.Fprintf(os.Stdout, "WARNING: Duplicate path %v found in %v and %v\n",
                                dest, existingMapping.source.path, newMapping.source.path)
                        }
                    } else {
                        return fmt.Errorf("Duplicate path %v found in %v and %v\n",
                            dest, existingMapping.source.path, newMapping.source.path)
                    }
                }
            } else {
                // save entry
                mappingsByDest[mapKey] = newMapping
                orderedMappings = append(orderedMappings, newMapping)
            }
        }
    }

    if emulateJar {
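The block above only tolerates duplicate paths in jar-emulation mode, and even then prints a warning when the two copies differ by CRC32; otherwise any duplicate is a hard error. A rough sketch of the same policy using the standard archive/zip header type instead of merge_zips' internal fileMapping (the function and sample values are illustrative):

    package main

    import (
        "archive/zip"
        "fmt"
        "os"
    )

    // checkDuplicate mirrors the policy above: under jar emulation a duplicate
    // path only produces a warning (and only if the payloads differ per CRC32);
    // otherwise any duplicate path is an error.
    func checkDuplicate(existing, incoming *zip.FileHeader, emulateJar bool) error {
        if emulateJar {
            if existing.CRC32 != incoming.CRC32 {
                fmt.Fprintf(os.Stdout, "WARNING: duplicate path %v with differing contents\n", existing.Name)
            }
            return nil
        }
        return fmt.Errorf("duplicate path %v", existing.Name)
    }

    func main() {
        a := &zip.FileHeader{Name: "module-info.class", CRC32: 0x1234}
        b := &zip.FileHeader{Name: "module-info.class", CRC32: 0x5678}
        if err := checkDuplicate(a, b, true); err != nil {
            fmt.Println(err)
        }
    }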
@@ -32,7 +32,6 @@ func (w *Writer) CopyFrom(orig *File, newName string) error {
    fileHeader := orig.FileHeader
    fileHeader.Name = newName
    fh := &fileHeader
    fh.Flags |= DataDescriptorFlag

    // The zip64 extras change between the Central Directory and Local File Header, while we use
    // the same structure for both. The Local File Header is taken care of by us writing a data
@@ -57,6 +56,7 @@ func (w *Writer) CopyFrom(orig *File, newName string) error {
    }
    io.Copy(w.cw, io.NewSectionReader(orig.zipr, dataOffset, int64(orig.CompressedSize64)))

    if orig.hasDataDescriptor() {
        // Write data descriptor.
        var buf []byte
        if fh.isZip64() {
@@ -75,6 +75,7 @@ func (w *Writer) CopyFrom(orig *File, newName string) error {
            b.uint32(fh.UncompressedSize)
        }
        _, err = w.cw.Write(buf)
    }
    return err
}
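With the "fh.Flags |= DataDescriptorFlag" line removed, CopyFrom no longer forces a data descriptor onto every copied entry; a descriptor is only rewritten when the source entry already carried one (the orig.hasDataDescriptor() check). To see which entries in an existing archive set that flag, here is a small inspection sketch using the standard archive/zip package, where general-purpose bit 3 (0x8) of FileHeader.Flags marks a data descriptor; the input path is illustrative:

    package main

    import (
        "archive/zip"
        "fmt"
        "log"
    )

    const dataDescriptorBit = 0x8 // general purpose bit 3: sizes/CRC follow the data

    func main() {
        // Point this at any existing zip or jar.
        r, err := zip.OpenReader("input.zip")
        if err != nil {
            log.Fatal(err)
        }
        defer r.Close()

        for _, f := range r.File {
            stored := f.Method == zip.Store
            hasDescriptor := f.Flags&dataDescriptorBit != 0
            fmt.Printf("%s stored=%v dataDescriptor=%v\n", f.Name, stored, hasDescriptor)
        }
    }

Stored (Method == zip.Store) entries are the uncompressed case named in the commit subject.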