Protect SparseImage._GetRangeData() with lock
The generator function is not thread-safe and is prone to race conditions: it seeks and reads a file object shared by every caller. This CL uses a lock to protect the generator, and drops the now-redundant locks elsewhere, e.g. around the calls to WriteRangeDataToFd().

Bug: 71908713
Test: Generate an incremental package several times for angler 4208095 to 4442250.
Change-Id: I9e6f0a182a1ba7904a597f403f2b12fe05016513
This commit is contained in:
parent 92d73d3ab9
commit df1166e92f
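The race the message describes is easy to reproduce in miniature. The sketch below is illustrative only (the class and method names are ours, not from this CL): a generator that seeks and reads a shared file object yields bytes from the wrong offset whenever another thread moves the file position between its seek() and read(); serializing the whole generator with a per-object lock, as this CL does for SparseImage, removes the interleaving.

    import threading

    class ToyImage(object):
      """Stand-in for SparseImage: one shared file object, stateful reads."""

      def __init__(self, path, blocksize=4096):
        self.f = open(path, "rb")
        self.blocksize = blocksize
        self.lock = threading.Lock()  # counterpart of generator_lock below

      def ReadRanges(self, ranges):
        # Without the lock, a concurrent ReadRanges() call can move self.f
        # between our seek() and read(), corrupting what we yield.
        with self.lock:
          for s, e in ranges:
            self.f.seek(s * self.blocksize)
            yield self.f.read((e - s) * self.blocksize)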
tools/releasetools/blockimgdiff.py
@@ -776,15 +776,13 @@ class BlockImageDiff(object):
       src_ranges = xf.src_ranges
       tgt_ranges = xf.tgt_ranges
 
-      # Needs lock since WriteRangeDataToFd() is stateful (calling seek).
-      with lock:
-        src_file = common.MakeTempFile(prefix="src-")
-        with open(src_file, "wb") as fd:
-          self.src.WriteRangeDataToFd(src_ranges, fd)
+      src_file = common.MakeTempFile(prefix="src-")
+      with open(src_file, "wb") as fd:
+        self.src.WriteRangeDataToFd(src_ranges, fd)
 
-        tgt_file = common.MakeTempFile(prefix="tgt-")
-        with open(tgt_file, "wb") as fd:
-          self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
+      tgt_file = common.MakeTempFile(prefix="tgt-")
+      with open(tgt_file, "wb") as fd:
+        self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
 
       message = []
       try:
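Dropping the caller-side lock above is safe because WriteRangeDataToFd() is only a thin consumer of the generator that now locks internally; paraphrased (this helper lives in sparse_img.py and is not part of this diff):

    def WriteRangeDataToFd(self, ranges, fd):
      for data in self._GetRangeData(ranges):
        fd.write(data)

Since _GetRangeData() holds generator_lock for the whole iteration, every write of one range set completes before another thread's reads can touch the shared file position.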
@@ -1430,11 +1428,10 @@ class BlockImageDiff(object):
 
       src_file = common.MakeTempFile(prefix="src-")
       tgt_file = common.MakeTempFile(prefix="tgt-")
-      with transfer_lock:
-        with open(src_file, "wb") as src_fd:
-          self.src.WriteRangeDataToFd(src_ranges, src_fd)
-        with open(tgt_file, "wb") as tgt_fd:
-          self.tgt.WriteRangeDataToFd(tgt_ranges, tgt_fd)
+      with open(src_file, "wb") as src_fd:
+        self.src.WriteRangeDataToFd(src_ranges, src_fd)
+      with open(tgt_file, "wb") as tgt_fd:
+        self.tgt.WriteRangeDataToFd(tgt_ranges, tgt_fd)
 
       patch_file = common.MakeTempFile(prefix="patch-")
       patch_info_file = common.MakeTempFile(prefix="split_info-")
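The same caller-side guard (transfer_lock here) goes away for the same reason. A quick way to smoke-test the new locking, separate from the Test: procedure in the commit message, is to read the same image from several threads and compare against a serial baseline; a sketch, assuming an already-constructed SparseImage:

    import hashlib
    import tempfile
    import threading

    def DigestRanges(image, ranges):
      with tempfile.TemporaryFile() as fd:
        image.WriteRangeDataToFd(ranges, fd)
        fd.seek(0)
        return hashlib.sha1(fd.read()).hexdigest()

    def CheckConcurrentReads(image, ranges_list):
      expected = [DigestRanges(image, r) for r in ranges_list]  # serial pass
      results = [None] * len(ranges_list)
      def Run(i):
        results[i] = DigestRanges(image, ranges_list[i])
      threads = [threading.Thread(target=Run, args=(i,))
                 for i in range(len(ranges_list))]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      assert results == expected  # holds only if reads are serialized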
tools/releasetools/sparse_img.py
@@ -15,6 +15,7 @@
 import bisect
 import os
 import struct
+import threading
 from hashlib import sha1
 
 import rangelib
@@ -111,6 +112,8 @@ class SparseImage(object):
         raise ValueError("Unknown chunk type 0x%04X not supported" %
                          (chunk_type,))
 
+    self.generator_lock = threading.Lock()
+
     self.care_map = rangelib.RangeSet(care_data)
     self.offset_index = [i[0] for i in offset_map]
 
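Note that the lock is per instance, not per class: each SparseImage owns its own open file object (simg_f), so readers of the same image serialize while two different images, e.g. the source and target of an incremental diff, can still be read in parallel:

    src = SparseImage("src.img")  # hypothetical constructor arguments
    tgt = SparseImage("tgt.img")
    assert src.generator_lock is not tgt.generator_lock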
@@ -173,39 +176,39 @@ class SparseImage(object):
     particular is not necessarily equal to the number of ranges in
     'ranges'.
 
-    This generator is stateful -- it depends on the open file object
-    contained in this SparseImage, so you should not try to run two
+    Use a lock to protect the generator so that we will not run two
     instances of this generator on the same object simultaneously."""
 
     f = self.simg_f
-    for s, e in ranges:
-      to_read = e-s
-      idx = bisect.bisect_right(self.offset_index, s) - 1
-      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
-
-      # for the first chunk we may be starting partway through it.
-      remain = chunk_len - (s - chunk_start)
-      this_read = min(remain, to_read)
-      if filepos is not None:
-        p = filepos + ((s - chunk_start) * self.blocksize)
-        f.seek(p, os.SEEK_SET)
-        yield f.read(this_read * self.blocksize)
-      else:
-        yield fill_data * (this_read * (self.blocksize >> 2))
-      to_read -= this_read
-
-      while to_read > 0:
-        # continue with following chunks if this range spans multiple chunks.
-        idx += 1
-        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
-        this_read = min(chunk_len, to_read)
-        if filepos is not None:
-          f.seek(filepos, os.SEEK_SET)
-          yield f.read(this_read * self.blocksize)
-        else:
-          yield fill_data * (this_read * (self.blocksize >> 2))
-        to_read -= this_read
+    with self.generator_lock:
+      for s, e in ranges:
+        to_read = e-s
+        idx = bisect.bisect_right(self.offset_index, s) - 1
+        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+
+        # for the first chunk we may be starting partway through it.
+        remain = chunk_len - (s - chunk_start)
+        this_read = min(remain, to_read)
+        if filepos is not None:
+          p = filepos + ((s - chunk_start) * self.blocksize)
+          f.seek(p, os.SEEK_SET)
+          yield f.read(this_read * self.blocksize)
+        else:
+          yield fill_data * (this_read * (self.blocksize >> 2))
+        to_read -= this_read
+
+        while to_read > 0:
+          # continue with following chunks if this range spans multiple chunks.
+          idx += 1
+          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
+          this_read = min(chunk_len, to_read)
+          if filepos is not None:
+            f.seek(filepos, os.SEEK_SET)
+            yield f.read(this_read * self.blocksize)
+          else:
+            yield fill_data * (this_read * (self.blocksize >> 2))
+          to_read -= this_read
 
   def LoadFileBlockMap(self, fn, clobbered_blocks):
     remaining = self.care_map
     self.file_map = out = {}
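One subtlety, which is our observation rather than anything stated in the CL: because the with self.generator_lock: block wraps the yield statements, the lock is acquired on the first next() and stays held while the generator is suspended; it is released only when the generator is exhausted or closed. Consumers therefore must drain (or close) the generator, as WriteRangeDataToFd() does, or they will block every other reader of the same image. A toy model of that semantics:

    import threading

    lock = threading.Lock()

    def Gen():
      with lock:  # acquired on first next(), held across every yield
        yield 1
        yield 2   # released only when the generator finishes or is closed

    g = Gen()
    next(g)                   # generator suspended at 'yield 1', lock held
    assert lock.locked()
    g.close()                 # GeneratorExit unwinds the 'with', releasing it
    assert not lock.locked()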