forked from openkylin/platform_build
Merge "Improve efficiency of using the DataSource abstraction."
commit 7353eabd14
V2SchemeSigner.java

@@ -16,7 +16,6 @@
 package com.android.apksigner.core.internal.apk.v2;
 
-import com.android.apksigner.core.internal.util.ByteBufferSink;
 import com.android.apksigner.core.internal.util.Pair;
 import com.android.apksigner.core.internal.zip.ZipUtils;
 import com.android.apksigner.core.util.DataSource;
@@ -191,8 +190,10 @@ public abstract class V2SchemeSigner {
         // offset field is treated as pointing to the offset at which the APK Signing Block will
         // start.
         long centralDirOffsetForDigesting = beforeCentralDir.size();
-        ByteBuffer eocdBuf = copyToByteBuffer(eocd);
+        ByteBuffer eocdBuf = ByteBuffer.allocate((int) eocd.size());
+        eocdBuf.order(ByteOrder.LITTLE_ENDIAN);
+        eocd.copyTo(0, (int) eocd.size(), eocdBuf);
+        eocdBuf.flip();
         ZipUtils.setZipEocdCentralDirectoryOffset(eocdBuf, centralDirOffsetForDigesting);
 
         // Compute digests of APK contents.
@@ -600,15 +601,4 @@ public abstract class V2SchemeSigner {
         }
         return result.array();
     }
-
-    private static ByteBuffer copyToByteBuffer(DataSource dataSource) throws IOException {
-        long dataSourceSize = dataSource.size();
-        if (dataSourceSize > Integer.MAX_VALUE) {
-            throw new IllegalArgumentException("Data source too large: " + dataSourceSize);
-        }
-        ByteBuffer result = ByteBuffer.allocate((int) dataSourceSize);
-        dataSource.feed(0, result.remaining(), new ByteBufferSink(result));
-        result.position(0);
-        return result;
-    }
 }
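For context on the V2SchemeSigner hunks above: the v2 signature is computed over an EOCD copy whose central-directory-offset field is treated as pointing at the APK Signing Block, so the signer now copies the EOCD bytes into a little-endian buffer with DataSource.copyTo and patches that field before digesting, rather than routing the copy through the removed copyToByteBuffer helper. Below is a minimal, self-contained sketch of the patching step in plain java.nio; EocdPatchSketch, its method name, and the direct use of field offset 16 (the offset-of-central-directory field in the ZIP EOCD record) are illustrative stand-ins for the internal ZipUtils.setZipEocdCentralDirectoryOffset helper.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class EocdPatchSketch {
    // Offset of the "start of central directory" field inside the EOCD record (ZIP format).
    private static final int EOCD_CD_OFFSET_FIELD_OFFSET = 16;

    // Returns a copy of the EOCD record whose central directory offset field has been
    // rewritten to newCdOffset, ready to be digested.
    static ByteBuffer patchEocdForDigesting(ByteBuffer eocd, long newCdOffset) {
        ByteBuffer copy = ByteBuffer.allocate(eocd.remaining());
        copy.order(ByteOrder.LITTLE_ENDIAN);   // ZIP fields are little-endian
        copy.put(eocd.duplicate());            // duplicate() leaves eocd's own position untouched
        copy.flip();                           // position = 0, limit = EOCD size
        copy.putInt(EOCD_CD_OFFSET_FIELD_OFFSET, (int) newCdOffset);
        return copy;
    }
}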
ByteBufferDataSource.java

@@ -35,7 +35,15 @@ public class ByteBufferDataSource implements DataSource {
      * buffer between the buffer's position and limit.
      */
    public ByteBufferDataSource(ByteBuffer buffer) {
-        mBuffer = buffer.slice();
+        this(buffer, true);
+    }
+
+    /**
+     * Constructs a new {@code ByteBufferDigestSource} based on the data contained in the provided
+     * buffer between the buffer's position and limit.
+     */
+    private ByteBufferDataSource(ByteBuffer buffer, boolean sliceRequired) {
+        mBuffer = (sliceRequired) ? buffer.slice() : buffer;
         mSize = buffer.remaining();
     }
 
@@ -45,15 +53,12 @@ public class ByteBufferDataSource implements DataSource {
     }
 
     @Override
-    public ByteBufferDataSource slice(long offset, long size) {
-        if ((offset == 0) && (size == mSize)) {
-            return this;
-        }
+    public ByteBuffer getByteBuffer(long offset, int size) {
         checkChunkValid(offset, size);
 
-        // checkChunkValid ensures that it's OK to cast offset and size to int.
+        // checkChunkValid ensures that it's OK to cast offset to int.
         int chunkPosition = (int) offset;
-        int chunkLimit = (int) (chunkPosition + size);
+        int chunkLimit = chunkPosition + size;
         // Creating a slice of ByteBuffer modifies the state of the source ByteBuffer (position
         // and limit fields, to be more specific). We thus use synchronization around these
         // state-changing operations to make instances of this class thread-safe.
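The comments carried over above explain why getByteBuffer synchronizes on mBuffer: ByteBuffer.slice() is defined by the buffer's current position and limit, so carving out a sub-range means temporarily mutating state that is shared by every user of the buffer. A small standalone illustration of that pattern follows; SliceSketch and sliceRange are illustrative names, not part of this patch.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SliceSketch {
    // Returns an independent view of buffer[offset, offset + length).
    static ByteBuffer sliceRange(ByteBuffer buffer, int offset, int length) {
        // slice() captures whatever lies between position and limit, so we must move them
        // first. That mutates shared state, hence the lock (mirroring the mBuffer lock above).
        synchronized (buffer) {
            buffer.position(0);          // reset position first so the next two calls cannot clash
            buffer.limit(offset + length);
            buffer.position(offset);
            return buffer.slice();       // shares content; has its own position/limit/capacity
        }
    }

    public static void main(String[] args) {
        ByteBuffer whole = ByteBuffer.wrap("hello world".getBytes(StandardCharsets.US_ASCII));
        ByteBuffer world = sliceRange(whole, 6, 5);
        System.out.println(world.remaining()); // prints 5
    }
}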
@@ -66,35 +71,35 @@ public class ByteBufferDataSource implements DataSource {
 
             mBuffer.limit(chunkLimit);
             mBuffer.position(chunkPosition);
-            // Create a ByteBufferDataSource for the slice of the buffer between limit and position.
-            return new ByteBufferDataSource(mBuffer);
+            return mBuffer.slice();
         }
     }
 
     @Override
-    public void feed(long offset, long size, DataSink sink) throws IOException {
-        checkChunkValid(offset, size);
-
-        // checkChunkValid ensures that it's OK to cast offset and size to int.
-        int chunkPosition = (int) offset;
-        int chunkLimit = (int) (chunkPosition + size);
-        ByteBuffer chunk;
-        // Creating a slice of ByteBuffer modifies the state of the source ByteBuffer (position
-        // and limit fields, to be more specific). We thus use synchronization around these
-        // state-changing operations to make instances of this class thread-safe.
-        synchronized (mBuffer) {
-            // ByteBuffer.limit(int) and .position(int) check that that the position >= limit
-            // invariant is not broken. Thus, the only way to safely change position and limit
-            // without caring about their current values is to first set position to 0 or set the
-            // limit to capacity.
-            mBuffer.position(0);
-
-            mBuffer.limit(chunkLimit);
-            mBuffer.position(chunkPosition);
-            chunk = mBuffer.slice();
+    public void copyTo(long offset, int size, ByteBuffer dest) {
+        dest.put(getByteBuffer(offset, size));
     }
 
-        sink.consume(chunk);
+    @Override
+    public void feed(long offset, long size, DataSink sink) throws IOException {
+        if ((size < 0) || (size > mSize)) {
+            throw new IllegalArgumentException("size: " + size + ", source size: " + mSize);
+        }
+        sink.consume(getByteBuffer(offset, (int) size));
     }
 
+    @Override
+    public ByteBufferDataSource slice(long offset, long size) {
+        if ((offset == 0) && (size == mSize)) {
+            return this;
+        }
+        if ((size < 0) || (size > mSize)) {
+            throw new IllegalArgumentException("size: " + size + ", source size: " + mSize);
+        }
+        return new ByteBufferDataSource(
+                getByteBuffer(offset, (int) size),
+                false // no need to slice -- it's already a slice
+                );
+    }
+
     private void checkChunkValid(long offset, long size) {
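Taken together, these ByteBufferDataSource changes make getByteBuffer the primitive and express copyTo, feed, and slice in terms of it, so no intermediate buffers are allocated. The following usage sketch is written against the API exactly as it appears in this diff; the import path for ByteBufferDataSource is assumed from the package names visible above and may need adjusting to its actual location in the tree.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import com.android.apksigner.core.internal.util.ByteBufferDataSource; // assumed location
import com.android.apksigner.core.util.DataSource;

public class DataSourceUsageSketch {
    public static void main(String[] args) throws Exception {
        byte[] data = "0123456789".getBytes(StandardCharsets.US_ASCII);
        DataSource source = new ByteBufferDataSource(ByteBuffer.wrap(data));

        // getByteBuffer: a view of bytes [2, 7); for this source it is backed by the
        // same memory, so no copy is made.
        ByteBuffer view = source.getByteBuffer(2, 5);

        // copyTo: writes the same 5 bytes into a caller-owned buffer (a real copy),
        // advancing the destination's position.
        ByteBuffer copy = ByteBuffer.allocate(5);
        source.copyTo(2, 5, copy);
        copy.flip();

        // slice: a narrowed DataSource over the same region, still zero-copy.
        DataSource region = source.slice(2, 5);

        System.out.println(view.remaining() + " " + copy.remaining() + " " + region.size());
    }
}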
DataSource.java

@@ -17,6 +17,7 @@
 package com.android.apksigner.core.util;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 /**
  * Abstract representation of a source of data.
@@ -29,6 +30,25 @@ import java.io.IOException;
  * may have worked as the unifying abstraction.</li>
  * <li>Support sources which do not fit into logical memory as a contiguous region.</li>
  * </ul>
+ *
+ * <p>There are following ways to obtain a chunk of data from the data source:
+ * <ul>
+ * <li>Stream the chunk's data into a {@link DataSink} using
+ * {@link #feed(long, long, DataSink) feed}. This is best suited for scenarios where there is no
+ * need to have the chunk's data accessible at the same time, for example, when computing the
+ * digest of the chunk. If you need to keep the chunk's data around after {@code feed}
+ * completes, you must create a copy during {@code feed}. However, in that case the following
+ * methods of obtaining the chunk's data may be more appropriate.</li>
+ * <li>Obtain a {@link ByteBuffer} containing the chunk's data using
+ * {@link #getByteBuffer(long, int) getByteBuffer}. Depending on the data source, the chunk's
+ * data may or may not be copied by this operation. This is best suited for scenarios where
+ * you need to access the chunk's data in arbitrary order, but don't need to modify the data and
+ * thus don't require a copy of the data.</li>
+ * <li>Copy the chunk's data to a {@link ByteBuffer} using
+ * {@link #copyTo(long, int, ByteBuffer) copyTo}. This is best suited for scenarios where
+ * you require a copy of the chunk's data, such as to when you need to modify the data.
+ * </li>
+ * </ul>
  */
 public interface DataSource {
 
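The expanded class javadoc above distinguishes feed (streaming, e.g. for digesting), getByteBuffer (possibly zero-copy random access), and copyTo (an explicit copy). The sketch below shows the streaming case: a DataSink that folds consumed bytes into a MessageDigest. It assumes DataSink lives in the same package as DataSource (it is referenced in the javadoc without an import) and declares consume(ByteBuffer), as sink.consume(...) in the ByteBufferDataSource change suggests; the byte[] overload and the MessageDigestSink class itself are illustrative assumptions, not part of apksigner.core.

import java.nio.ByteBuffer;
import java.security.MessageDigest;

import com.android.apksigner.core.util.DataSink;
import com.android.apksigner.core.util.DataSource;

// Streams a chunk of a DataSource into a MessageDigest without materializing the chunk.
public class MessageDigestSink implements DataSink {
    private final MessageDigest mDigest;

    public MessageDigestSink(MessageDigest digest) {
        mDigest = digest;
    }

    @Override
    public void consume(byte[] buf, int offset, int length) {
        mDigest.update(buf, offset, length);
    }

    @Override
    public void consume(ByteBuffer buf) {
        mDigest.update(buf); // consumes buf's remaining bytes
    }

    // Digests bytes [offset, offset + size) of the source, e.g. with algorithm "SHA-256".
    static byte[] digestChunk(DataSource source, long offset, long size, String algorithm)
            throws Exception {
        MessageDigest md = MessageDigest.getInstance(algorithm);
        source.feed(offset, size, new MessageDigestSink(md));
        return md.digest();
    }
}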
@@ -45,9 +65,34 @@ public interface DataSource {
      */
     void feed(long offset, long size, DataSink sink) throws IOException;
 
+    /**
+     * Returns a buffer holding the contents of the specified chunk of data from this data source.
+     * Changes to the data source are not guaranteed to be reflected in the returned buffer.
+     * Similarly, changes in the buffer are not guaranteed to be reflected in the data source.
+     *
+     * <p>The returned buffer's position is {@code 0}, and the buffer's limit and capacity is
+     * {@code size}.
+     *
+     * @param offset index (in bytes) at which the chunk starts inside data source
+     * @param size size (in bytes) of the chunk
+     */
+    ByteBuffer getByteBuffer(long offset, int size) throws IOException;
+
+    /**
+     * Copies the specified chunk from this data source into the provided destination buffer,
+     * advancing the destination buffer's position by {@code size}.
+     *
+     * @param offset index (in bytes) at which the chunk starts inside data source
+     * @param size size (in bytes) of the chunk
+     */
+    void copyTo(long offset, int size, ByteBuffer dest) throws IOException;
+
     /**
      * Returns a data source representing the specified region of data of this data source. Changes
      * to data represented by this data source will also be visible in the returned data source.
      *
      * @param offset index (in bytes) at which the region starts inside data source
      * @param size size (in bytes) of the region
      */
     DataSource slice(long offset, long size);
 }
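To make the expanded DataSource contract concrete, here is a minimal hypothetical in-memory implementation over a byte[], written against the interface exactly as declared above. BytesDataSource is illustrative only (bounds checking omitted for brevity) and is not part of apksigner.core.

import java.io.IOException;
import java.nio.ByteBuffer;

import com.android.apksigner.core.util.DataSink;
import com.android.apksigner.core.util.DataSource;

// Illustrative in-memory DataSource over a byte array.
public class BytesDataSource implements DataSource {
    private final byte[] mData;
    private final int mOffset;
    private final int mSize;

    public BytesDataSource(byte[] data) {
        this(data, 0, data.length);
    }

    private BytesDataSource(byte[] data, int offset, int size) {
        mData = data;
        mOffset = offset;
        mSize = size;
    }

    @Override
    public long size() {
        return mSize;
    }

    @Override
    public void feed(long offset, long size, DataSink sink) throws IOException {
        sink.consume(getByteBuffer(offset, (int) size));
    }

    @Override
    public ByteBuffer getByteBuffer(long offset, int size) {
        // Returned buffer has position 0 and limit == capacity == size, as the javadoc requires.
        return ByteBuffer.wrap(mData, mOffset + (int) offset, size).slice();
    }

    @Override
    public void copyTo(long offset, int size, ByteBuffer dest) {
        dest.put(getByteBuffer(offset, size)); // advances dest's position by size
    }

    @Override
    public DataSource slice(long offset, long size) {
        return new BytesDataSource(mData, mOffset + (int) offset, (int) size);
    }
}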