mirror of https://gitee.com/openkylin/qemu.git
image-fuzzer: Explicitly use integer division operator
Most of the division expressions in image-fuzzer assume integer
division. Use the // operator to keep the same behavior when we
move to Python 3.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20191016192430.25098-4-ehabkost@redhat.com
Message-Id: <20191016192430.25098-4-ehabkost@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent c314e50b8a
commit d974451c5b
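For context, a minimal Python sketch of the behavior the commit message describes (not part of the patch; the constant below is defined locally for illustration): on Python 2, `/` between two integers performs floor division, while on Python 3 it returns a float; `//` floors on both, so the fuzzer's size and offset computations keep yielding integers.

    # Minimal sketch (not from the patch) of why '/' must become '//'
    # before image-fuzzer runs under Python 3.
    UINT16 = (1 << 16) - 1          # same constant family as the fuzz vectors use

    half = UINT16 // 2              # 32767 (int) on both Python 2 and Python 3
    # On Python 3, UINT16 / 2 would instead be 32767.5 (float), and a float
    # index or offset breaks calls that require integers, e.g.:
    #   range(image_size / cluster_size)    # TypeError on Python 3
    #   range(image_size // cluster_size)   # works on both
    assert half == 32767 and isinstance(half, int)

The `//` spellings in the hunks below are therefore behavior-preserving on Python 2 while remaining correct on Python 3.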
@@ -27,14 +27,14 @@
 UINT32_M = 31
 UINT64_M = 63
 # Fuzz vectors
-UINT8_V = [0, 0x10, UINT8/4, UINT8/2 - 1, UINT8/2, UINT8/2 + 1, UINT8 - 1,
+UINT8_V = [0, 0x10, UINT8//4, UINT8//2 - 1, UINT8//2, UINT8//2 + 1, UINT8 - 1,
            UINT8]
-UINT16_V = [0, 0x100, 0x1000, UINT16/4, UINT16/2 - 1, UINT16/2, UINT16/2 + 1,
+UINT16_V = [0, 0x100, 0x1000, UINT16//4, UINT16//2 - 1, UINT16//2, UINT16//2 + 1,
             UINT16 - 1, UINT16]
-UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32/4, UINT32/2 - 1,
-            UINT32/2, UINT32/2 + 1, UINT32 - 1, UINT32]
-UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64/4,
-                       UINT64/2 - 1, UINT64/2, UINT64/2 + 1, UINT64 - 1,
+UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32//4, UINT32//2 - 1,
+            UINT32//2, UINT32//2 + 1, UINT32 - 1, UINT32]
+UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64//4,
+                       UINT64//2 - 1, UINT64//2, UINT64//2 + 1, UINT64 - 1,
                        UINT64]
 STRING_V = ['%s%p%x%d', '.1024d', '%.2049d', '%p%p%p%p', '%x%x%x%x',
             '%d%d%d%d', '%s%s%s%s', '%99999999999s', '%08x', '%%20d', '%%20n',
@@ -253,7 +253,7 @@ def gen_feat_ids():
             ['>I', self.ext_offset, 0x6803f857, 'ext_magic'],
             # One feature table contains 3 fields and takes 48 bytes
             ['>I', self.ext_offset + UINT32_S,
-             len(feature_tables) / 3 * 48, 'ext_length']
+             len(feature_tables) // 3 * 48, 'ext_length']
         ] + feature_tables)
         self.ext_offset = inner_offset
 
@@ -271,7 +271,7 @@ def create_l_structures(self):
         def create_l2_entry(host, guest, l2_cluster):
             """Generate one L2 entry."""
             offset = l2_cluster * self.cluster_size
-            l2_size = self.cluster_size / UINT64_S
+            l2_size = self.cluster_size // UINT64_S
             entry_offset = offset + UINT64_S * (guest % l2_size)
             cluster_descriptor = host * self.cluster_size
             if not self.header['version'][0].value == 2:
@@ -283,8 +283,8 @@ def create_l2_entry(host, guest, l2_cluster):
 
         def create_l1_entry(l2_cluster, l1_offset, guest):
             """Generate one L1 entry."""
-            l2_size = self.cluster_size / UINT64_S
-            entry_offset = l1_offset + UINT64_S * (guest / l2_size)
+            l2_size = self.cluster_size // UINT64_S
+            entry_offset = l1_offset + UINT64_S * (guest // l2_size)
             # While snapshots are not supported bit #63 = 1
             entry_val = (1 << 63) + l2_cluster * self.cluster_size
             return ['>Q', entry_offset, entry_val, 'l1_entry']
@@ -298,11 +298,11 @@ def create_l1_entry(l2_cluster, l1_offset, guest):
             l2 = []
         else:
             meta_data = self._get_metadata()
-            guest_clusters = random.sample(range(self.image_size /
+            guest_clusters = random.sample(range(self.image_size //
                                                  self.cluster_size),
                                            len(self.data_clusters))
             # Number of entries in a L1/L2 table
-            l_size = self.cluster_size / UINT64_S
+            l_size = self.cluster_size // UINT64_S
             # Number of clusters necessary for L1 table
             l1_size = int(ceil((max(guest_clusters) + 1) / float(l_size**2)))
             l1_start = self._get_adjacent_clusters(self.data_clusters |
@@ -318,7 +318,7 @@ def create_l1_entry(l2_cluster, l1_offset, guest):
             # L2 entries
             l2 = []
             for host, guest in zip(self.data_clusters, guest_clusters):
-                l2_id = guest / l_size
+                l2_id = guest // l_size
                 if l2_id not in l2_ids:
                     l2_ids.append(l2_id)
                     l2_clusters.append(self._get_adjacent_clusters(
@@ -339,14 +339,14 @@ def create_refcount_structures(self):
         def allocate_rfc_blocks(data, size):
             """Return indices of clusters allocated for refcount blocks."""
             cluster_ids = set()
-            diff = block_ids = set([x / size for x in data])
+            diff = block_ids = set([x // size for x in data])
             while len(diff) != 0:
                 # Allocate all yet not allocated clusters
                 new = self._get_available_clusters(data | cluster_ids,
                                                    len(diff))
                 # Indices of new refcount blocks necessary to cover clusters
                 # in 'new'
-                diff = set([x / size for x in new]) - block_ids
+                diff = set([x // size for x in new]) - block_ids
                 cluster_ids |= new
                 block_ids |= diff
             return cluster_ids, block_ids
@@ -359,7 +359,7 @@ def allocate_rfc_table(data, init_blocks, block_size):
             blocks = set(init_blocks)
             clusters = set()
             # Number of entries in one cluster of the refcount table
-            size = self.cluster_size / UINT64_S
+            size = self.cluster_size // UINT64_S
             # Number of clusters necessary for the refcount table based on
             # the current number of refcount blocks
             table_size = int(ceil((max(blocks) + 1) / float(size)))
@@ -373,7 +373,7 @@ def allocate_rfc_table(data, init_blocks, block_size):
                                                  table_size + 1))
             # New refcount blocks necessary for clusters occupied by the
             # refcount table
-            diff = set([c / block_size for c in table_clusters]) - blocks
+            diff = set([c // block_size for c in table_clusters]) - blocks
             blocks |= diff
             while len(diff) != 0:
                 # Allocate clusters for new refcount blocks
@@ -382,12 +382,12 @@ def allocate_rfc_table(data, init_blocks, block_size):
                                                    len(diff))
                 # Indices of new refcount blocks necessary to cover
                 # clusters in 'new'
-                diff = set([x / block_size for x in new]) - blocks
+                diff = set([x // block_size for x in new]) - blocks
                 clusters |= new
                 blocks |= diff
                 # Check if the refcount table needs one more cluster
                 if int(ceil((max(blocks) + 1) / float(size))) > table_size:
-                    new_block_id = (table_start + table_size) / block_size
+                    new_block_id = (table_start + table_size) // block_size
                     # Check if the additional table cluster needs
                     # one more refcount block
                     if new_block_id not in blocks:
@@ -399,13 +399,13 @@ def allocate_rfc_table(data, init_blocks, block_size):
         def create_table_entry(table_offset, block_cluster, block_size,
                                cluster):
             """Generate a refcount table entry."""
-            offset = table_offset + UINT64_S * (cluster / block_size)
+            offset = table_offset + UINT64_S * (cluster // block_size)
             return ['>Q', offset, block_cluster * self.cluster_size,
                     'refcount_table_entry']
 
         def create_block_entry(block_cluster, block_size, cluster):
             """Generate a list of entries for the current block."""
-            entry_size = self.cluster_size / block_size
+            entry_size = self.cluster_size // block_size
             offset = block_cluster * self.cluster_size
             entry_offset = offset + entry_size * (cluster % block_size)
             # While snapshots are not supported all refcounts are set to 1
@@ -415,7 +415,7 @@ def create_block_entry(block_cluster, block_size, cluster):
         # Number of refcount entries per refcount block
         # Convert self.cluster_size from bytes to bits to have the same
         # base for the numerator and denominator
-        block_size = self.cluster_size * 8 / refcount_bits
+        block_size = self.cluster_size * 8 // refcount_bits
        meta_data = self._get_metadata()
        if len(self.data_clusters) == 0:
            # All metadata for an empty guest image needs 4 clusters:
@@ -452,8 +452,8 @@ def create_block_entry(block_cluster, block_size, cluster):
             rfc_blocks = []
 
             for cluster in sorted(self.data_clusters | meta_data):
-                if cluster / block_size != block_id:
-                    block_id = cluster / block_size
+                if cluster // block_size != block_id:
+                    block_id = cluster // block_size
                     block_cluster = block_clusters[block_ids.index(block_id)]
                     rfc_table.append(create_table_entry(table_offset,
                                                         block_cluster,
@@ -587,7 +587,7 @@ def get_cluster_id(lst, length):
     def _alloc_data(img_size, cluster_size):
         """Return a set of random indices of clusters allocated for guest data.
         """
-        num_of_cls = img_size/cluster_size
+        num_of_cls = img_size // cluster_size
         return set(random.sample(range(1, num_of_cls + 1),
                                  random.randint(0, num_of_cls)))
 
@@ -595,7 +595,7 @@ def _get_metadata(self):
         """Return indices of clusters allocated for image metadata."""
         ids = set()
         for x in self:
-            ids.add(x.offset/self.cluster_size)
+            ids.add(x.offset // self.cluster_size)
         return ids
 
 