Fix texture corruption due to buffer misalignment
This patch ensures that even when the dynamic allocator
fulfills an allocation request by returning multiple small
blocks, the memory is correctly aligned.

Previously this wasn't the case: alignment was only guaranteed
to match the block size, so when a large allocation was fulfilled
with multiple smaller blocks, the memory was aligned only to the
smaller block size, which can be weaker than the alignment the
caller requires.

Before this fix, the Vulkan validation layer would report errors
like the following:

    VALIDATION [VUID-vkBindImageMemory-memoryOffset-01048 (0)]:
    vkBindImageMemory(): memoryOffset is 0x43300 but must be an integer
                         multiple of the VkMemoryRequirements::alignment
                         value 0x1000, returned from a call to
                         vkGetImageMemoryRequirements with image. The
                         Vulkan spec states: memoryOffset must be an
                         integer multiple of the alignment member of the
                         VkMemoryRequirements structure returned from a
                         call to vkGetImageMemoryRequirements [...]
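
To make the failure mode concrete, here is a minimal standalone
sketch (not part of the patch; the sizes are hypothetical, chosen
to match the offset in the message above):

    fn main() {
        // Hypothetical sizes, for illustration only.
        let block_size: u64 = 0x100; // small block size within a chunk
        let align: u64 = 0x1000; // VkMemoryRequirements::alignment

        // A run of 16 consecutive 0x100-byte blocks may start at any
        // free block index, e.g. index 0x433:
        let offset = 0x433 * block_size; // = 0x43300, as in the message

        // Aligned to the block size, but not to the image's requirement:
        assert_eq!(offset % block_size, 0);
        assert_eq!(offset % align, 0x300);

        // The patch rejects such starting blocks; since `align` is a
        // power of two, the check reduces to a mask test:
        assert!((offset & (align - 1)) != 0);
    }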
cloudhead authored and Zakarum committed Sep 16, 2019
1 parent e8ffcab commit 778b47f
Showing 1 changed file with 44 additions and 19 deletions.
memory/src/allocator/dynamic.rs
@@ -308,14 +308,14 @@ where
             .next_back()
         {
             // Allocate block for the chunk.
-            let (block, allocated) = self.alloc_from_entry(device, chunk_size, 1)?;
+            let (block, allocated) = self.alloc_from_entry(device, chunk_size, 1, block_size)?;
             Ok((Chunk::from_block(block_size, block), allocated))
         } else {
             let total_blocks = self.sizes[&block_size].total_blocks;
             let chunk_size =
                 (max_chunk_size.min(min_chunk_size.max(total_blocks * block_size)) / 2 + 1)
                     .next_power_of_two();
-            let (block, allocated) = self.alloc_block(device, chunk_size)?;
+            let (block, allocated) = self.alloc_block(device, chunk_size, block_size)?;
             Ok((Chunk::from_block(block_size, block), allocated))
         }
     }
@@ -326,6 +326,7 @@ where
         chunk_index: u32,
         block_size: u64,
         count: u32,
+        align: u64,
     ) -> Option<DynamicBlock<B>> {
         log::trace!(
             "Allocate {} consecutive blocks of size {} from chunk {}",
@@ -335,8 +336,9 @@
         );
 
         let ref mut chunk = chunks[chunk_index as usize];
-        let block_index = chunk.acquire_blocks(count)?;
+        let block_index = chunk.acquire_blocks(count, block_size, align)?;
         let block_range = chunk.blocks_range(block_size, block_index, count);
+
         debug_assert_eq!((block_range.end - block_range.start) % count as u64, 0);
 
         Some(DynamicBlock {
@@ -359,6 +361,7 @@ where
         device: &B::Device,
         block_size: u64,
         count: u32,
+        align: u64,
     ) -> Result<(DynamicBlock<B>, u64), gfx_hal::device::AllocationError> {
         log::trace!(
             "Allocate {} consecutive blocks for size {} from the entry",
@@ -370,9 +373,13 @@ where
         let size_entry = self.sizes.entry(block_size).or_default();
 
         for chunk_index in (&size_entry.ready_chunks).iter() {
-            if let Some(block) =
-                Self::alloc_from_chunk(&mut size_entry.chunks, chunk_index, block_size, count)
-            {
+            if let Some(block) = Self::alloc_from_chunk(
+                &mut size_entry.chunks,
+                chunk_index,
+                block_size,
+                count,
+                align,
+            ) {
                 return Ok((block, 0));
             }
         }
@@ -386,8 +393,14 @@ where
         let size_entry = self.sizes.entry(block_size).or_default();
         let chunk_index = size_entry.chunks.insert(chunk) as u32;
 
-        let block = Self::alloc_from_chunk(&mut size_entry.chunks, chunk_index, block_size, count)
-            .expect("New chunk should yield blocks");
+        let block = Self::alloc_from_chunk(
+            &mut size_entry.chunks,
+            chunk_index,
+            block_size,
+            count,
+            align,
+        )
+        .expect("New chunk should yield blocks");
 
         if !size_entry.chunks[chunk_index as usize].is_exhausted() {
             size_entry.ready_chunks.add(chunk_index);
@@ -401,6 +414,7 @@ where
         &mut self,
         device: &B::Device,
         block_size: u64,
+        align: u64,
     ) -> Result<(DynamicBlock<B>, u64), gfx_hal::device::AllocationError> {
         log::trace!("Allocate block of size {}", block_size);
 
@@ -416,15 +430,20 @@ where
                 .range(block_size / 4..block_size * overhead)
                 .next()
             {
-                return self.alloc_from_entry(device, size, ((block_size - 1) / size + 1) as u32);
+                return self.alloc_from_entry(
+                    device,
+                    size,
+                    ((block_size - 1) / size + 1) as u32,
+                    align,
+                );
             }
         }
 
         if size_entry.total_blocks == MIN_BLOCKS_PER_CHUNK as u64 {
             self.chunks.insert(block_size);
         }
 
-        self.alloc_from_entry(device, block_size, 1)
+        self.alloc_from_entry(device, block_size, 1, align)
     }
 
     fn free_chunk(&mut self, device: &B::Device, chunk: Chunk<B>, block_size: u64) -> u64 {
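
An aside on the hunk above: `((block_size - 1) / size + 1)` is a
ceiling division computing how many `size`-byte blocks are needed to
cover a `block_size`-byte request. This multi-block path is exactly
where the alignment requirement was previously dropped, which is why
`align` is now threaded through. A quick standalone check of the
arithmetic (values hypothetical):

    fn main() {
        let size: u64 = 0x100; // existing smaller block size
        let block_size: u64 = 0x1000; // requested allocation size

        // Ceiling division, as used in `alloc_block` above:
        let count = ((block_size - 1) / size + 1) as u32;
        assert_eq!(count, 16);

        // Without the new `align` parameter, these 16 blocks could
        // start at any free index, yielding an offset aligned only
        // to 0x100.
    }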
@@ -517,7 +536,7 @@ where
             self.memory_type.0
         );
 
-        self.alloc_block(device, aligned_size)
+        self.alloc_block(device, aligned_size, align)
     }
 
     fn free(&mut self, device: &B::Device, block: DynamicBlock<B>) -> u64 {
@@ -612,20 +631,26 @@ where
         self.blocks == 0
     }
 
-    fn acquire_blocks(&mut self, count: u32) -> Option<u32> {
+    fn acquire_blocks(&mut self, count: u32, block_size: u64, align: u64) -> Option<u32> {
         debug_assert!(count > 0 && count <= MAX_BLOCKS_PER_CHUNK);
 
+        // Holds a bit-array of all positions with `count` free blocks.
         let mut blocks = !0;
         for i in 0..count {
             blocks &= self.blocks >> i;
         }
-        let index = blocks.trailing_zeros();
-        if index == MAX_BLOCKS_PER_CHUNK {
-            None
-        } else {
-            let mask = ((1 << count) - 1) << index;
-            self.blocks &= !mask;
-            Some(index)
+        // Find a position in `blocks` that is aligned.
+        while blocks != 0 {
+            let index = blocks.trailing_zeros();
+            blocks &= !(1 << index);
+
+            if (index as u64 * block_size) & (align - 1) == 0 {
+                let mask = ((1 << count) - 1) << index;
+                self.blocks &= !mask;
+                return Some(index);
+            }
         }
+        None
     }
 
     fn release_blocks(&mut self, index: u32, count: u32) {
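
The patched scan in `acquire_blocks` can be exercised in isolation.
Below is a simplified standalone sketch of the same logic, assuming
(as the `trailing_zeros` and `MAX_BLOCKS_PER_CHUNK` usage suggests)
that chunk occupancy is a 64-bit mask in which a set bit marks a
free block:

    // Simplified standalone version of the patched scan.
    fn acquire_blocks(free: &mut u64, count: u32, block_size: u64, align: u64) -> Option<u32> {
        // `blocks` keeps a bit set at every index where `count`
        // consecutive free blocks begin.
        let mut blocks = !0u64;
        for i in 0..count {
            blocks &= *free >> i;
        }
        // Walk the candidates, taking the first whose byte offset is aligned.
        while blocks != 0 {
            let index = blocks.trailing_zeros();
            blocks &= !(1u64 << index);

            if (index as u64 * block_size) & (align - 1) == 0 {
                *free &= !(((1u64 << count) - 1) << index);
                return Some(index);
            }
        }
        None
    }

    fn main() {
        // All blocks free except indices 0..4; block size 0x100, align 0x1000.
        let mut free = !0u64 & !0b1111;

        // The first run of 16 free blocks starts at index 4 (offset 0x400),
        // which is not 0x1000-aligned, so the scan moves on to index 16
        // (offset 0x1000).
        assert_eq!(acquire_blocks(&mut free, 16, 0x100, 0x1000), Some(16));
    }

The old code would have returned index 4 unconditionally, reproducing
exactly the kind of misaligned offset (0x43300) shown in the
validation message above.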
