Remove buffer resizing
@@ -29,6 +29,23 @@ namespace fgl::engine::memory
        return seed;
    }

    bool TransferData::convertRawToBuffer( Buffer& staging_buffer )
    {
        // Prepare the staging buffer first.
        assert( std::holds_alternative< RawData >( m_source ) );
        assert( !std::get< RawData >( m_source ).empty() );

        // Check if we are capable of allocating into the staging buffer
        if ( !staging_buffer->canAllocate( std::get< RawData >( m_source ).size(), 1 ) ) return false;

        HostVector< std::byte > vector { staging_buffer, std::get< RawData >( m_source ) };

        m_source = vector.getHandle();
        std::get< TransferBufferHandle >( m_source )->setReady( true );

        return true;
    }

    bool TransferData::
        performImageStage( vk::raii::CommandBuffer& cmd_buffer, std::uint32_t transfer_idx, std::uint32_t graphics_idx )
    {
@@ -142,23 +159,6 @@ namespace fgl::engine::memory
        return performBufferStage( copy_regions );
    }

    bool TransferData::convertRawToBuffer( Buffer& staging_buffer )
    {
        // Prepare the staging buffer first.
        assert( std::holds_alternative< RawData >( m_source ) );
        assert( !std::get< RawData >( m_source ).empty() );

        // Check if we are capable of allocating into the staging buffer
        if ( !staging_buffer->canAllocate( std::get< RawData >( m_source ).size(), 1 ) ) return false;

        HostVector< std::byte > vector { staging_buffer, std::get< RawData >( m_source ) };

        m_source = vector.getHandle();
        std::get< TransferBufferHandle >( m_source )->setReady( true );

        return true;
    }

    bool TransferData::stage(
        vk::raii::CommandBuffer& buffer,
        Buffer& staging_buffer,
@@ -96,7 +96,7 @@ namespace fgl::engine::memory

    void TransferManager::resizeBuffer( const std::uint64_t size )
    {
        m_staging_buffer.resize( size );
        // m_staging_buffer.resize( size );
    }

    void TransferManager::copySuballocationRegion(
@@ -11,15 +11,71 @@
#include "align.hpp"
#include "assets/transfer/TransferManager.hpp"
#include "engine/debug/logging/logging.hpp"
#include "engine/memory/buffers/BufferHandle.hpp"
#include "engine/memory/buffers/exceptions.hpp"
#include "engine/rendering/devices/Device.hpp"
#include "math/literals/size.hpp"
#include "memory/DefferedCleanup.hpp"

namespace fgl::engine::memory
{

    std::tuple< vk::Buffer, VmaAllocationInfo, VmaAllocation > BufferHandle::allocBuffer(
        const vk::DeviceSize memory_size, vk::BufferUsageFlags usage, const vk::MemoryPropertyFlags property_flags )
    {
        // Used for resizing.
        //TODO: Make this only available if resize is desired. Otherwise do not have it.
        usage |= vk::BufferUsageFlagBits::eTransferDst;
        usage |= vk::BufferUsageFlagBits::eTransferSrc;

        assert( memory_size > 0 );
        vk::BufferCreateInfo buffer_info {};
        buffer_info.pNext = VK_NULL_HANDLE;
        buffer_info.flags = {};
        buffer_info.size = memory_size;
        buffer_info.usage = usage;
        buffer_info.sharingMode = vk::SharingMode::eExclusive;
        buffer_info.queueFamilyIndexCount = 0;
        buffer_info.pQueueFamilyIndices = VK_NULL_HANDLE;

        VmaAllocationCreateInfo create_info {};

        create_info.usage = VMA_MEMORY_USAGE_AUTO;

        if ( property_flags & vk::MemoryPropertyFlagBits::eHostVisible )
            create_info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

        if ( usage & vk::BufferUsageFlagBits::eTransferSrc )
        {
            //Remove VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT if we are transfer src
            create_info.flags &= ~VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

            create_info.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
        }

        VmaAllocationInfo alloc_info {};
        VmaAllocation allocation {};

        const VkBufferCreateInfo& vk_buffer_info = buffer_info;
        VkBuffer buffer { VK_NULL_HANDLE };
        if ( vmaCreateBuffer(
                 Device::getInstance().allocator(), &vk_buffer_info, &create_info, &buffer, &allocation, nullptr )
             != VK_SUCCESS )
        {
            throw BufferException( "Unable to allocate memory in VMA" );
        }

        vmaGetAllocationInfo( Device::getInstance().allocator(), allocation, &alloc_info );

        return std::make_tuple<
            vk::Buffer,
            VmaAllocationInfo,
            VmaAllocation >( std::move( buffer ), std::move( alloc_info ), std::move( allocation ) );
    }
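
For reference, the VMA flag handling above reduces to a small decision: host-visible buffers get persistent mapping with random host access, and anything usable as a transfer source trades random access for sequential writes. The sketch below restates that logic standalone; it assumes the VMA 3.x header, and the helper name makeAllocCreateInfo is illustrative rather than part of this commit.

#include <vk_mem_alloc.h>

// Sketch only: pick VMA allocation flags the same way BufferHandle::allocBuffer does.
VmaAllocationCreateInfo makeAllocCreateInfo( const bool host_visible, const bool transfer_src )
{
    VmaAllocationCreateInfo create_info {};
    create_info.usage = VMA_MEMORY_USAGE_AUTO;

    // Host-visible memory is kept persistently mapped and writable in any order.
    if ( host_visible )
        create_info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

    // Staging (transfer-src) buffers only need sequential writes, which permits
    // write-combined memory types.
    if ( transfer_src )
    {
        create_info.flags &= ~VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
        create_info.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
    }

    return create_info;
}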

    void BufferHandle::deallocBuffer( const vk::Buffer& buffer, const VmaAllocation& allocation )
    {
        vmaDestroyBuffer( Device::getInstance().allocator(), buffer, allocation );
    }

    void BufferHandle::swap( BufferHandle& other ) noexcept
    {
        std::swap( m_buffer, other.m_buffer );
@@ -83,229 +139,43 @@ namespace fgl::engine::memory
        deallocBuffer( m_buffer, m_allocation );
    }

    void* BufferHandle::map( const BufferSuballocationHandle& handle ) const
    {
        if ( m_alloc_info.pMappedData == nullptr ) return nullptr;

        return static_cast< std::byte* >( m_alloc_info.pMappedData ) + handle.offset();
    }

    vk::DeviceSize BufferHandle::largestBlock() const
    {
        vk::DeviceSize largest { 0 };

        for ( const auto& size : m_free_blocks | std::views::values )
        {
            largest = std::max( largest, size );
        }

        return largest;
    }

    void BufferHandle::deallocBuffer( const vk::Buffer& buffer, const VmaAllocation& allocation )
    {
        vmaDestroyBuffer( Device::getInstance().allocator(), buffer, allocation );
    }

    vk::DeviceSize BufferHandle::used() const
    {
        vk::DeviceSize total_size { 0 };

        for ( auto& suballocation : m_active_suballocations )
        {
            if ( suballocation.expired() ) continue;
            total_size += suballocation.lock()->size();
        }

        return total_size;
    }

    std::tuple< vk::Buffer, VmaAllocationInfo, VmaAllocation > BufferHandle::allocBuffer(
        const vk::DeviceSize memory_size, vk::BufferUsageFlags usage, const vk::MemoryPropertyFlags property_flags )
    {
        // Used for resizing.
        //TODO: Make this only available if resize is desired. Otherwise do not have it.
        usage |= vk::BufferUsageFlagBits::eTransferDst;
        usage |= vk::BufferUsageFlagBits::eTransferSrc;

        assert( memory_size > 0 );
        vk::BufferCreateInfo buffer_info {};
        buffer_info.pNext = VK_NULL_HANDLE;
        buffer_info.flags = {};
        buffer_info.size = memory_size;
        buffer_info.usage = usage;
        buffer_info.sharingMode = vk::SharingMode::eExclusive;
        buffer_info.queueFamilyIndexCount = 0;
        buffer_info.pQueueFamilyIndices = VK_NULL_HANDLE;

        VmaAllocationCreateInfo create_info {};

        create_info.usage = VMA_MEMORY_USAGE_AUTO;

        if ( property_flags & vk::MemoryPropertyFlagBits::eHostVisible )
            create_info.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

        if ( usage & vk::BufferUsageFlagBits::eTransferSrc )
        {
            //Remove VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT if we are transfer src
            create_info.flags &= ~VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;

            create_info.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
        }

        VmaAllocationInfo alloc_info {};
        VmaAllocation allocation {};

        const VkBufferCreateInfo& vk_buffer_info = buffer_info;
        VkBuffer buffer { VK_NULL_HANDLE };
        if ( vmaCreateBuffer(
                 Device::getInstance().allocator(), &vk_buffer_info, &create_info, &buffer, &allocation, nullptr )
             != VK_SUCCESS )
        {
            throw BufferException( "Unable to allocate memory in VMA" );
        }

        vmaGetAllocationInfo( Device::getInstance().allocator(), allocation, &alloc_info );

        return std::make_tuple<
            vk::Buffer,
            VmaAllocationInfo,
            VmaAllocation >( std::move( buffer ), std::move( alloc_info ), std::move( allocation ) );
    }

    vk::DeviceMemory BufferHandle::getMemory() const
    {
        assert( m_alloc_info.deviceMemory != VK_NULL_HANDLE );

        return m_alloc_info.deviceMemory;
    }

    vk::DeviceSize BufferHandle::alignment() const
    {
        vk::DeviceSize size { 1 };

        if ( m_usage & vk::BufferUsageFlagBits::eStorageBuffer )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.minStorageBufferOffsetAlignment );
        }

        if ( m_usage & vk::BufferUsageFlagBits::eUniformBuffer )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.minUniformBufferOffsetAlignment );
        }

        if ( m_memory_properties & vk::MemoryPropertyFlagBits::eHostVisible )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.nonCoherentAtomSize );
        }

        return size;
    }

    std::string BufferHandle::sizeName() const
    {
        return std::format( "{}: {}", m_debug_name, literals::size_literals::toString( size() ) );
    }

    decltype( BufferHandle::m_free_blocks )::iterator BufferHandle::
        findAvailableBlock( vk::DeviceSize memory_size, std::uint32_t t_alignment )
    {
        //Find a free space.
        return std::ranges::find_if(
            m_free_blocks,
            [ this, memory_size, t_alignment ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
            {
                const auto [ offset, size ] = pair;

                const auto new_offset = align( offset, alignment(), t_alignment );
                const auto after_size { size - ( new_offset - offset ) };

                // If the size of the block after alignment is greater than or equal to the size of the memory we want to allocate using it.
                return after_size >= memory_size;
            } );
    }
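
The first-fit search above is easier to read in isolation. A minimal standard-C++ sketch follows; alignUp stands in for the align() helper from align.hpp (not shown in this diff), which is assumed to round an offset up to the next multiple of the combined buffer/request alignment, and all names here are illustrative.

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

using DeviceSize = std::uint64_t;
using FreeBlock = std::pair< DeviceSize, DeviceSize >; // { offset, size }

// Assumed behaviour of align(): round `offset` up to a multiple of `alignment`.
DeviceSize alignUp( const DeviceSize offset, const DeviceSize alignment )
{
    return ( offset + alignment - 1 ) / alignment * alignment;
}

// First fit: the first free block that still holds `memory_size` bytes once its
// start is rounded up to the requested alignment.
std::vector< FreeBlock >::iterator findFirstFit(
    std::vector< FreeBlock >& free_blocks, const DeviceSize memory_size, const DeviceSize alignment )
{
    return std::find_if(
        free_blocks.begin(),
        free_blocks.end(),
        [ memory_size, alignment ]( const FreeBlock& block )
        {
            const auto [ offset, size ] = block;
            const auto padding = alignUp( offset, alignment ) - offset;
            if ( padding >= size ) return false; // alignment padding swallows the whole block
            return size - padding >= memory_size;
        } );
}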

    BufferSuballocation Buffer::allocate( const vk::DeviceSize desired_size, const std::uint32_t alignment )
    {
        auto allocation { operator->()->allocate( desired_size, alignment ) };

        if ( !allocation )
        {
            // Grow to twice the current size, or the current size plus twice the desired size, whichever is bigger
            const auto optimal_size { std::max( this->size() * 2, this->size() + desired_size * 2 ) };
            this->resize( optimal_size );

            FGL_ASSERT( optimal_size == this->size(), "Optimal size not met!" );

            allocation = operator->()->allocate( desired_size, alignment );
        }

        return allocation;
    }
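
A short worked example of the growth rule above (grownSize is an illustrative helper, not engine API): a 64 MiB buffer that rejects a 512 KiB request doubles to 128 MiB, while a 256 MiB request dominates and yields 64 MiB + 2 * 256 MiB = 576 MiB.

#include <algorithm>
#include <cstdint>

// Double the buffer, or make room for twice the rejected request on top of the
// current size, whichever is larger.
std::uint64_t grownSize( const std::uint64_t current_size, const std::uint64_t desired_size )
{
    return std::max( current_size * 2, current_size + desired_size * 2 );
}

// grownSize( 64ull << 20, 512ull << 10 ) == 128 MiB
// grownSize( 64ull << 20, 256ull << 20 ) == 576 MiB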

    void Buffer::resize( const vk::DeviceSize size )
    {
        const std::shared_ptr< BufferHandle > old_handle { *this };
        const auto new_handle { old_handle->remake( size ) };
        new_handle->setDebugName( old_handle->m_debug_name );

        *this = new_handle;
    }

    std::shared_ptr< BufferHandle > BufferHandle::remake( vk::DeviceSize new_size )
    {
        ZoneScoped;
        auto new_handle { std::make_shared< BufferHandle >( new_size, m_usage, m_memory_properties ) };

        const auto& old_allocations { m_active_suballocations };
        const auto& old_allocations_traces { m_allocation_traces };

        // Remake all allocations into the new buffer
        std::vector<
            std::pair< std::shared_ptr< BufferSuballocationHandle >, std::shared_ptr< BufferSuballocationHandle > > >
            allocations {};
        allocations.reserve( old_allocations.size() );

        for ( const auto& suballocation_weak : old_allocations )
        {
            if ( suballocation_weak.expired() ) continue;

            auto suballocation { suballocation_weak.lock() };

            auto [ old_suballocation, new_suballocation ] = suballocation->reallocate( new_handle );

            allocations.emplace_back( old_suballocation, new_suballocation );

            // Copy the data from the old allocation to the new allocation
            TransferManager::getInstance().copySuballocationRegion( old_suballocation, new_suballocation );

            old_suballocation->flagReallocated( new_suballocation );
        }

        return new_handle;
    }

    /*
    void BufferHandle::resize( const vk::DeviceSize new_size )
    {
        log::warn( "Resizing buffer from {} to {}", size(), new_size );

        std::shared_ptr< BufferHandle > new_handle { new BufferHandle( new_size, m_usage, m_memory_properties ) };

        //Now we need to re-create all the current live allocations and transfer/replace them using the new buffer
        std::vector<
            std::pair< std::shared_ptr< BufferSuballocationHandle >, std::shared_ptr< BufferSuballocationHandle > > >
            allocations {};

        allocations.reserve( m_active_suballocations.size() );

        for ( auto& suballocation_weak : m_active_suballocations )
        {
            if ( suballocation_weak.expired() ) continue;
            try
            {
                auto suballocation { suballocation_weak.lock() };

                auto new_suballocation { new_handle->allocate( suballocation->m_size, suballocation->m_alignment ) };
                allocations.emplace_back( suballocation, new_suballocation );
            }
            catch ( std::bad_weak_ptr& e )
            {
                // noop
                void();
            }
        }

        std::swap( m_buffer, new_handle->m_buffer );
        std::swap( m_alloc_info, new_handle->m_alloc_info );
        std::swap( m_allocation, new_handle->m_allocation );
        std::swap( m_free_blocks, new_handle->m_free_blocks );
        std::swap( m_active_suballocations, new_handle->m_active_suballocations );
        std::swap( m_allocation_traces, new_handle->m_allocation_traces );

        // This transforms any memory::Buffer to be the `new_handle` we allocated above.
        const auto old_handle { new_handle };
        FGL_ASSERT( old_handle.get() != this, "Old handle should not be the current buffer anymore!" );
        new_handle = this->shared_from_this();

        for ( auto& [ old_allocation, new_allocation ] : allocations )
        {
            old_allocation->m_parent_buffer = old_handle;
            new_allocation->m_parent_buffer = new_handle;
        }

        //Now we need to transfer the data from the old buffer to the new buffer
        for ( const auto& allocation : allocations )
        {
            const auto& [ old_suballocation, new_suballocation ] = allocation;
            TransferManager::getInstance().copySuballocationRegion( old_suballocation, new_suballocation );
        }
    }
    */

    std::shared_ptr< BufferSuballocationHandle > BufferHandle::
        allocate( vk::DeviceSize desired_memory_size, const vk::DeviceSize t_alignment )
    {
@@ -403,6 +273,45 @@ namespace fgl::engine::memory
        return true;
    }

    void BufferHandle::free( BufferSuballocationHandle& info )
    {
        ZoneScoped;

        if ( info.offset() >= this->size() ) throw std::runtime_error( "Offset was outside of bounds of buffer" );
        if ( info.offset() + info.size() > this->size() )
            throw std::runtime_error(
                std::format(
                    "offset() + size() was outside the bounds of the buffer ({} + {} == {} >= {})",
                    info.offset(),
                    info.size(),
                    info.offset() + info.size(),
                    size() ) );

        //Add the block back to the free blocks
        m_free_blocks.emplace_back( info.offset(), info.size() );

        mergeFreeBlocks();

#ifndef NDEBUG
        //Check that we haven't lost any memory
        std::size_t sum { 0 };
        for ( const auto& size : this->m_free_blocks | std::views::values )
        {
            sum += size;
        }

        for ( auto& suballocation : m_active_suballocations )
        {
            if ( suballocation.expired() ) continue;
            sum += suballocation.lock()->size();
        }

        if ( sum != this->size() )
            throw std::runtime_error(
                std::format( "Memory leaked! Expected {} was {}: Lost {}", this->size(), sum, this->size() - sum ) );
#endif
    }
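
The #ifndef NDEBUG block above enforces an accounting invariant: every byte of the buffer is either inside a free block or owned by a live suballocation. A self-contained sketch of that check with simplified stand-in types (not the engine's) is below.

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

struct Suballocation
{
    std::uint64_t size;
};

// True when free bytes plus live suballocation bytes cover the whole buffer.
bool accountsForWholeBuffer(
    const std::uint64_t buffer_size,
    const std::vector< std::pair< std::uint64_t, std::uint64_t > >& free_blocks, // { offset, size }
    const std::vector< std::weak_ptr< Suballocation > >& active_suballocations )
{
    std::uint64_t sum { 0 };

    for ( const auto& block : free_blocks ) sum += block.second;

    for ( const auto& weak : active_suballocations )
    {
        // Expired handles have already returned their bytes to the free list.
        if ( const auto alive = weak.lock() ) sum += alive->size;
    }

    return sum == buffer_size;
}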

    void BufferHandle::mergeFreeBlocks()
    {
        ZoneScoped;
@@ -460,68 +369,75 @@ namespace fgl::engine::memory
        Device::getInstance().setDebugUtilsObjectName( info );
    }

    void BufferHandle::free( BufferSuballocationHandle& info )
    {
        ZoneScoped;

        if ( info.offset() >= this->size() ) throw std::runtime_error( "Offset was outside of bounds of buffer" );
        if ( info.offset() + info.size() > this->size() )
            throw std::runtime_error(
                std::format(
                    "offset() + size() was outside the bounds of the buffer ({} + {} == {} >= {})",
                    info.offset(),
                    info.size(),
                    info.offset() + info.size(),
                    size() ) );

        //Add the block back to the free blocks
        m_free_blocks.emplace_back( info.offset(), info.size() );

        mergeFreeBlocks();

#ifndef NDEBUG
        //Check that we haven't lost any memory
        std::size_t sum { 0 };
        for ( const auto& size : this->m_free_blocks | std::views::values )
        {
            sum += size;
        }

        for ( auto& suballocation : m_active_suballocations )
        {
            if ( suballocation.expired() ) continue;
            sum += suballocation.lock()->size();
        }

        if ( sum != this->size() )
            throw std::runtime_error(
                std::format( "Memory leaked! Expected {} was {}: Lost {}", this->size(), sum, this->size() - sum ) );
#endif
    }

    void* BufferHandle::map( const BufferSuballocationHandle& handle ) const
    {
        if ( m_alloc_info.pMappedData == nullptr ) return nullptr;

        return static_cast< std::byte* >( m_alloc_info.pMappedData ) + handle.offset();
    }

    vk::DeviceSize BufferHandle::used() const
    {
        vk::DeviceSize total_size { 0 };

        for ( auto& suballocation : m_active_suballocations )
        {
            if ( suballocation.expired() ) continue;
            total_size += suballocation.lock()->size();
        }

        return total_size;
    }

    vk::DeviceSize BufferHandle::alignment() const
    {
        vk::DeviceSize size { 1 };

        if ( m_usage & vk::BufferUsageFlagBits::eStorageBuffer )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.minStorageBufferOffsetAlignment );
        }

        if ( m_usage & vk::BufferUsageFlagBits::eUniformBuffer )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.minUniformBufferOffsetAlignment );
        }

        if ( m_memory_properties & vk::MemoryPropertyFlagBits::eHostVisible )
        {
            size = std::max( size, Device::getInstance().m_properties.limits.nonCoherentAtomSize );
        }

        return size;
    }

    vk::DeviceSize BufferHandle::largestBlock() const
    {
        vk::DeviceSize largest { 0 };

        for ( const auto& size : m_free_blocks | std::views::values )
        {
            largest = std::max( largest, size );
        }

        return largest;
    }

    decltype( BufferHandle::m_free_blocks )::iterator BufferHandle::
        findAvailableBlock( vk::DeviceSize memory_size, const vk::DeviceSize t_alignment )
    {
        //Find a free space.
        return std::ranges::find_if(
            m_free_blocks,
            [ this, memory_size, t_alignment ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
            {
                const auto [ offset, size ] = pair;

                const auto new_offset = align( offset, alignment(), t_alignment );
                const auto after_size { size - ( new_offset - offset ) };

                // If the size of the block after alignment is greater than or equal to the size of the memory we want to allocate using it.
                return after_size >= memory_size;
            } );
    }

    Buffer Buffer::operator=( const std::shared_ptr< BufferHandle >& other )
    {
        std::shared_ptr< BufferHandle >::operator=( other );
        return *this;
    }

    Buffer::
        Buffer( vk::DeviceSize memory_size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags memory_properties ) :
      std::shared_ptr< BufferHandle >( std::make_shared< BufferHandle >( memory_size, usage, memory_properties ) )
    {}

    Buffer::Buffer( const std::shared_ptr< BufferHandle >& buffer ) : std::shared_ptr< BufferHandle >( buffer )
    {}

    BufferSuballocation Buffer::allocate( const vk::DeviceSize desired_size, const std::uint32_t alignment ) const
    {
        return std::shared_ptr< BufferHandle >::operator->()->allocate( desired_size, alignment );
    }

    vk::DeviceSize Buffer::size() const
    {
        return std::shared_ptr< BufferHandle >::operator->()->size();
    }

} // namespace fgl::engine::memory
@@ -110,19 +110,11 @@ namespace fgl::engine::memory
        vk::Buffer getVkBuffer() const { return m_buffer; }

        //! Returns the vulkan memory handle for this buffer
        vk::DeviceMemory getMemory() const
        {
            assert( m_alloc_info.deviceMemory != VK_NULL_HANDLE );

            return m_alloc_info.deviceMemory;
        }
        vk::DeviceMemory getMemory() const;

        FGL_FORCE_INLINE std::string sizeName() const
        {
            return std::format( "{}: {}", m_debug_name, literals::size_literals::toString( size() ) );
        }
        std::string sizeName() const;

        friend struct BufferSuballocationHandle;
        friend class BufferSuballocationHandle;
        friend class BufferSuballocation; //TODO: Remove this

        friend gui::AllocationList gui::getTotalAllocated();
@@ -132,8 +124,6 @@ namespace fgl::engine::memory
    private:

        friend class Buffer;
        std::shared_ptr< BufferHandle > remake( vk::DeviceSize new_size );
        // void resize( vk::DeviceSize new_size );

    public:
@@ -170,33 +160,24 @@ namespace fgl::engine::memory
        //! Returns the required alignment for this buffer.
        vk::DeviceSize alignment() const;

        decltype( m_free_blocks )::iterator findAvailableBlock( vk::DeviceSize memory_size, std::uint32_t t_alignment );
        decltype( m_free_blocks )::iterator
            findAvailableBlock( vk::DeviceSize memory_size, vk::DeviceSize t_alignment );
    };

    class Buffer final : public std::shared_ptr< BufferHandle >
    {
        Buffer operator=( const std::shared_ptr< BufferHandle >& other )
        {
            std::shared_ptr< BufferHandle >::operator=( other );
            return *this;
        }
        Buffer operator=( const std::shared_ptr< BufferHandle >& other );

    public:

        [[nodiscard]] Buffer(
            vk::DeviceSize memory_size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags memory_properties ) :
          std::shared_ptr< BufferHandle >( std::make_shared< BufferHandle >( memory_size, usage, memory_properties ) )
        {}
        [[nodiscard]] Buffer(
            vk::DeviceSize memory_size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags memory_properties );

        [[nodiscard]] explicit Buffer( const std::shared_ptr< BufferHandle >& buffer ) :
          std::shared_ptr< BufferHandle >( buffer )
        {}
        [[nodiscard]] explicit Buffer( const std::shared_ptr< BufferHandle >& buffer );

        BufferSuballocation allocate( vk::DeviceSize desired_size, std::uint32_t alignment = 1 );
        BufferSuballocation allocate( const vk::DeviceSize desired_size, const std::uint32_t alignment = 1 ) const;

        [[nodiscard]] vk::DeviceSize size() const { return std::shared_ptr< BufferHandle >::operator->()->size(); }

        void resize( vk::DeviceSize size );
        [[nodiscard]] vk::DeviceSize size() const;

        ~Buffer() = default;
    };
@@ -11,11 +11,6 @@

namespace fgl::engine::memory
{
    vk::Buffer BufferSuballocationHandle::getBuffer() const
    {
        return m_parent_buffer->getVkBuffer();
    }

    BufferSuballocationHandle::BufferSuballocationHandle(
        const Buffer& p_buffer,
        const vk::DeviceSize offset,
@@ -31,28 +26,11 @@ namespace fgl::engine::memory
        // assert( memory_size != 0 && "BufferSuballocation::BufferSuballocation() called with memory_size == 0" );
    }

    vk::Buffer BufferSuballocationHandle::getVkBuffer() const
    {
        return m_parent_buffer->getVkBuffer();
    }

    BufferSuballocationHandle::~BufferSuballocationHandle()
    {
        m_parent_buffer->free( *this );
    }

    vk::BufferCopy BufferSuballocationHandle::copyRegion(
        const BufferSuballocationHandle& target,
        const std::size_t suballocation_offset,
        const vk::DeviceSize size ) const
    {
        vk::BufferCopy copy {};
        copy.size = size == 0 ? std::min( this->m_size, target.m_size ) : size;
        copy.srcOffset = this->offset();
        copy.dstOffset = target.offset() + suballocation_offset;
        return copy;
    }

    void BufferSuballocationHandle::copyTo(
        const vk::raii::CommandBuffer& cmd_buffer,
        const BufferSuballocationHandle& other,
@@ -65,20 +43,39 @@ namespace fgl::engine::memory
        cmd_buffer.copyBuffer( this->getVkBuffer(), other.getVkBuffer(), copy_regions );
    }

    std::pair< std::shared_ptr< BufferSuballocationHandle >, std::shared_ptr< BufferSuballocationHandle > >
        BufferSuballocationHandle::reallocate( const std::shared_ptr< BufferHandle >& shared )
    {
        auto old_allocation { this->shared_from_this() };
        auto new_allocation { shared->allocate( m_size, m_alignment ) };

        return { old_allocation, new_allocation };
    }

    void BufferSuballocationHandle::markSource( const std::shared_ptr< BufferSuballocationHandle >& source )
    {
        m_dependents.push_back( source );
    }

    void BufferSuballocationHandle::setReady( const bool value )
    {
        std::ranges::remove_if( m_dependents, []( const auto& handle ) { return handle.expired(); } );
        m_staged = value;
    }
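
A note on the pruning step in setReady above: std::ranges::remove_if only shifts the matching elements to the end of the container and returns that tail. The self-contained sketch below shows the full erase-remove form of the same idea; the element type int is a placeholder for the engine's weak handles.

#include <algorithm>
#include <memory>
#include <vector>

// Drop expired watchers from a list of weak references.
void pruneExpired( std::vector< std::weak_ptr< int > >& dependents )
{
    const auto expired_tail = std::ranges::remove_if(
        dependents, []( const auto& handle ) { return handle.expired(); } );
    dependents.erase( expired_tail.begin(), expired_tail.end() );
}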

    vk::BufferCopy BufferSuballocationHandle::copyRegion(
        const BufferSuballocationHandle& target,
        const std::size_t suballocation_offset,
        const vk::DeviceSize size ) const
    {
        vk::BufferCopy copy {};
        copy.size = size == 0 ? std::min( this->m_size, target.m_size ) : size;
        copy.srcOffset = this->offset();
        copy.dstOffset = target.offset() + suballocation_offset;
        return copy;
    }
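
The offset arithmetic in copyRegion above, restated without the Vulkan types; Copy and makeCopyRegion are stand-ins for vk::BufferCopy and the member function, and a size of zero means "copy as much as both suballocations can hold".

#include <algorithm>
#include <cstdint>

struct Copy
{
    std::uint64_t srcOffset;
    std::uint64_t dstOffset;
    std::uint64_t size;
};

// Source is copied from its own offset; the destination may additionally be
// shifted inside the target suballocation.
Copy makeCopyRegion(
    const std::uint64_t src_offset, const std::uint64_t src_size,
    const std::uint64_t dst_offset, const std::uint64_t dst_size,
    const std::uint64_t extra_dst_offset, const std::uint64_t requested_size )
{
    Copy copy {};
    copy.size = requested_size == 0 ? std::min( src_size, dst_size ) : requested_size;
    copy.srcOffset = src_offset;
    copy.dstOffset = dst_offset + extra_dst_offset;
    return copy;
}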

    vk::Buffer BufferSuballocationHandle::getBuffer() const
    {
        return m_parent_buffer->getVkBuffer();
    }

    vk::Buffer BufferSuballocationHandle::getVkBuffer() const
    {
        return m_parent_buffer->getVkBuffer();
    }

    bool BufferSuballocationHandle::stable() const
    {
        return std::ranges::
@@ -89,10 +86,4 @@ namespace fgl::engine::memory
    {
        return m_staged;
    }

    void BufferSuballocationHandle::setReady( const bool value )
    {
        std::ranges::remove_if( m_dependents, []( const auto& handle ) { return handle.expired(); } );
        m_staged = value;
    }
} // namespace fgl::engine::memory
@@ -39,9 +39,6 @@ namespace fgl::engine::memory

        void* m_ptr { nullptr };

        bool m_reallocated { false };
        std::shared_ptr< BufferSuballocationHandle > m_reallocated_to { nullptr };

        bool m_staged { false };
        std::vector< std::weak_ptr< BufferSuballocationHandle > > m_dependents {};
@@ -61,19 +58,6 @@ namespace fgl::engine::memory
            const BufferSuballocationHandle& other,
            std::size_t offset ) const;

        void flagReallocated( const std::shared_ptr< BufferSuballocationHandle >& shared )
        {
            m_reallocated = true;
            m_reallocated_to = shared;
        }

        std::pair< std::shared_ptr< BufferSuballocationHandle >, std::shared_ptr< BufferSuballocationHandle > >
            reallocate( const std::shared_ptr< BufferHandle >& shared );

        bool reallocated() const { return m_reallocated; }

        std::shared_ptr< BufferSuballocationHandle > reallocatedTo() const { return m_reallocated_to; }

        void markSource( const std::shared_ptr< BufferSuballocationHandle >& source );

        void setReady( bool value );