depth-prepass #1
@@ -116,11 +116,11 @@ namespace fgl::engine::memory
|
||||
bool TransferData::performBufferStage( CopyRegionMap& copy_regions )
|
||||
{
|
||||
ZoneScoped;
|
||||
auto& source { std::get< TransferBufferHandle >( m_source ) };
|
||||
auto& target { std::get< TransferBufferHandle >( m_target ) };
|
||||
const auto& source { std::get< TransferBufferHandle >( m_source ) };
|
||||
const auto& target { std::get< TransferBufferHandle >( m_target ) };
|
||||
const CopyRegionKey key { std::make_pair( source->getBuffer(), target->getBuffer() ) };
|
||||
|
||||
const auto copy_info { source->copyRegion( *target, m_target_offset ) };
|
||||
const auto copy_info { source->copyRegion( *target, m_target_offset, m_size ) };
|
||||
|
||||
if ( auto itter = copy_regions.find( key ); itter != copy_regions.end() )
|
||||
{
|
||||
|
||||
@@ -335,6 +335,17 @@ namespace fgl::engine::memory
|
||||
const auto aligned_offset { align( selected_block_offset, alignment(), t_alignment ) };
|
||||
FGL_ASSERT( aligned_offset % combineAlignment( alignment(), t_alignment ) == 0, "Alignment failed!" );
|
||||
|
||||
const auto stride { t_alignment };
|
||||
FGL_ASSERT(
|
||||
aligned_offset % stride == 0,
|
||||
std::format(
|
||||
"{} % {} != 0 (Was {}). The offset should be aligned with the stride, alignment() = {}, t_alignment = {}",
|
||||
aligned_offset,
|
||||
stride,
|
||||
aligned_offset % stride,
|
||||
alignment(),
|
||||
t_alignment ) );
|
||||
|
||||
//Fix the offset and size if they aren't aligned
|
||||
if ( aligned_offset != selected_block_offset )
|
||||
{
|
||||
|
||||
@@ -27,6 +27,7 @@ namespace fgl::engine::memory
|
||||
m_alignment( alignment ),
|
||||
m_ptr( m_parent_buffer->map( *this ) )
|
||||
{
|
||||
if ( memory_size == 1024 ) throw std::runtime_error( "AAAAA" );
|
||||
// assert( memory_size != 0 && "BufferSuballocation::BufferSuballocation() called with memory_size == 0" );
|
||||
}
|
||||
|
||||
@@ -40,11 +41,13 @@ namespace fgl::engine::memory
|
||||
m_parent_buffer->free( *this );
|
||||
}
|
||||
|
||||
vk::BufferCopy BufferSuballocationHandle::
|
||||
copyRegion( const BufferSuballocationHandle& target, const std::size_t suballocation_offset ) const
|
||||
vk::BufferCopy BufferSuballocationHandle::copyRegion(
|
||||
const BufferSuballocationHandle& target,
|
||||
const std::size_t suballocation_offset,
|
||||
const vk::DeviceSize size ) const
|
||||
{
|
||||
vk::BufferCopy copy {};
|
||||
copy.size = std::min( this->m_size, target.m_size );
|
||||
copy.size = size == 0 ? std::min( this->m_size, target.m_size ) : size;
|
||||
copy.srcOffset = this->offset();
|
||||
copy.dstOffset = target.offset() + suballocation_offset;
|
||||
return copy;
|
||||
|
||||
@@ -78,8 +78,8 @@ namespace fgl::engine::memory
|
||||
|
||||
void setReady( bool value );
|
||||
|
||||
[[nodiscard]] vk::BufferCopy
|
||||
copyRegion( const BufferSuballocationHandle& target, std::size_t suballocation_offset ) const;
|
||||
[[nodiscard]] vk::BufferCopy copyRegion(
|
||||
const BufferSuballocationHandle& target, std::size_t suballocation_offset, vk::DeviceSize size = 0 ) const;
|
||||
|
||||
[[nodiscard]] vk::Buffer getBuffer() const;
|
||||
[[nodiscard]] vk::Buffer getVkBuffer() const;
|
||||
|
||||
@@ -10,7 +10,9 @@
|
||||
namespace fgl::engine::memory
|
||||
{
|
||||
|
||||
constexpr std::uint32_t min_capacity { 1024 };
|
||||
//TODO: Lower this number to make small copies not as impactful to the staging buffer.
|
||||
// For some reason lowering this to a very low value causes the alignment for allocations to fail.
|
||||
constexpr std::uint32_t min_capacity { 512 };
|
||||
|
||||
[[nodiscard]] BufferVector::BufferVector( Buffer& buffer, const std::uint32_t count, const std::uint32_t stride ) :
|
||||
BufferSuballocation( buffer.allocate( std::max( count, min_capacity ) * stride, stride ) ),
|
||||
@@ -26,6 +28,13 @@ namespace fgl::engine::memory
|
||||
m_offset,
|
||||
m_stride,
|
||||
m_offset % m_stride ) );
|
||||
FGL_ASSERT(
|
||||
count * stride <= this->bytesize(),
|
||||
std::format(
|
||||
"Allocation create insufficent buffer for count * stride = {} * {} > {}",
|
||||
count,
|
||||
stride,
|
||||
this->bytesize() ) );
|
||||
}
|
||||
|
||||
//! Returns the offset count from the start of the buffer to the first element
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
namespace fgl::engine::memory
|
||||
{
|
||||
struct Buffer;
|
||||
class Buffer;
|
||||
}
|
||||
|
||||
namespace fgl::engine
|
||||
|
||||
@@ -33,6 +33,8 @@ namespace fgl::engine
|
||||
|
||||
class Index
|
||||
{
|
||||
static_assert( std::is_default_constructible_v< T >, "T must be default constructible" );
|
||||
T m_source_data {};
|
||||
IndexedVector< T >& m_vector;
|
||||
std::uint32_t m_idx { std::numeric_limits< std::uint32_t >::max() };
|
||||
|
||||
@@ -48,20 +50,30 @@ namespace fgl::engine
|
||||
|
||||
using Vec = IndexedVector;
|
||||
|
||||
void update( const T& t ) { return m_vector.updateData( m_idx, t ); }
|
||||
|
||||
Index& operator=( const Index& other ) = delete;
|
||||
|
||||
Index( const Index& other ) : m_vector( other.m_vector ), m_idx( m_vector.acquireInternal() )
|
||||
void update( const T& t )
|
||||
{
|
||||
//TODO: Update the data from the original item
|
||||
static_assert( std::is_copy_constructible_v< T >, "T must be copy constructible" );
|
||||
m_source_data = t;
|
||||
return m_vector.updateData( m_idx, t );
|
||||
}
|
||||
|
||||
// Copy
|
||||
Index& operator=( const Index& other ) = delete;
|
||||
|
||||
// Copy
|
||||
Index( const Index& other ) : m_vector( other.m_vector ), m_idx( m_vector.acquireInternal() )
|
||||
{
|
||||
update( other.m_source_data );
|
||||
}
|
||||
|
||||
// Move
|
||||
Index& operator=( Index&& other ) = delete;
|
||||
|
||||
// Move
|
||||
Index( Index&& other ) noexcept : m_vector( other.m_vector ), m_idx( other.m_idx )
|
||||
{
|
||||
other.m_idx = std::numeric_limits< std::uint32_t >::max();
|
||||
update( other.m_source_data );
|
||||
}
|
||||
|
||||
// Privated to force returning to the IndexedVector
|
||||
|
||||
Reference in New Issue
Block a user