Completely rework staging system

This commit is contained in:
2024-06-27 14:23:41 -04:00
parent 0374ac47ce
commit db5de6096d
42 changed files with 1389 additions and 426 deletions

4
.gitignore vendored
View File

@@ -40,3 +40,7 @@ read.lock
*.psess
*.vsp
*.vspx
# Hide log files
*.log
/vklog

View File

@@ -14,6 +14,7 @@
#include "KeyboardMovementController.hpp"
#include "assets/stores.hpp"
#include "engine/Average.hpp"
#include "engine/assets/TransferManager.hpp"
#include "engine/buffers/UniqueFrameSuballocation.hpp"
#include "engine/debug/drawers.hpp"
#include "engine/literals/size.hpp"
@@ -31,7 +32,9 @@ namespace fgl::engine
{
ZoneScoped;
using namespace fgl::literals::size_literals;
initGlobalStagingBuffer( 512_MiB );
TransferManager::createInstance( device, 512_MiB );
#if ENABLE_IMGUI
initImGui();
#endif
@@ -40,19 +43,6 @@ namespace fgl::engine
static Average< float, 60 * 15 > rolling_ms_average;
void preStage( vk::raii::CommandBuffer& cmd_buffer )
{
ZoneScopedN( "Pre-Stage" );
getTextureStore().stage( cmd_buffer );
}
void postStage()
{
ZoneScopedN( "Post-Stage" );
getTextureStore().confirmStaged();
}
void EngineContext::run()
{
TracyCZoneN( TRACY_PrepareEngine, "Inital Run", true );
@@ -127,6 +117,8 @@ namespace fgl::engine
while ( !m_window.shouldClose() )
{
TransferManager::getInstance().submitNow();
ZoneScopedN( "Poll" );
glfwPollEvents();
@@ -157,8 +149,6 @@ namespace fgl::engine
if ( auto [ command_buffer, gui_command_buffer ] = m_renderer.beginFrame(); *command_buffer )
{
preStage( command_buffer );
ZoneScopedN( "Render" );
//Update
const std::uint16_t frame_index { m_renderer.getFrameIndex() };
@@ -194,6 +184,9 @@ namespace fgl::engine
m_culling_system.startPass( frame_info );
TracyVkCollect( frame_info.tracy_ctx, *command_buffer );
TransferManager::getInstance().recordOwnershipTransferDst( command_buffer );
m_culling_system.wait();
m_renderer.beginSwapchainRendererPass( command_buffer );
@@ -210,10 +203,13 @@ namespace fgl::engine
m_renderer.endFrame();
TransferManager::getInstance().dump();
FrameMark;
}
postStage();
using namespace std::chrono_literals;
std::this_thread::sleep_for( 13ms );
}
Device::getInstance().device().waitIdle();
@@ -293,8 +289,6 @@ namespace fgl::engine
object.getTransform().translation = WorldCoordinate( 0.0f );
object.addFlag( IS_VISIBLE | IS_ENTITY );
object.getModel()->stage( command_buffer );
m_game_objects_root.addGameObject( std::move( object ) );
}
}

View File

@@ -21,20 +21,9 @@ namespace fgl::engine
template < typename T >
struct AssetInterface
{
//! Stages the asset to the device (GPU)
virtual void stage( vk::raii::CommandBuffer& buffer ) = 0;
friend class AssetStore< T >;
bool m_been_staged { false };
public:
//! Is the Asset ready to be used. (Returns false if not staged)
inline bool isReady() const { return m_been_staged; }
inline void setReady() { m_been_staged = true; };
AssetInterface() = default;
virtual ~AssetInterface() = default;
};

View File

@@ -0,0 +1,505 @@
//
// Created by kj16609 on 6/26/24.
//
#include "TransferManager.hpp"
#include "engine/buffers/BufferSuballocation.hpp"
#include "engine/buffers/exceptions.hpp"
#include "engine/buffers/vector/HostVector.hpp"
#include "engine/image/Image.hpp"
#include "engine/image/ImageHandle.hpp"
#include "engine/literals/size.hpp"
#include "engine/texture/Texture.hpp"
namespace fgl::engine
{
//! Drains the pending transfer queue, staging each item and recording its copy commands.
//! @param command_buffer Transfer-queue command buffer, already in the recording state.
//! Items that cannot be staged (staging buffer full) are re-queued and retried on a later submit.
void TransferManager::recordCommands( vk::raii::CommandBuffer& command_buffer )
{
	ZoneScoped;

	//Keep inserting new commands until we fill up the staging buffer
	if ( queue.size() > 0 ) log::info( "[TransferManager]: Queue size: {}", queue.size() );

	while ( queue.size() > 0 )
	{
		TransferData data { std::move( queue.front() ) };
		queue.pop();

		if ( data.stage(
				 command_buffer, *staging_buffer, copy_regions, transfer_queue_index, graphics_queue_index ) )
		{
			processing.emplace_back( std::move( data ) );
		}
		else
		{
			// Staging failed (staging buffer out of space) — stop filling this submission.
			log::info( "Unable to stage object. Breaking out of loop" );
			// FIX: move instead of copying — a copy duplicated the (possibly large) raw byte payload.
			// NOTE(review): push() re-queues at the BACK, so relative ordering of pending
			// transfers is not preserved across a failed stage — confirm this is acceptable.
			queue.push( std::move( data ) );
			break;
		}
	}

	std::vector< vk::BufferMemoryBarrier > from_memory_barriers { createFromGraphicsBarriers() };

	// Acquire the buffer regions from the graphics queue family
	command_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eBottomOfPipe,
		vk::PipelineStageFlagBits::eTransfer,
		vk::DependencyFlags(),
		{},
		from_memory_barriers,
		{} );

	//Record all the buffer copies, one vkCmdCopyBuffer per (source, target) buffer pair
	for ( auto& [ key, regions ] : copy_regions )
	{
		auto& [ source, target ] = key;
		command_buffer.copyBuffer( source, target, regions );
	}

	std::vector< vk::BufferMemoryBarrier > to_buffer_memory_barriers { createFromTransferBarriers() };

	// Release the buffer regions back to the graphics queue
	command_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eTransfer,
		vk::PipelineStageFlagBits::eVertexInput | vk::PipelineStageFlagBits::eVertexShader,
		vk::DependencyFlags(),
		{},
		to_buffer_memory_barriers,
		{} );
}
//! Ends recording and submits the command buffer on the transfer queue.
//! Signals transfer_semaphore and completion_fence when the GPU finishes (see dump()).
void TransferManager::submitBuffer( vk::raii::CommandBuffer& command_buffer )
{
	ZoneScoped;
	// Reset the fence BEFORE submitting so the wait in dump() observes this submission.
	std::vector< vk::Fence > fences { completion_fence };
	Device::getInstance()->resetFences( fences );

	command_buffer.end();

	vk::SubmitInfo info {};
	std::vector< vk::CommandBuffer > buffers { *command_buffer };
	std::vector< vk::Semaphore > sems { transfer_semaphore };
	info.setSignalSemaphores( sems );
	info.setCommandBuffers( buffers );

	transfer_queue.submit( info, completion_fence );
}
//! Builds one barrier per pending copy region, handing the destination buffer ranges
//! from the graphics queue family to the transfer queue family.
std::vector< vk::BufferMemoryBarrier > TransferManager::createFromGraphicsBarriers()
{
	std::vector< vk::BufferMemoryBarrier > result {};

	for ( const auto& [ buffer_pair, copies ] : copy_regions )
	{
		const auto& dst_buffer { std::get< 1 >( buffer_pair ) };

		for ( const auto& copy : copies )
		{
			vk::BufferMemoryBarrier transfer_barrier {};
			transfer_barrier.srcQueueFamilyIndex = graphics_queue_index;
			transfer_barrier.dstQueueFamilyIndex = transfer_queue_index;
			transfer_barrier.srcAccessMask = vk::AccessFlagBits::eNone;
			transfer_barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;
			transfer_barrier.buffer = dst_buffer;
			transfer_barrier.offset = copy.dstOffset;
			transfer_barrier.size = copy.size;

			result.emplace_back( transfer_barrier );
		}
	}

	return result;
}
//! Returns barriers that acquire ownership of the destination regions for the transfer queue.
//! NOTE: intentionally identical to createFromGraphicsBarriers() — a Vulkan queue family
//! ownership transfer requires matching release/acquire barriers on both queues.
std::vector< vk::BufferMemoryBarrier > TransferManager::createToTransferBarriers()
{
	std::vector< vk::BufferMemoryBarrier > barriers {};

	for ( auto& [ key, regions ] : copy_regions )
	{
		auto& [ source, target ] = key;

		for ( const auto& region : regions )
		{
			vk::BufferMemoryBarrier barrier {};
			barrier.buffer = target;
			barrier.offset = region.dstOffset;
			barrier.size = region.size;
			barrier.srcAccessMask = vk::AccessFlagBits::eNone;
			barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;
			barrier.srcQueueFamilyIndex = graphics_queue_index;
			barrier.dstQueueFamilyIndex = transfer_queue_index;

			barriers.emplace_back( barrier );
		}
	}

	return barriers;
}
//! Returns barriers releasing the freshly copied regions from the transfer family to the
//! graphics family, making them visible for index and vertex-attribute reads.
std::vector< vk::BufferMemoryBarrier > TransferManager::createFromTransferBarriers()
{
	std::vector< vk::BufferMemoryBarrier > barriers {};

	for ( auto& [ key, regions ] : copy_regions )
	{
		auto& [ source, target ] = key;

		for ( const auto& region : regions )
		{
			vk::BufferMemoryBarrier barrier {};
			barrier.buffer = target;
			barrier.offset = region.dstOffset;
			barrier.size = region.size;
			barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
			barrier.dstAccessMask = vk::AccessFlagBits::eVertexAttributeRead | vk::AccessFlagBits::eIndexRead;
			barrier.srcQueueFamilyIndex = transfer_queue_index;
			barrier.dstQueueFamilyIndex = graphics_queue_index;

			barriers.emplace_back( barrier );
		}
	}

	return barriers;
}
//! Returns acquire-side barriers taking the copied regions from the transfer family back
//! to the graphics family (recorded on the graphics queue via recordOwnershipTransferDst()).
std::vector< vk::BufferMemoryBarrier > TransferManager::createToGraphicsBarriers()
{
	std::vector< vk::BufferMemoryBarrier > barriers {};

	for ( const auto& [ key, regions ] : copy_regions )
	{
		const auto& [ src, dst ] = key;

		for ( const auto& region : regions )
		{
			vk::BufferMemoryBarrier barrier {};
			barrier.buffer = dst;
			barrier.offset = region.dstOffset;
			barrier.size = region.size;
			// FIX: this statement previously ended with a comma (comma operator), not a
			// semicolon — behaviorally identical here, but a typo waiting to bite.
			barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
			barrier.dstAccessMask = vk::AccessFlagBits::eIndexRead | vk::AccessFlagBits::eVertexAttributeRead;
			barrier.srcQueueFamilyIndex = transfer_queue_index;
			barrier.dstQueueFamilyIndex = graphics_queue_index;

			barriers.emplace_back( barrier );
		}
	}

	return barriers;
}
inline static std::unique_ptr< TransferManager > global_transfer_manager {};
//! Records barriers moving the pending copy regions to the transfer queue family.
//! NOTE(review): presumably recorded on the releasing (graphics) queue's command
//! buffer — confirm against the caller.
void TransferManager::takeOwnership( vk::raii::CommandBuffer& command_buffer )
{
	std::vector< vk::BufferMemoryBarrier > barriers { createToTransferBarriers() };

	command_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eNone, vk::PipelineStageFlagBits::eTransfer, {}, {}, barriers, {} );
}
//! Records the acquire barriers on the destination (graphics) command buffer, taking the
//! copied buffer regions back from the transfer queue family before vertex/index reads.
void TransferManager::recordOwnershipTransferDst( vk::raii::CommandBuffer& command_buffer )
{
	ZoneScoped;
	std::vector< vk::BufferMemoryBarrier > barriers { createToGraphicsBarriers() };

	command_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eTransfer,
		vk::PipelineStageFlagBits::eVertexInput | vk::PipelineStageFlagBits::eVertexShader,
		{},
		{},
		barriers,
		{} );
}
//! Blocks until the last submitted transfer completes, then drops its bookkeeping.
//! NOTE(review): completion_fence is created unsignaled — calling dump() before any
//! submitBuffer() would wait forever; confirm callers always submit first.
void TransferManager::dump()
{
	//Block on fence
	std::vector< vk::Fence > fences { completion_fence };
	(void)Device::getInstance()->waitForFences( fences, VK_TRUE, std::numeric_limits< std::size_t >::max() );

	processing.clear();
	copy_regions.clear();
}
//! Prepares the staging buffer ahead of submission. Currently a stub — staging
//! happens inline in recordCommands().
void TransferManager::prepareStaging()
{}
//! Creates the process-wide TransferManager singleton.
//! @param device Device used for the transfer queue and sync primitives.
//! @param buffer_size Size in bytes of the host-visible staging buffer.
void TransferManager::createInstance( Device& device, std::uint64_t buffer_size )
{
	using fgl::literals::size_literals::to_string;

	log::info( "Transfer manager created with a buffer size of {}", to_string( buffer_size ) );

	global_transfer_manager = std::make_unique< TransferManager >( device, buffer_size );
}
//! Returns the singleton created by createInstance(). Asserts if called before creation.
TransferManager& TransferManager::getInstance()
{
	assert( global_transfer_manager );
	return *global_transfer_manager;
}
//! Queues a device buffer -> device buffer copy (e.g. from BufferVector::resize()).
void TransferManager::copyToBuffer( BufferVector& source, BufferVector& target )
{
	TransferData transfer_data { source.getHandle(), target.getHandle() };
	queue.emplace( std::move( transfer_data ) );
}
//! Queues raw pixel bytes to be staged and copied into `image`.
//! @param data Non-empty byte payload; consumed by this call.
void TransferManager::copyToImage( std::vector< std::byte >&& data, Image& image )
{
	assert( data.size() > 0 );

	// std::move, not std::forward: `data` is a plain rvalue-reference parameter,
	// not a forwarding reference, so forward<> was misleading.
	TransferData transfer_data { std::move( data ), image.m_handle };

	assert( std::get< TransferData::RawData >( transfer_data.m_source ).size() > 0 );

	queue.emplace( std::move( transfer_data ) );

	log::debug( "[TransferManager]: Queue size now {}", queue.size() );
}
//! Builds queue handles, sync primitives and the staging buffer.
//! NOTE(review): transfer_buffers is allocated through Device::getInstance() while every
//! other member uses the `device` parameter — presumably the same device; confirm.
TransferManager::TransferManager( Device& device, std::uint64_t buffer_size ) :
  transfer_queue_index( device.phyDevice()
                            .queueInfo()
                            .getIndex( vk::QueueFlagBits::eTransfer, vk::QueueFlagBits::eGraphics ) ),
  graphics_queue_index( device.phyDevice().queueInfo().getIndex( vk::QueueFlagBits::eGraphics ) ),
  transfer_queue( device->getQueue( transfer_queue_index, 0 ) ),
  transfer_semaphore( device->createSemaphore( {} ) ),
  transfer_buffers( Device::getInstance().device().allocateCommandBuffers( cmd_buffer_allocinfo ) ),
  // Fence starts unsignaled; dump() must only be called after a submit (see dump()).
  completion_fence( device->createFence( {} ) )
{
	resizeBuffer( buffer_size );
}
//! Records and submits all currently queued transfers on the transfer queue.
void TransferManager::submitNow()
{
	ZoneScoped;
	auto& transfer_buffer { transfer_buffers[ 0 ] };
	transfer_buffer.reset();

	vk::CommandBufferBeginInfo info {};
	transfer_buffer.begin( info );

	recordCommands( transfer_buffer );

	submitBuffer( transfer_buffer );

	if ( processing.size() > 0 ) log::debug( "Submitted {} objects to be transfered", processing.size() );

	// NOTE(review): targets are marked ready immediately after queue submission, before
	// the completion fence is waited on (dump()) — consumers could observe "ready" data
	// the GPU has not finished copying. Confirm external synchronization covers this.
	for ( auto& processed : processing )
	{
		processed.markGood();
	}

	//Drop the data
	processing.clear();
}
//! Records the staging-buffer -> image copy, bracketed by two layout transitions:
//! eUndefined -> eTransferDstOptimal before the copy, then
//! eTransferDstOptimal -> eShaderReadOnlyOptimal (with a transfer -> graphics queue
//! family release) after it. Always returns true.
bool TransferData::
	performImageStage( vk::raii::CommandBuffer& cmd_buffer, std::uint32_t transfer_idx, std::uint32_t graphics_idx )
{
	auto& source_buffer { std::get< TransferBufferHandle >( m_source ) };
	auto& dest_image { std::get< TransferImageHandle >( m_target ) };

	// Whole image: single mip level, single array layer, color aspect only.
	vk::ImageSubresourceRange range;
	range.aspectMask = vk::ImageAspectFlagBits::eColor;
	range.baseMipLevel = 0;
	range.levelCount = 1;
	range.baseArrayLayer = 0;
	range.layerCount = 1;

	// Transition eUndefined -> eTransferDstOptimal so the copy can write the image.
	vk::ImageMemoryBarrier barrier {};
	barrier.oldLayout = vk::ImageLayout::eUndefined;
	barrier.newLayout = vk::ImageLayout::eTransferDstOptimal;
	barrier.image = dest_image->getVkImage();
	barrier.subresourceRange = range;
	barrier.srcAccessMask = {};
	barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;

	const std::vector< vk::ImageMemoryBarrier > barriers_to { barrier };

	cmd_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eTopOfPipe,
		vk::PipelineStageFlagBits::eTransfer,
		vk::DependencyFlags(),
		{},
		{},
		barriers_to );

	vk::BufferImageCopy region {};
	region.bufferOffset = source_buffer->getOffset();
	region.bufferRowLength = 0; // 0 = tightly packed rows
	region.bufferImageHeight = 0;

	region.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
	region.imageSubresource.mipLevel = 0;
	region.imageSubresource.baseArrayLayer = 0;
	region.imageSubresource.layerCount = 1;

	region.imageOffset = vk::Offset3D( 0, 0, 0 );
	region.imageExtent = vk::Extent3D( dest_image->extent(), 1 );

	std::vector< vk::BufferImageCopy > regions { region };

	cmd_buffer.copyBufferToImage(
		source_buffer->getVkBuffer(), dest_image->getVkImage(), vk::ImageLayout::eTransferDstOptimal, regions );

	// Transition to eShaderReadOnlyOptimal and release the image to the graphics queue
	// family. (The previous comment said "back to eGeneral", which did not match the code.)
	vk::ImageMemoryBarrier barrier_from {};
	barrier_from.oldLayout = barrier.newLayout;
	barrier_from.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
	barrier_from.image = dest_image->getVkImage();
	barrier_from.subresourceRange = range;
	barrier_from.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
	barrier_from.dstAccessMask = vk::AccessFlagBits::eShaderRead;
	barrier_from.srcQueueFamilyIndex = transfer_idx;
	barrier_from.dstQueueFamilyIndex = graphics_idx;

	const std::vector< vk::ImageMemoryBarrier > barriers_from { barrier_from };

	cmd_buffer.pipelineBarrier(
		vk::PipelineStageFlagBits::eTransfer,
		vk::PipelineStageFlagBits::eFragmentShader,
		vk::DependencyFlags(),
		{},
		{},
		barriers_from );

	return true;
}
//! Uploads the raw source bytes into the staging buffer, then records the image copy.
//! Returns false (item stays re-queueable) when the staging buffer has no room left.
bool TransferData::performRawImageStage(
	vk::raii::CommandBuffer& buffer,
	Buffer& staging_buffer,
	std::uint32_t transfer_idx,
	std::uint32_t graphics_idx )
{
	if ( convertRawToBuffer( staging_buffer ) )
		return performImageStage( buffer, transfer_idx, graphics_idx );

	return false;
}
//! Registers a buffer -> buffer copy region, grouped by (source, target) buffer pair so
//! recordCommands() can batch them into a single vkCmdCopyBuffer per pair. Always true.
bool TransferData::performBufferStage( CopyRegionMap& copy_regions )
{
	ZoneScoped;
	auto& source { std::get< TransferBufferHandle >( m_source ) };
	auto& target { std::get< TransferBufferHandle >( m_target ) };

	const CopyRegionKey key { std::make_pair( source->getBuffer(), target->getBuffer() ) };

	const auto copy_info { source->copyRegion( *target ) };

	// try_emplace: one hash lookup instead of find() + insert(), and no temporary
	// vector construction/copy on the insert path.
	auto [ itter, inserted ] = copy_regions.try_emplace( key );
	itter->second.emplace_back( copy_info );

	return true;
}
//! Uploads the raw payload into the staging buffer, then registers the buffer copy region.
//! @return false when the staging buffer is out of space (caller re-queues the transfer).
bool TransferData::performRawBufferStage( Buffer& staging_buffer, CopyRegionMap& copy_regions )
{
	log::debug( "Raw buffer -> Buffer staging" );
	if ( !convertRawToBuffer( staging_buffer ) ) return false;

	return performBufferStage( copy_regions );
}
//! Moves the raw byte payload into the staging buffer, replacing m_source with the
//! resulting suballocation handle.
//! @return false when the staging buffer is out of space (payload kept for a later retry).
bool TransferData::convertRawToBuffer( Buffer& staging_buffer )
{
	// Prepare the staging buffer first.
	assert( std::holds_alternative< RawData >( m_source ) );
	assert( std::get< RawData >( m_source ).size() > 0 );

	try
	{
		HostVector< std::byte > vector { staging_buffer, std::get< RawData >( m_source ) };

		m_source = vector.getHandle();
		return true;
	}
	// FIX: catch by const reference — catching by value copies (and can slice) the exception.
	catch ( const BufferOOM& )
	{
		log::warn( "Staging buffer full. Aborting stage" );
		return false;
	}

	std::unreachable();
}
//! Records/registers this transfer into the current submission, dispatching on m_type.
//! @return false when the staging buffer is full and the item must be retried later.
//! @throws std::runtime_error if m_type holds an unknown value.
bool TransferData::stage(
	vk::raii::CommandBuffer& buffer,
	Buffer& staging_buffer,
	CopyRegionMap& copy_regions,
	std::uint32_t transfer_idx,
	std::uint32_t graphics_idx )
{
	ZoneScoped;
	switch ( m_type )
	{
		default:
			throw std::runtime_error( "Invalid transfer type" );
		case IMAGE_FROM_RAW:
			return performRawImageStage( buffer, staging_buffer, transfer_idx, graphics_idx );
		case IMAGE_FROM_BUFFER:
			return performImageStage( buffer, transfer_idx, graphics_idx );
		case BUFFER_FROM_RAW:
			return performRawBufferStage( staging_buffer, copy_regions );
		case BUFFER_FROM_BUFFER:
			return performBufferStage( copy_regions );
	}

	std::unreachable();
}
//! Marks the transfer target as not staged/not ready (called on queueing).
void TransferData::markBad()
{
	switch ( m_type )
	{
		case BUFFER_FROM_RAW:
			[[fallthrough]];
		case BUFFER_FROM_BUFFER:
			std::get< TransferBufferHandle >( m_target )->setReady( false );
			break;
		case IMAGE_FROM_RAW:
			[[fallthrough]];
		case IMAGE_FROM_BUFFER:
			std::get< TransferImageHandle >( m_target )->setReady( false );
	}
}

//! Marks the transfer target as staged/ready for use.
//! NOTE(review): invoked right after submission (submitNow), not after GPU completion.
void TransferData::markGood()
{
	switch ( m_type )
	{
		case BUFFER_FROM_RAW:
			[[fallthrough]];
		case BUFFER_FROM_BUFFER:
			std::get< TransferBufferHandle >( m_target )->setReady( true );
			break;
		case IMAGE_FROM_RAW:
			[[fallthrough]];
		case IMAGE_FROM_BUFFER:
			std::get< TransferImageHandle >( m_target )->setReady( true );
	}
}
} // namespace fgl::engine

View File

@@ -0,0 +1,275 @@
//
// Created by kj16609 on 6/26/24.
//
#pragma once
#include <vulkan/vulkan_raii.hpp>
#include <functional>
#include <queue>
#include <thread>
#include "engine/FGL_DEFINES.hpp"
#include "engine/buffers/Buffer.hpp"
#include "engine/buffers/BufferSuballocationHandle.hpp"
#include "engine/buffers/vector/concepts.hpp"
#include "engine/image/ImageHandle.hpp"
#include "engine/literals/size.hpp"
#include "engine/utils.hpp"
namespace fgl::engine
{
class BufferVector;
class Texture;
class ImageHandle;
struct BufferSuballocationHandle;
class Image;
class BufferSuballocation;
// <Source,Target>
using CopyRegionKey = std::pair< vk::Buffer, vk::Buffer >;
//! Hashes a vk::Buffer by the integer value of its underlying handle.
struct BufferHasher
{
	std::size_t operator()( const vk::Buffer& buffer ) const
	{
		return reinterpret_cast< std::size_t >( static_cast< VkBuffer >( buffer ) );
	}
};

//! Combines the hashes of a (source, target) buffer pair for the copy-region map key.
struct CopyRegionKeyHasher
{
	std::size_t operator()( const std::pair< vk::Buffer, vk::Buffer >& pair ) const
	{
		const std::size_t hash_a { BufferHasher {}( std::get< 0 >( pair ) ) };
		const std::size_t hash_b { BufferHasher {}( std::get< 1 >( pair ) ) };

		std::size_t seed { 0 };
		fgl::engine::hashCombine( seed, hash_a, hash_b );
		return seed;
	}
};
using CopyRegionMap = std::unordered_map< CopyRegionKey, std::vector< vk::BufferCopy >, CopyRegionKeyHasher >;
class TransferData
{
enum TransferType
{
IMAGE_FROM_RAW,
IMAGE_FROM_BUFFER,
BUFFER_FROM_BUFFER,
BUFFER_FROM_RAW
} m_type;
using RawData = std::vector< std::byte >;
using TransferBufferHandle = std::shared_ptr< BufferSuballocationHandle >;
using TransferImageHandle = std::shared_ptr< ImageHandle >;
using SourceData = std::variant< RawData, TransferBufferHandle, TransferImageHandle >;
using TargetData = std::variant< TransferBufferHandle, TransferImageHandle >;
SourceData m_source;
TargetData m_target;
bool performImageStage(
vk::raii::CommandBuffer& cmd_buffer, std::uint32_t transfer_idx, std::uint32_t graphics_idx );
bool performRawImageStage(
vk::raii::CommandBuffer& buffer,
Buffer& staging_buffer,
std::uint32_t graphics_idx,
std::uint32_t transfer_idx );
bool performBufferStage( CopyRegionMap& copy_regions );
bool performRawBufferStage( Buffer& staging_buffer, CopyRegionMap& copy_regions );
bool convertRawToBuffer( Buffer& );
friend class TransferManager;
public:
TransferData() = delete;
TransferData( const TransferData& ) = default;
TransferData& operator=( const TransferData& ) = default;
TransferData( TransferData&& other ) = default;
TransferData& operator=( TransferData&& ) = default;
TransferData(
const std::shared_ptr< BufferSuballocationHandle >& source,
const std::shared_ptr< BufferSuballocationHandle >& target ) :
m_type( BUFFER_FROM_BUFFER ),
m_source( source ),
m_target( target )
{
log::debug(
"[TransferManager]: Queued buffer -> buffer transfer: {}",
fgl::literals::size_literals::to_string( source->m_size ) );
markBad();
}
TransferData( std::vector< std::byte >&& source, const std::shared_ptr< BufferSuballocationHandle >& target ) :
m_type( BUFFER_FROM_RAW ),
m_source( std::forward< std::vector< std::byte > >( source ) ),
m_target( target )
{
log::debug(
"[TransferManager]: Queued raw -> buffer transfer: {}",
literals::size_literals::to_string( std::get< RawData >( m_source ).size() ) );
assert( std::get< RawData >( m_source ).size() > 0 );
markBad();
}
TransferData(
const std::shared_ptr< BufferSuballocationHandle >& source, const std::shared_ptr< ImageHandle >& target ) :
m_type( IMAGE_FROM_BUFFER ),
m_source( source ),
m_target( target )
{
log::debug(
"[TransferManager]: Queued image -> image transfer: {}",
fgl::literals::size_literals::to_string( source->m_size ) );
markBad();
}
TransferData( std::vector< std::byte >&& source, const std::shared_ptr< ImageHandle >& target ) :
m_type( IMAGE_FROM_RAW ),
m_source( std::forward< std::vector< std::byte > >( source ) ),
m_target( target )
{
log::debug(
"[TransferManager]: Queued raw -> image transfer: {}",
literals::size_literals::to_string( std::get< RawData >( m_source ).size() ) );
assert( std::get< RawData >( m_source ).size() > 0 );
markBad();
}
bool stage(
vk::raii::CommandBuffer& buffer,
Buffer& staging_buffer,
CopyRegionMap& copy_regions,
std::uint32_t transfer_idx,
std::uint32_t graphics_idx );
//! Marks the target as not staged/not ready
void markBad();
//! Marks the target as staged/ready
void markGood();
};
//! Singleton owner of the staging buffer and the dedicated transfer queue.
//! Queued uploads are batched and flushed by submitNow(); completed items are
//! reclaimed by dump().
class TransferManager
{
	//TODO: Ring Buffer
	std::queue< TransferData > queue {};
	//! Items staged into the in-flight submission, awaiting markGood()/clear.
	std::vector< TransferData > processing {};

	// std::thread transfer_thread;

	//! Buffer used for any raw -> buffer transfers
	std::unique_ptr< Buffer > staging_buffer {};

  private:

	//! Map to store copy regions for processing vectors
	CopyRegionMap copy_regions {};

	std::uint32_t transfer_queue_index;
	std::uint32_t graphics_queue_index;

	vk::raii::Queue transfer_queue;

	//! Signaled once a transfer completes
	vk::raii::Semaphore transfer_semaphore;

	// NOTE(review): initialized from the global Device singleton rather than the
	// constructor's `device` parameter — presumably the same device; confirm.
	vk::CommandBufferAllocateInfo cmd_buffer_allocinfo { Device::getInstance().getCommandPool(),
		                                                 vk::CommandBufferLevel::ePrimary,
		                                                 1 };

	std::vector< vk::raii::CommandBuffer > transfer_buffers;

	//! Signaled when the submitted transfer command buffer finishes executing (see dump()).
	vk::raii::Fence completion_fence;

	//! Records all queued copies (and their barriers) into the command buffer.
	void recordCommands( vk::raii::CommandBuffer& command_buffer );

	//! Ends and submits the command buffer on the transfer queue.
	void submitBuffer( vk::raii::CommandBuffer& command_buffer );

	//! Creates barriers that releases ownership from the graphics family to the transfer queue.
	std::vector< vk::BufferMemoryBarrier > createFromGraphicsBarriers();

	//! Returns barriers that acquires ownership from the graphics family to the transfer queue
	std::vector< vk::BufferMemoryBarrier > createToTransferBarriers();

	//! Returns barriers that releases ownership from the transfer family to the graphics family
	std::vector< vk::BufferMemoryBarrier > createFromTransferBarriers();

	//! Creates barriers that acquires ownership from the transfer family to the graphics family
	std::vector< vk::BufferMemoryBarrier > createToGraphicsBarriers();

  public:

	//! Semaphore signaled when the submitted transfer work finishes.
	vk::raii::Semaphore& getFinishedSem() { return transfer_semaphore; }

	void takeOwnership( vk::raii::CommandBuffer& buffer );

	//! Records the barriers required for transfering queue ownership
	void recordOwnershipTransferDst( vk::raii::CommandBuffer& command_buffer );

	//! Drops the processed items
	void dump();

	//! Prepares the staging buffer. Filling it as much as possible
	void prepareStaging();

	static void createInstance( Device& device, std::uint64_t buffer_size );
	static TransferManager& getInstance();

	TransferManager( Device& device, std::uint64_t buffer_size );

	FGL_DELETE_ALL_Ro5( TransferManager );

	//! Replaces the staging buffer with a new one of `size` bytes.
	// NOTE(review): does not wait for in-flight transfers still using the old buffer —
	// confirm callers only resize while the transfer queue is idle.
	void resizeBuffer( const std::uint64_t size )
	{
		staging_buffer = std::make_unique< Buffer >(
			size,
			vk::BufferUsageFlagBits::eTransferSrc,
			vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent );
	}

	//! Queues an buffer to be transfered
	template < typename BufferT >
		requires is_device_vector< BufferT >
	void copyToBuffer( std::vector< std::byte >&& data, BufferT& buffer )
	{
		assert( data.size() > 0 );
		TransferData transfer_data { std::forward< std::vector< std::byte > >( data ), buffer.m_handle };
		queue.emplace( std::move( transfer_data ) );
	}

	//! Copies a typed vector by byte-punning it into a raw payload, then queuing it.
	template < typename T, typename BufferT >
		requires is_device_vector< BufferT >
	void copyToBuffer( const std::vector< T >& data, BufferT& buffer )
	{
		assert( data.size() > 0 );
		std::vector< std::byte > punned_data {};
		punned_data.resize( sizeof( T ) * data.size() );
		std::memcpy( punned_data.data(), data.data(), sizeof( T ) * data.size() );
		copyToBuffer( std::move( punned_data ), buffer );
	}

	//! Queues a device buffer -> device buffer copy.
	void copyToBuffer( BufferVector& source, BufferVector& target );

	//! Queues raw pixel bytes to be staged and copied into an image.
	void copyToImage( std::vector< std::byte >&& data, Image& image );

	//! Forces the queue to be submitted now before the buffer is filled.
	void submitNow();
};
} // namespace fgl::engine

View File

@@ -5,27 +5,13 @@
#include "Buffer.hpp"
#include "BufferSuballocationHandle.hpp"
#include "align.hpp"
#include "engine/buffers/exceptions.hpp"
#include "engine/literals/size.hpp"
#include "engine/rendering/Device.hpp"
namespace fgl::engine
{
std::unique_ptr< Buffer > global_staging_buffer { nullptr };
void initGlobalStagingBuffer( std::uint64_t size )
{
using namespace fgl::literals::size_literals;
global_staging_buffer = std::make_unique< Buffer >(
size,
vk::BufferUsageFlagBits::eTransferSrc,
vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eDeviceLocal );
}
Buffer& getGlobalStagingBuffer()
{
assert( global_staging_buffer && "Global staging buffer not initialized" );
return *global_staging_buffer.get();
}
BufferHandle::BufferHandle(
vk::DeviceSize memory_size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags memory_properties ) :
m_memory_size( memory_size ),
@@ -85,7 +71,7 @@ namespace fgl::engine
Device::getInstance().allocator(), &vk_buffer_info, &alloc_info, &buffer, &m_allocation, nullptr )
!= VK_SUCCESS )
{
throw std::runtime_error( "Failed to allocate" );
throw BufferException( "Unable to allocate memory in VMA" );
}
m_buffer = buffer;
@@ -215,7 +201,7 @@ namespace fgl::engine
<< ( allocated_memory_counter + free_memory_counter - memory_size ) << std::endl;
}
throw exceptions::AllocationException();
throw BufferOOM();
}
//Allocate

View File

@@ -5,38 +5,18 @@
#pragma once
#include <tracy/Tracy.hpp>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan.hpp>
#include <cassert>
#include <cmath>
#include <concepts>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <unordered_map>
#include "align.hpp"
#include "engine/literals/size.hpp"
#include "vma/vma_impl.hpp"
namespace fgl::engine::exceptions
{
struct EngineError : public std::runtime_error
{
EngineError( const char* msg ) : std::runtime_error( msg ) {}
};
struct AllocationException : public EngineError
{
AllocationException() : EngineError( "Failed to allocate memory" ) {}
};
} // namespace fgl::engine::exceptions
namespace fgl::engine
{
@@ -190,6 +170,4 @@ namespace fgl::engine
void setDebugName( const std::string str );
};
void initGlobalStagingBuffer( std::uint64_t size );
Buffer& getGlobalStagingBuffer();
} // namespace fgl::engine

View File

@@ -7,6 +7,8 @@
#include "Buffer.hpp"
#include "BufferSuballocationHandle.hpp"
#include "SuballocationView.hpp"
#include "align.hpp"
#include "engine/logging/logging.hpp"
namespace fgl::engine
{

View File

@@ -4,6 +4,7 @@
#pragma once
#include "BufferSuballocationHandle.hpp"
#include "engine/concepts/is_suballocation.hpp"
#include "engine/rendering/Device.hpp"
@@ -19,6 +20,8 @@ namespace fgl::engine
{
std::shared_ptr< BufferSuballocationHandle > m_handle;
friend class TransferManager;
protected:
vk::DeviceSize m_offset;
@@ -43,6 +46,8 @@ namespace fgl::engine
SuballocationView view( const vk::DeviceSize offset, const vk::DeviceSize size ) const;
bool ready() const { return m_handle->ready(); }
void* ptr() const;
vk::DeviceSize bytesize() const noexcept { return m_byte_size; }
@@ -55,6 +60,8 @@ namespace fgl::engine
vk::DescriptorBufferInfo descriptorInfo() const;
const std::shared_ptr< BufferSuballocationHandle >& getHandle() { return m_handle; }
~BufferSuballocation() = default;
};
@@ -62,6 +69,8 @@ namespace fgl::engine
template < typename T >
struct HostSingleT final : public BufferSuballocation
{
friend class TransferData;
using value_type = T;
HostSingleT() = delete;

View File

@@ -5,9 +5,16 @@
#include "BufferSuballocationHandle.hpp"
#include "Buffer.hpp"
#include "BufferSuballocation.hpp"
#include "engine/logging/logging.hpp"
namespace fgl::engine
{
//! Returns the owning Buffer's underlying vk::Buffer handle.
vk::Buffer BufferSuballocationHandle::getBuffer()
{
	return buffer.getBuffer();
}
BufferSuballocationHandle::
BufferSuballocationHandle( Buffer& p_buffer, vk::DeviceSize offset, vk::DeviceSize memory_size ) :
buffer( p_buffer ),
@@ -28,4 +35,31 @@ namespace fgl::engine
buffer.free( *this );
}
//! Builds a vk::BufferCopy describing a copy from this suballocation into `target`.
//! The size is clamped to the smaller of the two suballocations — a larger source
//! is silently truncated.
vk::BufferCopy BufferSuballocationHandle::copyRegion( BufferSuballocationHandle& target )
{
	vk::BufferCopy copy {};
	copy.size = std::min( this->m_size, target.m_size );
	copy.srcOffset = this->getOffset();
	copy.dstOffset = target.getOffset();

	log::debug(
		"Created buffer copy of size {} from offset [{:X}]:{} to [{:X}]:{}",
		copy.size,
		reinterpret_cast< std::size_t >( static_cast< VkBuffer >( this->getVkBuffer() ) ),
		copy.srcOffset,
		reinterpret_cast< std::size_t >( static_cast< VkBuffer >( target.getVkBuffer() ) ),
		copy.dstOffset );

	return copy;
}
//! Records a copy of this suballocation's contents into `other` on `cmd_buffer`.
void BufferSuballocationHandle::copyTo( vk::raii::CommandBuffer& cmd_buffer, BufferSuballocationHandle& other )
{
	const std::vector< vk::BufferCopy > regions { copyRegion( other ) };
	cmd_buffer.copyBuffer( getVkBuffer(), other.getVkBuffer(), regions );
}
} // namespace fgl::engine

View File

@@ -6,6 +6,11 @@
#include <vulkan/vulkan.hpp>
namespace vk::raii
{
class CommandBuffer;
}
namespace fgl::engine
{
class Buffer;
@@ -22,19 +27,30 @@ namespace fgl::engine
void* mapped { nullptr };
public:
bool m_staged { false };
BufferSuballocationHandle() = delete;
BufferSuballocationHandle( const BufferSuballocationHandle& ) = delete;
BufferSuballocationHandle& operator=( const BufferSuballocationHandle& ) = delete;
vk::Buffer getBuffer();
BufferSuballocationHandle( BufferSuballocationHandle&& ) = delete;
BufferSuballocationHandle& operator=( BufferSuballocationHandle&& ) = delete;
BufferSuballocationHandle( Buffer& buffer, vk::DeviceSize memory_size, vk::DeviceSize offset );
~BufferSuballocationHandle();
vk::BufferCopy copyRegion( BufferSuballocationHandle& target );
void copyTo( vk::raii::CommandBuffer& cmd_buffer, BufferSuballocationHandle& other );
vk::Buffer getVkBuffer() const;
bool ready() const { return m_staged; }
void setReady( const bool value ) { m_staged = value; }
vk::DeviceSize getOffset() const { return m_offset; }
};

View File

@@ -0,0 +1,29 @@
//
// Created by kj16609 on 6/26/24.
//
#pragma once

#include <exception>
#include <stdexcept> // FIX: std::runtime_error lives in <stdexcept>, not <exception>

namespace fgl::engine
{

	//! Root type for all engine-thrown exceptions.
	struct EngineException : public std::runtime_error
	{
		explicit EngineException( const char* str ) : std::runtime_error( str ) {}
	};

	//! Base for buffer/allocation-related failures.
	struct BufferException : public EngineException
	{
		BufferException() = delete;

		explicit BufferException( const char* str ) : EngineException( str ) {}
	};

	//! Thrown when a buffer has no free space left to service an allocation.
	struct BufferOOM : public BufferException
	{
		explicit BufferOOM() : BufferException( "Buffer OOM" ) {}
	};

} // namespace fgl::engine

View File

@@ -0,0 +1,54 @@
//
// Created by kj16609 on 6/26/24.
//
#include "BufferVector.hpp"
#include "engine/assets/TransferManager.hpp"
namespace fgl::engine
{
//! Returns the offset count from the start of the buffer to the first element
[[nodiscard]] std::uint32_t BufferVector::getOffsetCount() const
{
	// NOTE(review): std::isnan on what appear to be integral members is always false after
	// promotion — these asserts presumably guard a legacy float representation; confirm intent.
	assert( !std::isnan( m_count ) );
	assert( !std::isnan( m_stride ) );
	assert( m_count * m_stride == this->bytesize() );
	assert( m_offset % m_stride == 0 && "Offset must be aligned from the stride" );

	return static_cast< std::uint32_t >( this->m_offset / m_stride );
}

//! Size in bytes of a single element.
[[nodiscard]] std::uint32_t BufferVector::stride() const noexcept
{
	assert( !std::isnan( m_stride ) );
	assert( m_count * m_stride <= this->bytesize() );
	return m_stride;
}

//! Number of elements currently stored.
[[nodiscard]] std::uint32_t BufferVector::size() const noexcept
{
	assert( !std::isnan( m_count ) );
	assert( m_count * m_stride <= this->bytesize() );
	return m_count;
}

//! Grows the vector to hold `count` elements, queuing a GPU-side copy of the old contents
//! via the TransferManager.
void BufferVector::resize( const std::uint32_t count )
{
	assert( count > 0 );
	assert( !std::isnan( m_stride ) );
	assert( !std::isnan( m_count ) );

	//If the capacity is higher than what we are requesting then we simply ignore the request.
	// TODO: Maybe this is bad? I'm unsure. But reducing the number of allocations is always good
	if ( count < m_count ) return;

	BufferVector other { this->getBuffer(), count, m_stride };

	TransferManager::getInstance().copyToBuffer( *this, other );

	*this = std::move( other );
}
} // namespace fgl::engine

View File

@@ -30,7 +30,8 @@ namespace fgl::engine
BufferSuballocation( buffer.suballocate( count * stride ) ),
m_count( count ),
m_stride( stride )
{}
{
}
BufferVector( const BufferVector& ) = delete;
@@ -42,47 +43,10 @@ namespace fgl::engine
public:
//! Returns the offset count from the start of the buffer to the first element
[[nodiscard]] std::uint32_t getOffsetCount() const
{
assert( !std::isnan( m_count ) );
assert( !std::isnan( m_stride ) );
assert( m_count * m_stride == this->bytesize() );
assert( m_offset % m_stride == 0 && "Offset must be aligned from the stride" );
return static_cast< std::uint32_t >( this->m_offset / m_stride );
}
[[nodiscard]] std::uint32_t stride() const noexcept
{
assert( !std::isnan( m_stride ) );
assert( m_count * m_stride <= this->bytesize() );
return m_stride;
}
[[nodiscard]] std::uint32_t size() const noexcept
{
assert( !std::isnan( m_count ) );
assert( m_count * m_stride <= this->bytesize() );
return m_count;
}
void resize( const std::uint32_t count )
{
assert( count > 0 );
assert( !std::isnan( m_stride ) );
assert( !std::isnan( m_count ) );
//If the capacity is higher then what we are requesting then we simply just ignore the request.
// TODO: Maybe this is bad? I'm unsure. But reducing the number of allocations is always good
if ( count < m_count ) return;
BufferVector other { this->getBuffer(), count, m_stride };
Device::getInstance().copyBuffer( this->getBuffer(), other.getBuffer(), 0, 0, this->size() );
*this = std::move( other );
}
std::uint32_t getOffsetCount() const;
std::uint32_t stride() const noexcept;
std::uint32_t size() const noexcept;
void resize( std::uint32_t count );
};
} // namespace fgl::engine

View File

@@ -5,73 +5,34 @@
#pragma once
#include "BufferVector.hpp"
#include "HostVector.hpp"
#include "concepts.hpp"
#include "engine/assets/TransferManager.hpp"
#include "engine/literals/size.hpp"
#include "engine/logging/logging.hpp"
namespace fgl::engine
{
template < typename T >
class DeviceVector final : public BufferVector
class DeviceVector final : public BufferVector, public DeviceVectorBase
{
std::unique_ptr< HostVector< T > > m_staging_buffer {};
bool staged { false };
public:
DeviceVector( Buffer& buffer, const std::uint32_t count = 1 ) : BufferVector( buffer, count, sizeof( T ) )
{
log::debug( "Creating DeviceVector of size {}", count );
log::debug(
"Creating DeviceVector of size {}", fgl::literals::size_literals::to_string( count * sizeof( T ) ) );
assert( count != 0 && "BufferSuballocationVector::BufferSuballocationVector() called with count == 0" );
}
bool hasStaging() const { return m_staging_buffer != nullptr; }
void createStaging( const std::vector< T >& data )
{
m_staging_buffer = std::make_unique< HostVector< T > >( getGlobalStagingBuffer(), data );
}
HostVector< T >& getStaging() { return *m_staging_buffer; }
void stage()
{
auto buffer { Device::getInstance().beginSingleTimeCommands() };
stage( buffer );
Device::getInstance().endSingleTimeCommands( buffer );
dropStaging();
}
void stage( vk::raii::CommandBuffer& command_buffer )
{
assert( m_staging_buffer && "DeviceVector::stage() called without staging buffer" );
//Copy
vk::BufferCopy copy_region { m_staging_buffer->getOffset(), this->m_offset, this->m_byte_size };
command_buffer.copyBuffer( m_staging_buffer->getVkBuffer(), this->getVkBuffer(), copy_region );
staged = true;
}
void dropStaging()
{
assert( staged && "Staging buffer has not been commanded to write yet!" );
m_staging_buffer.reset();
}
/**
* @brief Constructs a new DeviceVector from a vector, Requires a command buffer to copy the data to the device
* @param buffer
* @brief Constructs a new DeviceVector from a vector using an allocation of the supplied buffer
* @param buffer buffer to suballocate from
* @param data
* @param command_buffer
*/
DeviceVector( Buffer& buffer, const std::vector< T >& data ) :
DeviceVector( buffer, static_cast< std::uint32_t >( data.size() ) )
{
createStaging( data );
TransferManager::getInstance().copyToBuffer( data, *this );
}
};

View File

@@ -0,0 +1,18 @@
//
// Created by kj16609 on 6/26/24.
//
#pragma once

//FIX: std::is_base_of_v requires <type_traits>; do not rely on transitive includes.
#include <type_traits>

namespace fgl::engine
{
	//! Tag base class used to mark device-local vector types for concept checks.
	struct DeviceVectorBase
	{};

	//! Satisfied by types that are both a DeviceVectorBase and a BufferSuballocation.
	//! NOTE(review): relies on BufferSuballocation being declared by the includer —
	//! consider including its header here as well. TODO confirm intended include order.
	template < typename T >
	concept is_device_vector =
		std::is_base_of_v< DeviceVectorBase, T > && std::is_base_of_v< BufferSuballocation, T >;
} // namespace fgl::engine

View File

@@ -0,0 +1,21 @@
//
// Created by kj16609 on 6/25/24.
//
#pragma once
//! Records a maximal (all-commands, all-memory) pipeline barrier into `buffer`.
//! Strictly a debugging aid for chasing synchronization bugs — it serializes GPU work
//! and must never ship on a hot path.
//! FIX: wrapped in do/while(0) so the macro expands to a single statement and is safe
//! inside an un-braced if/else; `buffer` is parenthesized against expression arguments.
#define FULL_DEBUG_BARRIER( buffer )                                                                                   \
	do                                                                                                                 \
	{                                                                                                                  \
		vk::MemoryBarrier memory_barrier {};                                                                           \
		memory_barrier.srcAccessMask = vk::AccessFlagBits::eMemoryWrite | vk::AccessFlagBits::eMemoryRead;             \
		memory_barrier.dstAccessMask = vk::AccessFlagBits::eMemoryWrite | vk::AccessFlagBits::eMemoryRead;             \
		std::vector< vk::MemoryBarrier > barriers { memory_barrier };                                                  \
                                                                                                                       \
		( buffer ).pipelineBarrier(                                                                                    \
			vk::PipelineStageFlagBits::eAllCommands,                                                                   \
			vk::PipelineStageFlagBits::eAllCommands,                                                                   \
			vk::DependencyFlagBits::eByRegion,                                                                         \
			barriers,                                                                                                  \
			{},                                                                                                        \
			{} );                                                                                                      \
	} while ( 0 )

View File

@@ -98,6 +98,8 @@ namespace fgl::engine
m_infos[ binding_idx ] = tex.getImageView().descriptorInfo(
tex.getImageView().getSampler().getVkSampler(), vk::ImageLayout::eShaderReadOnlyOptimal );
log::info( "Bound texture {} to global texture array", tex.getID() );
vk::WriteDescriptorSet write {};
write.dstSet = m_set;
write.dstBinding = binding_idx;

View File

@@ -17,9 +17,11 @@ namespace fgl::engine
class Image
{
std::shared_ptr< ImageHandle > m_handle {};
std::shared_ptr< ImageHandle > m_handle;
std::weak_ptr< ImageView > view {};
friend class TransferManager;
public:
Image() = delete;
@@ -29,8 +31,6 @@ namespace fgl::engine
m_handle( std::make_shared< ImageHandle >( extent, format, image, usage ) )
{}
[[nodiscard]] vk::Image& getVkImage();
Image& setName( const std::string str );
Image(
@@ -42,9 +42,7 @@ namespace fgl::engine
m_handle( std::make_shared< ImageHandle >( extent, format, usage, inital_layout, final_layout ) )
{}
Image( Image&& other ) = default;
Image( const Image& other ) : m_handle( other.m_handle ), view() {}
Image( const Image& other ) : m_handle( other.m_handle ) {}
Image& operator=( const Image& other )
{
@@ -53,6 +51,8 @@ namespace fgl::engine
return *this;
}
Image( Image&& other ) = default;
Image& operator=( Image&& other ) noexcept
{
m_handle = std::move( other.m_handle );

View File

@@ -12,7 +12,8 @@ namespace fgl::engine
m_extent( extent ),
m_format( format ),
m_usage( usage ),
m_image( image )
m_image( image ),
m_staged( true ) // Set staged to be true since we don't need to stage this image.
{
assert( std::get< vk::Image >( m_image ) != VK_NULL_HANDLE );
}
@@ -53,8 +54,8 @@ namespace fgl::engine
m_usage( usage ),
m_initial_layout( inital_layout ),
m_final_layout( final_layout ),
m_image( createImage( extent, format, inital_layout, usage ) )
m_image( createImage( extent, format, inital_layout, usage ) ),
m_staged( true )
{
assert( std::holds_alternative< vk::raii::Image >( m_image ) );
assert( *std::get< vk::raii::Image >( m_image ) != VK_NULL_HANDLE );

View File

@@ -30,6 +30,8 @@ namespace fgl::engine
// Because of the way the swapchain works we need to be able to storage a `VkImage` handle.
std::variant< vk::raii::Image, vk::Image > m_image;
bool m_staged { false };
friend class ImageView;
friend class Image;
@@ -66,6 +68,10 @@ namespace fgl::engine
vk::Extent2D extent() const { return m_extent; }
bool ready() const { return m_staged; }
void setReady( const bool value ) { m_staged = value; }
vk::ImageAspectFlags aspectMask() const
{
vk::ImageAspectFlags flags {};

View File

@@ -67,4 +67,9 @@ namespace fgl::engine
m_resource->setName( str );
}
bool ImageView::ready()
{
return m_resource->ready();
}
} // namespace fgl::engine

View File

@@ -26,6 +26,9 @@ namespace fgl::engine
void setName( const std::string str );
//! Returns true if the resource has been staged
bool ready();
ImageView() = delete;
ImageView( const ImageView& ) = delete;

View File

@@ -15,7 +15,9 @@ namespace fgl::engine
vk::Filter min_filter,
vk::Filter mag_filter,
vk::SamplerMipmapMode mipmode,
vk::SamplerAddressMode address_mode )
vk::SamplerAddressMode sampler_wrap_u,
vk::SamplerAddressMode sampler_wrap_v,
vk::SamplerAddressMode sampler_wrap_w )
{
vk::SamplerCreateInfo info;
@@ -24,9 +26,9 @@ namespace fgl::engine
info.mipmapMode = mipmode;
info.addressModeU = address_mode;
info.addressModeV = address_mode;
info.addressModeW = address_mode;
info.addressModeU = sampler_wrap_u;
info.addressModeV = sampler_wrap_v;
info.addressModeW = sampler_wrap_w;
info.minLod = -1000;
info.maxLod = 1000;
@@ -37,12 +39,70 @@ namespace fgl::engine
}
Sampler::Sampler(
vk::Filter min_filter,
vk::Filter mag_filter,
vk::SamplerMipmapMode mipmap_mode,
vk::SamplerAddressMode sampler_mode ) :
const vk::Filter min_filter,
const vk::Filter mag_filter,
const vk::SamplerMipmapMode mipmap_mode,
const vk::SamplerAddressMode sampler_wrap_u,
const vk::SamplerAddressMode sampler_wrap_v,
const vk::SamplerAddressMode sampler_wrap_w ) :
valid( true ),
m_sampler( createSampler( mag_filter, min_filter, mipmap_mode, sampler_mode ) )
m_sampler( createSampler( mag_filter, min_filter, mipmap_mode, sampler_wrap_u, sampler_wrap_v, sampler_wrap_w ) )
{}
namespace gl
{
vk::Filter filterToVk( int value )
{
switch ( value )
{
default:
throw std::runtime_error( "Failed to translate fitler value from opengl to vulkan!" );
case GL_NEAREST:
return vk::Filter::eNearest;
case GL_LINEAR:
return vk::Filter::eLinear;
case GL_LINEAR_MIPMAP_LINEAR:
return vk::Filter::eLinear;
}
std::unreachable();
}
vk::SamplerAddressMode wrappingToVk( const int val )
{
switch ( val )
{
default:
throw std::runtime_error( "Failed to translate wrapping filter to vk address mode" );
case GL_REPEAT:
return vk::SamplerAddressMode::eRepeat;
#ifdef GL_CLAMP_TO_BORDER
case GL_CLAMP_TO_BORDER:
return vk::SamplerAddressMode::eClampToBorder;
#endif
#ifdef GL_CLAMP_TO_EDGE
case GL_CLAMP_TO_EDGE:
return vk::SamplerAddressMode::eClampToEdge;
#endif
}
};
} // namespace gl
	/**
	 * @brief Constructs a Sampler from raw OpenGL-style enum values (as stored in glTF samplers).
	 * @param min_filter OpenGL minification filter enum (e.g. GL_LINEAR)
	 * @param mag_filter OpenGL magnification filter enum
	 * @param wraps x (u axis) wrap mode enum
	 * @param wrapt y (v axis) wrap mode enum
	 * @note The mipmap mode is fixed to linear, and the delegated-to overload reuses the
	 *       v wrap mode for the w axis since glTF only supplies wrapS/wrapT.
	 */
	Sampler::Sampler( int min_filter, int mag_filter, int wraps, int wrapt ) :
		Sampler(
			gl::filterToVk( min_filter ),
			gl::filterToVk( mag_filter ),
			vk::SamplerMipmapMode::eLinear,
			gl::wrappingToVk( wraps ),
			gl::wrappingToVk( wrapt ) )
	{}
Sampler::Sampler( Sampler&& other ) : valid( other.valid ), m_sampler( std::move( other.m_sampler ) )

View File

@@ -7,6 +7,8 @@
#include <vulkan/vulkan.hpp>
#include <vulkan/vulkan_raii.hpp>
#include "engine/FGL_DEFINES.hpp"
namespace fgl::engine
{
@@ -29,7 +31,28 @@ namespace fgl::engine
vk::Filter min_filter,
vk::Filter mag_filter,
vk::SamplerMipmapMode mipmap_mode,
vk::SamplerAddressMode sampler_mode );
vk::SamplerAddressMode sampler_wrap_u,
vk::SamplerAddressMode sampler_wrap_v,
vk::SamplerAddressMode sampler_wrap_w );
FGL_FORCE_INLINE_FLATTEN Sampler(
vk::Filter min_filter,
vk::Filter mag_filter,
vk::SamplerMipmapMode mipmap_mode,
vk::SamplerAddressMode sampler_wrap_u,
vk::SamplerAddressMode sampler_wrap_v ) :
Sampler( min_filter, mag_filter, mipmap_mode, sampler_wrap_u, sampler_wrap_v, sampler_wrap_v )
{}
FGL_FORCE_INLINE_FLATTEN Sampler(
vk::Filter min_filter,
vk::Filter mag_filter,
vk::SamplerMipmapMode mipmap_mode,
vk::SamplerAddressMode sampler_wrap_u ) :
Sampler( min_filter, mag_filter, mipmap_mode, sampler_wrap_u, sampler_wrap_u, sampler_wrap_u )
{}
Sampler( int min_filter, int mag_filter, int wraps, int wrapt );
VkSampler operator*() { return *m_sampler; }

View File

@@ -52,6 +52,16 @@ namespace fgl::engine
return box;
}
bool Model::ready()
{
//Return true if even a single primitive is ready
for ( auto& primitive : this->m_primitives )
{
if ( primitive.ready() ) return true;
}
return false;
}
std::vector< vk::DrawIndexedIndirectCommand > Model::getDrawCommand( const std::uint32_t index ) const
{
ZoneScoped;
@@ -132,14 +142,4 @@ namespace fgl::engine
return model_ptr;
}
void Model::stage( vk::raii::CommandBuffer& cmd_buffer )
{
assert( !m_primitives.empty() );
for ( auto& primitive : m_primitives )
{
primitive.m_vertex_buffer.stage( cmd_buffer );
primitive.m_index_buffer.stage( cmd_buffer );
}
}
} // namespace fgl::engine

View File

@@ -14,7 +14,6 @@
#include "engine/buffers/Buffer.hpp"
#include "engine/primitives/TransformComponent.hpp"
#include "engine/primitives/boxes/OrientedBoundingBox.hpp"
#include "engine/rendering/Device.hpp"
namespace fgl::engine
{
@@ -44,6 +43,8 @@ namespace fgl::engine
public:
bool ready();
//! Returns the bounding box in model space
const OrientedBoundingBox< CoordinateSpace::Model >& getBoundingBox() const { return m_bounding_box; }
@@ -64,8 +65,6 @@ namespace fgl::engine
static std::vector< std::shared_ptr< Model > >
createModelsFromScene( const std::filesystem::path& path, Buffer& vertex_buffer, Buffer& index_buffer );
void stage( vk::raii::CommandBuffer& cmd_buffer );
const std::string& getName() const { return m_name; }
Model(

View File

@@ -21,10 +21,10 @@ namespace fgl::engine
return { std::move( vertex_buffer_suballoc ), std::move( index_buffer_suballoc ), bounds, mode };
}
TextureID Primitive::getTextureID() const
TextureID Primitive::getAlbedoTextureID() const
{
if ( m_texture )
return m_texture->getID();
if ( m_textures.albedo )
return m_textures.albedo->getID();
else
return std::numeric_limits< TextureID >::max();
}

View File

@@ -35,6 +35,29 @@ namespace fgl::engine
TRI_FAN = TINYGLTF_MODE_TRIANGLE_FAN
};
struct PrimitiveTextures
{
std::shared_ptr< Texture > albedo { nullptr };
std::shared_ptr< Texture > normal { nullptr };
inline bool hasTextures() const { return albedo || normal; }
inline bool ready() const
{
if ( albedo )
{
if ( !albedo->ready() ) return false;
}
if ( normal )
{
if ( !normal->ready() ) return false;
}
return true;
}
};
struct Primitive
{
VertexBufferSuballocation m_vertex_buffer;
@@ -42,7 +65,13 @@ namespace fgl::engine
OrientedBoundingBox< CoordinateSpace::Model > m_bounding_box;
PrimitiveMode m_mode;
std::shared_ptr< Texture > m_texture;
PrimitiveTextures m_textures {};
//! Returns true if the primitive is ready to be rendered (must have all textures, vertex buffer, and index buffer ready)
bool ready() const
{
return m_textures.ready() && m_vertex_buffer.ready() && m_index_buffer.ready();
}
Primitive(
VertexBufferSuballocation&& vertex_buffer,
@@ -52,21 +81,20 @@ namespace fgl::engine
m_vertex_buffer( std::move( vertex_buffer ) ),
m_index_buffer( std::move( index_buffer ) ),
m_bounding_box( bounding_box ),
m_mode( mode ),
m_texture( nullptr )
m_mode( mode )
{}
Primitive(
VertexBufferSuballocation&& vertex_buffer,
IndexBufferSuballocation&& index_buffer,
const OrientedBoundingBox< CoordinateSpace::Model >& bounding_box,
std::shared_ptr< Texture >&& texture,
PrimitiveTextures&& textures,
const PrimitiveMode mode ) :
m_vertex_buffer( std::move( vertex_buffer ) ),
m_index_buffer( std::move( index_buffer ) ),
m_bounding_box( bounding_box ),
m_mode( mode ),
m_texture( std::forward< decltype( m_texture ) >( texture ) )
m_textures( std::forward< decltype( m_textures ) >( textures ) )
{}
Primitive() = delete;
@@ -80,7 +108,7 @@ namespace fgl::engine
Buffer& vertex_buffer,
Buffer& index_buffer );
TextureID getTextureID() const;
TextureID getAlbedoTextureID() const;
};
} // namespace fgl::engine

View File

@@ -22,13 +22,6 @@ namespace fgl::engine
}
else
throw std::runtime_error( "Unknown model file extension" );
//Stage
for ( auto& prim : m_primitives )
{
prim.m_index_buffer.stage();
prim.m_vertex_buffer.stage();
}
}
void ModelBuilder::loadVerts( std::vector< Vertex > verts, std::vector< std::uint32_t > indicies )

View File

@@ -14,6 +14,10 @@
#include <engine/logging/logging.hpp>
#include "engine/assets/stores.hpp"
#include "engine/descriptors/DescriptorSet.hpp"
#include "engine/image/ImageView.hpp"
namespace fgl::engine
{
@@ -131,7 +135,38 @@ namespace fgl::engine
return root.accessors.at( prim.attributes.at( attrib ) );
}
std::shared_ptr< Texture > SceneBuilder::loadTexture( const tinygltf::Primitive& prim, const tinygltf::Model& root )
std::shared_ptr< Texture > SceneBuilder::
getTextureForParameter( const tinygltf::Parameter& parameter, const tinygltf::Model& root )
{
const auto texture_idx { parameter.TextureIndex() };
const tinygltf::Texture& tex_info { root.textures[ texture_idx ] };
const auto source_idx { tex_info.source };
const tinygltf::Image& source { root.images[ source_idx ] };
if ( source.uri.empty() ) throw std::runtime_error( "Unsupported loading method for image (Must be a file)" );
const std::filesystem::path filepath { source.uri };
const auto full_path { m_root / filepath };
const auto sampler_idx { tex_info.sampler };
const tinygltf::Sampler& sampler_info { root.samplers[ sampler_idx ] };
Sampler sampler { sampler_info.minFilter, sampler_info.magFilter, sampler_info.wrapS, sampler_info.wrapT };
std::shared_ptr< Texture > texture { getTextureStore().load( full_path ) };
texture->getImageView().getSampler() = std::move( sampler );
//Prepare the texture into the global system
Texture::getTextureDescriptorSet().bindTexture( 0, texture );
Texture::getTextureDescriptorSet().update();
return texture;
}
PrimitiveTextures SceneBuilder::loadTextures( const tinygltf::Primitive& prim, const tinygltf::Model& root )
{
ZoneScoped;
const auto mat_idx { prim.material };
@@ -141,15 +176,40 @@ namespace fgl::engine
throw std::runtime_error( "No material for primitive. One was expected" );
}
const auto& material { root.materials[ mat_idx ] };
const tinygltf::Material& materials { root.materials[ mat_idx ] };
for ( const auto& [ key, value ] : material.values )
for ( const auto& [ key, value ] : materials.values )
{
log::debug( "Parsing texture for key {}", key );
log::debug( "Found key: {}", key );
}
//TODO:
throw std::runtime_error( "No material loader implemented" );
auto findParameter = [ &materials ]( const std::string name ) -> std::optional< tinygltf::Parameter >
{
const auto& itter { materials.values.find( name ) };
if ( itter == materials.values.end() )
return std::nullopt;
else
return { itter->second };
};
const auto albedo { findParameter( "baseColorTexture" ) };
const auto normal { findParameter( "normalTexture" ) };
const auto occlusion_texture { findParameter( "occlusionTexture" ) };
PrimitiveTextures textures {};
if ( albedo.has_value() )
{
textures.albedo = getTextureForParameter( *albedo, root );
}
if ( normal.has_value() )
{
textures.normal = getTextureForParameter( *normal, root );
}
return textures;
}
std::vector< std::shared_ptr< Model > > SceneBuilder::getModels()
@@ -281,9 +341,10 @@ namespace fgl::engine
m_vertex_buffer,
m_index_buffer ) };
// If we have a texcoord then we have a UV map. Meaning we likely have textures to use
if ( !has_texcoord ) return primitive_mesh;
primitive_mesh.m_texture = loadTexture( prim, root );
primitive_mesh.m_textures = loadTextures( prim, root );
return primitive_mesh;
}
@@ -368,6 +429,7 @@ namespace fgl::engine
{
ZoneScoped;
if ( !std::filesystem::exists( path ) ) throw std::runtime_error( "Failed to find scene at filepath" );
m_root = path.parent_path();
tinygltf::TinyGLTF loader {};
tinygltf::Model gltf_model {};

View File

@@ -12,6 +12,7 @@
namespace fgl::engine
{
struct PrimitiveTextures;
struct Vertex;
class Model;
struct Primitive;
@@ -25,12 +26,15 @@ namespace tinygltf
class Model;
struct Primitive;
struct Accessor;
struct Parameter;
} // namespace tinygltf
namespace fgl::engine
{
class SceneBuilder
{
//! Root path. Set by 'load' functions
std::filesystem::path m_root {};
Buffer& m_vertex_buffer;
Buffer& m_index_buffer;
@@ -58,8 +62,10 @@ namespace fgl::engine
const tinygltf::Accessor& getAccessorForAttribute(
const tinygltf::Primitive& prim, const tinygltf::Model& root, const std::string attrib ) const;
std::shared_ptr< Texture >
getTextureForParameter( const tinygltf::Parameter& parameter, const tinygltf::Model& root );
std::shared_ptr< Texture > loadTexture( const tinygltf::Primitive& prim, const tinygltf::Model& root );
PrimitiveTextures loadTextures( const tinygltf::Primitive& prim, const tinygltf::Model& root );
public:

View File

@@ -9,6 +9,7 @@
#pragma GCC diagnostic pop
#include "ModelBuilder.hpp"
#include "engine/assets/TransferManager.hpp"
#include "engine/assets/stores.hpp"
#include "engine/descriptors/DescriptorSet.hpp"
#include "engine/image/ImageView.hpp"
@@ -259,6 +260,7 @@ namespace fgl::engine
std::shared_ptr< Texture > tex {
getTextureStore().load( filepath.parent_path() / source.uri, vk::Format::eR8G8B8A8Unorm )
};
Sampler smp { translateFilterToVK( sampler.minFilter ),
translateFilterToVK( sampler.magFilter ),
vk::SamplerMipmapMode::eLinear,
@@ -267,18 +269,19 @@ namespace fgl::engine
tex->getImageView().getSampler() = std::move( smp );
tex->createImGuiSet();
tex->stage();
Texture::getTextureDescriptorSet().bindTexture( 0, tex );
Texture::getTextureDescriptorSet().update();
//Stage texture
auto cmd { Device::getInstance().beginSingleTimeCommands() };
PrimitiveTextures primitive_textures {};
primitive_textures.albedo = tex;
Primitive prim { std::move( vertex_buffer ),
std::move( index_buffer ),
bounding_box,
std::move( tex ),
std::move( primitive_textures ),
PrimitiveMode::TRIS };
m_primitives.emplace_back( std::move( prim ) );

View File

@@ -357,23 +357,6 @@ namespace fgl::engine
return allocator;
}
void Device::copyBuffer(
vk::Buffer dst, vk::Buffer src, vk::DeviceSize dst_offset, vk::DeviceSize src_offset, vk::DeviceSize size )
{
vk::raii::CommandBuffer commandBuffer { beginSingleTimeCommands() };
vk::BufferCopy copyRegion {};
copyRegion.size = size;
copyRegion.srcOffset = src_offset;
copyRegion.dstOffset = dst_offset;
std::vector< vk::BufferCopy > copy_regions { copyRegion };
commandBuffer.copyBuffer( src, dst, copy_regions );
endSingleTimeCommands( commandBuffer );
}
vk::Result Device::setDebugUtilsObjectName( const vk::DebugUtilsObjectNameInfoEXT& nameInfo )
{
#ifndef NDEBUG

View File

@@ -69,13 +69,6 @@ namespace fgl::engine
VmaAllocator m_allocator;
void copyBuffer(
vk::Buffer dst,
vk::Buffer src,
vk::DeviceSize dst_offset,
vk::DeviceSize src_offset,
vk::DeviceSize size = VK_WHOLE_SIZE );
public:
vk::PhysicalDeviceProperties m_properties;
@@ -87,25 +80,6 @@ namespace fgl::engine
static Device& getInstance();
template < typename Dst, typename Src >
requires( is_buffer< Dst > || is_suballocation< Dst > ) && (is_buffer< Src > || is_suballocation< Src >)
void copyBuffer(
Dst& dst,
Src& src,
vk::DeviceSize dst_offset,
vk::DeviceSize src_offset,
vk::DeviceSize size = VK_WHOLE_SIZE )
{
copyBuffer( dst.getBuffer(), src.getBuffer(), dst_offset, src_offset, size );
}
template < typename Dst, typename Src >
requires is_suballocation< Dst > && is_suballocation< Src >
void copyBuffer( Dst& dst, Src& src, vk::DeviceSize size )
{
copyBuffer( dst, src, dst.offset(), src.offset(), size );
}
private:
VmaAllocator createVMAAllocator();

View File

@@ -5,7 +5,6 @@
#include "QueuePool.hpp"
#include "Attachment.hpp"
#include "PhysicalDevice.hpp"
namespace fgl::engine
{
@@ -26,13 +25,16 @@ namespace fgl::engine
}
}
QueuePool::QueueIndex QueuePool::getIndex( const vk::QueueFlags flags )
QueuePool::QueueIndex QueuePool::getIndex( const vk::QueueFlags flags, const vk::QueueFlags anti_flags )
{
for ( std::uint32_t i = 0; i < queue_info.size(); ++i )
{
const auto& [ props, can_present, num_allocated ] = queue_info[ i ];
if ( props.queueFlags & flags && props.queueCount > 0 ) return i;
if ( ( props.queueFlags & flags ) && !( anti_flags & flags ) && props.queueCount > 0 )
{
return i;
}
}
throw std::runtime_error( "Failed to get index of queue family with given flags" );

View File

@@ -40,7 +40,7 @@ namespace fgl::engine
using QueueIndex = std::uint32_t;
//! Returns a unique list of indexes with the matching flags
QueueIndex getIndex( const vk::QueueFlags flags );
QueueIndex getIndex( const vk::QueueFlags flags, const vk::QueueFlags anti_flags = vk::QueueFlags( 0 ) );
std::uint32_t getPresentIndex();
};

View File

@@ -135,6 +135,20 @@ namespace fgl::engine
dependencies.push_back( subpass_dependency );
}
	//! Registers a maximal memory+execution dependency on `parent`. Debug-only escape hatch:
	//! this serializes the two subpasses and defeats any overlap the driver could find.
	template < is_subpass SrcT >
	void registerFullDependency( SrcT& parent )
	{
		//Deliberately loud so a leftover full dependency cannot slip into a release unnoticed.
		log::critical( "!!!!!!!!!!!! Performance risk !!!!!!!!!!!!!" );
		log::critical(
			"Rendering pass using a full dependency. THIS IS MOST LIKELY NOT WHAT YOU WANT UNLESS DEBUGGING" );

		//All read+write access, across all command and graphics stages, by-region.
		constexpr vk::AccessFlags all_access { vk::AccessFlagBits::eMemoryWrite | vk::AccessFlagBits::eMemoryRead };
		constexpr vk::PipelineStageFlags all_stages { vk::PipelineStageFlagBits::eAllCommands
			                                          | vk::PipelineStageFlagBits::eAllGraphics };

		registerDependencyFrom(
			parent, all_access, all_stages, all_access, all_stages, vk::DependencyFlagBits::eByRegion );
	}
template < is_subpass SrcT >
void registerDependencyFrom(
SrcT& parent,
@@ -154,18 +168,31 @@ namespace fgl::engine
dependency_flags );
}
	//! Registers a self-dependency (this subpass as both source and destination),
	//! which is required before recording pipeline barriers inside the subpass.
	void registerSelfDependency(
		const vk::AccessFlags src_access_flags,
		const vk::PipelineStageFlags src_stage_flags,
		const vk::AccessFlags dst_access_flags,
		const vk::PipelineStageFlags dst_stage_flags,
		const vk::DependencyFlags dependency_flags )
	{
		//Delegates to the general form with ourselves as the parent subpass.
		registerDependencyFrom(
			*this, src_access_flags, src_stage_flags, dst_access_flags, dst_stage_flags, dependency_flags );
	}
void registerDependencyFromExternal(
const vk::AccessFlags access_flags,
const vk::PipelineStageFlags stage_flags,
const vk::AccessFlags src_access_flags,
const vk::PipelineStageFlags src_stage_flags,
const vk::AccessFlags dst_access_flags = vk::AccessFlagBits::eNone,
const vk::PipelineStageFlags dst_stage_flags = vk::PipelineStageFlagBits::eNone,
const vk::DependencyFlags dependency_flags = {} )
{
registerDependency(
VK_SUBPASS_EXTERNAL,
this->index,
access_flags,
stage_flags,
access_flags,
stage_flags,
src_access_flags,
src_stage_flags,
dst_access_flags == vk::AccessFlagBits::eNone ? src_access_flags : dst_access_flags,
dst_stage_flags == vk::PipelineStageFlagBits::eNone ? src_stage_flags : dst_stage_flags,
dependency_flags );
}

View File

@@ -1,10 +1,5 @@
#include "SwapChain.hpp"
#include "Attachment.hpp"
#include "RenderPass.hpp"
#include "Subpass.hpp"
// std
#include <array>
#include <cstdlib>
#include <cstring>
@@ -12,6 +7,10 @@
#include <limits>
#include <stdexcept>
#include "Attachment.hpp"
#include "RenderPass.hpp"
#include "Subpass.hpp"
namespace fgl::engine
{
@@ -73,18 +72,20 @@ namespace fgl::engine
vk::SubmitInfo submitInfo {};
vk::Semaphore waitSemaphores[] = { imageAvailableSemaphores[ currentFrame ] };
vk::PipelineStageFlags waitStages[] = { vk::PipelineStageFlagBits::eColorAttachmentOutput };
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
std::vector< vk::Semaphore > wait_sems { imageAvailableSemaphores[ currentFrame ],
TransferManager::getInstance().getFinishedSem() };
std::vector< vk::PipelineStageFlags > wait_stages { vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::PipelineStageFlagBits::eTopOfPipe };
submitInfo.setWaitSemaphores( wait_sems );
submitInfo.setWaitDstStageMask( wait_stages );
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &( *buffers );
vk::Semaphore signalSemaphores[] = { renderFinishedSemaphores[ currentFrame ] };
submitInfo.signalSemaphoreCount = 1;
submitInfo.pSignalSemaphores = signalSemaphores;
std::vector< vk::Semaphore > signaled_semaphores { renderFinishedSemaphores[ currentFrame ] };
submitInfo.setSignalSemaphores( signaled_semaphores );
Device::getInstance().device().resetFences( fences );
@@ -94,12 +95,10 @@ namespace fgl::engine
vk::PresentInfoKHR presentInfo = {};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
presentInfo.setWaitSemaphores( signaled_semaphores );
vk::SwapchainKHR swapChains[] = { swapChain };
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
std::vector< vk::SwapchainKHR > swapchains { swapChain };
presentInfo.setSwapchains( swapchains );
std::array< std::uint32_t, 1 > indicies { { imageIndex } };
presentInfo.setImageIndices( indicies );
@@ -311,21 +310,10 @@ namespace fgl::engine
1, depthAttachment, g_buffer_composite, g_buffer_position, g_buffer_normal, g_buffer_albedo
};
// To prevent the composite buffer from getting obliterated by the gui pass and so we can use it to render to the GUI in certian areas, We need to keep them seperate and the composite image to be unmodified.
Subpass<
vk::PipelineBindPoint::eGraphics,
UsedAttachment< DepthAttachment, vk::ImageLayout::eDepthStencilAttachmentOptimal >,
UsedAttachment< ColoredPresentAttachment, vk::ImageLayout::eColorAttachmentOptimal >,
InputAttachment< ColorAttachment, vk::ImageLayout::eShaderReadOnlyOptimal > >
gui_subpass { 2, depthAttachment, colorAttachment, g_buffer_composite };
composite_subpass.registerDependencyFromExternal(
vk::AccessFlagBits::eColorAttachmentWrite, vk::PipelineStageFlagBits::eColorAttachmentOutput );
/*
g_buffer_subpass -> composite_subpass -> gui_subpass
*/
// Register a dependency for the composite subpass that prevents it from working until the g_buffer_subpass has finished writing it's color attachments
// For color attachments
composite_subpass.registerDependencyFrom(
g_buffer_subpass,
vk::AccessFlagBits::eColorAttachmentWrite,
@@ -334,16 +322,61 @@ namespace fgl::engine
vk::PipelineStageFlagBits::eFragmentShader,
vk::DependencyFlagBits::eByRegion );
// For depth attachment
composite_subpass.registerDependencyFrom(
g_buffer_subpass,
vk::AccessFlagBits::eDepthStencilAttachmentWrite,
vk::PipelineStageFlagBits::eEarlyFragmentTests | vk::PipelineStageFlagBits::eLateFragmentTests,
vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite,
vk::PipelineStageFlagBits::eEarlyFragmentTests | vk::PipelineStageFlagBits::eLateFragmentTests,
vk::DependencyFlagBits::eByRegion );
/*
composite_subpass.registerDependencyFrom(
g_buffer_subpass,
vk::AccessFlagBits::eColorAttachmentWrite,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::AccessFlagBits::eTransferWrite,
vk::PipelineStageFlagBits::eTopOfPipe,
vk::DependencyFlagBits::eByRegion );
*/
// To prevent the composite buffer from getting obliterated by the gui pass and so we can use it to render to the GUI in certian areas, We need to keep them seperate and the composite image to be unmodified.
Subpass<
vk::PipelineBindPoint::eGraphics,
UsedAttachment< DepthAttachment, vk::ImageLayout::eDepthStencilAttachmentOptimal >,
UsedAttachment< ColoredPresentAttachment, vk::ImageLayout::eColorAttachmentOptimal >,
InputAttachment< ColorAttachment, vk::ImageLayout::eShaderReadOnlyOptimal > >
gui_subpass { 2, depthAttachment, colorAttachment, g_buffer_composite };
gui_subpass.registerDependencyFromExternal(
vk::AccessFlagBits::eColorAttachmentWrite, vk::PipelineStageFlagBits::eColorAttachmentOutput );
/*
g_buffer_subpass -> composite_subpass -> gui_subpass
*/
gui_subpass.registerDependencyFrom(
composite_subpass,
vk::AccessFlagBits::eColorAttachmentWrite,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::AccessFlagBits::eInputAttachmentRead,
vk::AccessFlagBits::eShaderRead,
vk::PipelineStageFlagBits::eFragmentShader,
vk::DependencyFlagBits::eByRegion );
//composite_subpass.registerFullDependency( g_buffer_subpass );
//gui_subpass.registerFullDependency( composite_subpass );
gui_subpass.registerDependencyFrom(
composite_subpass,
vk::AccessFlagBits::eColorAttachmentWrite,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::AccessFlagBits::eColorAttachmentWrite | vk::AccessFlagBits::eColorAttachmentRead,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::DependencyFlagBits::eByRegion );
gui_subpass.registerDependencyToExternal(
vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite,
vk::AccessFlagBits::eColorAttachmentWrite,
vk::PipelineStageFlagBits::eColorAttachmentOutput,
vk::AccessFlagBits::eMemoryRead,
vk::PipelineStageFlagBits::eBottomOfPipe,

View File

@@ -3,7 +3,6 @@
#include <vulkan/vulkan.h>
#include <memory>
#include <optional>
#include <vector>
#include "Device.hpp"

View File

@@ -7,18 +7,18 @@
#include <initializer_list>
#include "engine/FrameInfo.hpp"
#include "engine/buffers/BufferSuballocation.hpp"
#include "engine/assets/TransferManager.hpp"
#include "engine/descriptors/DescriptorSet.hpp"
#include "engine/image/Image.hpp"
#include "engine/image/ImageView.hpp"
#include "engine/logging/logging.hpp"
#include "engine/math/noise/perlin/generator.hpp"
#include "objectloaders/stb_image.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Wconversion"
#include "objectloaders/stb_image.h"
#include "imgui/backends/imgui_impl_vulkan.h"
#pragma GCC diagnostic pop
@@ -62,7 +62,7 @@ namespace fgl::engine
void Texture::drawImGui( vk::Extent2D extent )
{
if ( !isReady() )
if ( !ready() )
{
log::debug( "Unable to draw Image {}. Image not ready", this->getID() );
return;
@@ -91,7 +91,7 @@ namespace fgl::engine
const ImVec2 imgui_size { static_cast< float >( extent.width ), static_cast< float >( extent.height ) };
if ( !isReady() )
if ( !ready() )
{
//TODO: Render placeholder
log::warn( "Attempted to render texture {} but texture was not ready!", this->m_texture_id );
@@ -101,15 +101,16 @@ namespace fgl::engine
return ImGui::ImageButton( static_cast< ImTextureID >( getImGuiDescriptorSet() ), imgui_size );
}
Texture::Texture( const std::tuple< std::vector< std::byte >, int, int, vk::Format >& tuple ) :
Texture( std::get< 0 >( tuple ), std::get< 1 >( tuple ), std::get< 2 >( tuple ), std::get< 3 >( tuple ) )
Texture::Texture( std::tuple< std::vector< std::byte >, int, int, vk::Format > tuple ) :
Texture(
std::move( std::get< 0 >( tuple ) ), std::get< 1 >( tuple ), std::get< 2 >( tuple ), std::get< 3 >( tuple ) )
{}
Texture::Texture( const std::vector< std::byte >& data, const int x, const int y, const vk::Format format ) :
Texture( data, vk::Extent2D( x, y ), format )
Texture::Texture( std::vector< std::byte >&& data, const int x, const int y, const vk::Format format ) :
Texture( std::forward< std::vector< std::byte > >( data ), vk::Extent2D( x, y ), format )
{}
Texture::Texture( const std::vector< std::byte >& data, const vk::Extent2D extent, const vk::Format format ) :
Texture::Texture( std::vector< std::byte >&& data, const vk::Extent2D extent, const vk::Format format ) :
m_texture_id( getNextID() ),
m_extent( extent )
{
@@ -124,9 +125,7 @@ namespace fgl::engine
m_image_view = image->getView();
m_staging = std::make_unique< BufferSuballocation >( getGlobalStagingBuffer(), data.size() );
//Copy data info buffer
std::memcpy( reinterpret_cast< unsigned char* >( m_staging->ptr() ), data.data(), data.size() );
TransferManager::getInstance().copyToImage( std::forward< std::vector< std::byte > >( data ), *image );
}
Texture::Texture( const std::filesystem::path& path, const vk::Format format ) :
@@ -138,97 +137,12 @@ namespace fgl::engine
Texture::~Texture()
{
//TODO: Implement deffered destruction
if ( m_imgui_set != VK_NULL_HANDLE ) ImGui_ImplVulkan_RemoveTexture( m_imgui_set );
}
void Texture::stage()
{
auto command_buffer { Device::getInstance().beginSingleTimeCommands() };
stage( command_buffer );
Device::getInstance().endSingleTimeCommands( command_buffer );
setReady();
m_staging.reset();
}
void Texture::stage( vk::raii::CommandBuffer& cmd )
{
ZoneScoped;
//assert( m_staging && "Can't stage. No staging buffer made" );
// Texutres are made with a staging buffer in RAM, Thus if the buffer has been dropped then we have been sucesfully staged.
if ( !m_staging ) return;
vk::ImageSubresourceRange range;
range.aspectMask = vk::ImageAspectFlagBits::eColor;
range.baseMipLevel = 0;
range.levelCount = 1;
range.baseArrayLayer = 0;
range.layerCount = 1;
vk::ImageMemoryBarrier barrier {};
barrier.oldLayout = vk::ImageLayout::eUndefined;
barrier.newLayout = vk::ImageLayout::eTransferDstOptimal;
barrier.image = m_image_view->getVkImage();
barrier.subresourceRange = range;
barrier.srcAccessMask = {};
barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite;
const std::vector< vk::ImageMemoryBarrier > barriers_to { barrier };
cmd.pipelineBarrier(
vk::PipelineStageFlagBits::eTopOfPipe,
vk::PipelineStageFlagBits::eTransfer,
vk::DependencyFlags(),
{},
{},
barriers_to );
vk::BufferImageCopy region {};
region.bufferOffset = m_staging->getOffset();
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eColor;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = vk::Offset3D( 0, 0, 0 );
region.imageExtent = vk::Extent3D( m_extent, 1 );
std::vector< vk::BufferImageCopy > regions { region };
cmd.copyBufferToImage(
m_staging->getVkBuffer(), m_image_view->getVkImage(), vk::ImageLayout::eTransferDstOptimal, regions );
//Transfer back to eGeneral
vk::ImageMemoryBarrier barrier_from {};
barrier_from.oldLayout = barrier.newLayout;
barrier_from.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal;
barrier_from.image = m_image_view->getVkImage();
barrier_from.subresourceRange = range;
barrier_from.srcAccessMask = vk::AccessFlagBits::eTransferWrite;
barrier_from.dstAccessMask = vk::AccessFlagBits::eShaderRead;
const std::vector< vk::ImageMemoryBarrier > barriers_from { barrier_from };
cmd.pipelineBarrier(
vk::PipelineStageFlagBits::eTransfer,
vk::PipelineStageFlagBits::eFragmentShader,
vk::DependencyFlags(),
{},
{},
barriers_from );
}
void Texture::dropStaging()
{
if ( m_staging ) m_staging.reset();
if ( m_image_view.use_count() == 1 )
{
log::info( "Destroying texture {}", getID() );
}
}
vk::DescriptorImageInfo Texture::getDescriptor() const
@@ -249,7 +163,7 @@ namespace fgl::engine
void Texture::createImGuiSet()
{
if ( !this->isReady() )
if ( !this->ready() )
{
log::debug( "Unable to create ImGui set. Texture was not ready" );
return;
@@ -274,7 +188,7 @@ namespace fgl::engine
vk::DescriptorSet& Texture::getImGuiDescriptorSet()
{
assert( !m_staging );
assert( ready() );
assert( m_imgui_set != VK_NULL_HANDLE );
return m_imgui_set;
}
@@ -286,12 +200,15 @@ namespace fgl::engine
m_extent()
{
m_image_view->getSampler() = std::move( sampler );
setReady();
}
bool Texture::ready() const
{
return this->m_image_view->ready();
}
TextureID Texture::getID() const
{
assert( !m_staging );
return m_texture_id;
}

View File

@@ -9,6 +9,7 @@
#include <filesystem>
#include "engine/assets/AssetManager.hpp"
#include "engine/image/ImageView.hpp"
#include "engine/image/Sampler.hpp"
namespace fgl::engine
@@ -32,39 +33,34 @@ namespace fgl::engine
template < typename T >
friend class AssetStore;
friend class TransferManager;
//TODO: Implement reusing texture ids
TextureID m_texture_id;
std::shared_ptr< ImageView > m_image_view {};
std::unique_ptr< BufferSuballocation > m_staging { nullptr };
vk::Extent2D m_extent;
vk::DescriptorSet m_imgui_set { VK_NULL_HANDLE };
[[nodiscard]] Texture( const std::tuple< std::vector< std::byte >, int, int, vk::Format >& );
[[nodiscard]] Texture( std::tuple< std::vector< std::byte >, int, int, vk::Format > );
[[nodiscard]]
Texture( const std::vector< std::byte >& data, const int x, const int y, const vk::Format texture_format );
Texture( std::vector< std::byte >&& data, const int x, const int y, const vk::Format texture_format );
[[nodiscard]]
Texture( const std::vector< std::byte >& data, const vk::Extent2D extent, const vk::Format texture_format );
Texture( std::vector< std::byte >&& data, const vk::Extent2D extent, const vk::Format texture_format );
[[nodiscard]] Texture( const std::filesystem::path& path, const vk::Format format );
[[nodiscard]] Texture( const std::filesystem::path& path );
void stage( vk::raii::CommandBuffer& cmd ) override;
void dropStaging();
public:
Texture() = delete;
~Texture();
void stage();
Texture( const Texture& ) = delete;
Texture& operator=( const Texture& ) = delete;
@@ -73,6 +69,8 @@ namespace fgl::engine
Texture( Image& image, Sampler sampler = Sampler() );
bool ready() const;
[[nodiscard]] TextureID getID() const;
[[nodiscard]] vk::DescriptorImageInfo getDescriptor() const;