Implement basic glTF loading for sponza
.gitmodules (vendored)
@@ -34,3 +34,6 @@
[submodule "dependencies/glfw3"]
path = dependencies/glfw3
url = https://github.com/glfw/glfw.git
[submodule "dependencies/tinygltf"]
path = dependencies/tinygltf
url = https://github.com/syoyo/tinygltf.git
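After checking out this commit, the new dependency still has to be fetched (for example with git submodule update --init dependencies/tinygltf) before the tinygltf headers are available to the build.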
@@ -38,13 +38,13 @@ file(GLOB VERT_SHADERS "${CMAKE_CURRENT_SOURCE_DIR}/shaders/*.vert")
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/bin/shaders")
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/bin/models")

file(GLOB MODELS "${CMAKE_CURRENT_SOURCE_DIR}/models/*.obj")
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/models" DESTINATION "${CMAKE_BINARY_DIR}/bin")

foreach (MODEL IN LISTS MODELS)
get_filename_component(FILENAME ${MODEL} NAME)
add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/bin/models/${FILENAME}
COMMAND ${CMAKE_COMMAND} -E copy ${MODEL} ${CMAKE_BINARY_DIR}/bin/models/${FILENAME}
COMMENT "Copying ${MODEL}")
COMMENT " Copying ${MODEL} ")
list(APPEND OBJ_MODELS ${CMAKE_BINARY_DIR}/bin/models/${FILENAME})
endforeach ()
@@ -1,9 +0,0 @@
include(dependencies/tracy)
include(dependencies/qt)
include(dependencies/fmt)
include(dependencies/spdlog)
include(dependencies/lz4)
include(dependencies/blurhash)
@@ -127,7 +127,7 @@
set(FGL_FLAGS "${FGL_OPTIMIZATION_FLAGS_${UPPER_BUILD_TYPE}} ${FGL_FLAGS_${UPPER_BUILD_TYPE}}" PARENT_SCOPE) # Flags for our shit
#set(FGL_FLAGS "${FGL_OPTIMIZATION_FLAGS_${UPPER_BUILD_TYPE}}" PARENT_SCOPE)
set(FGL_CHILD_FLAGS "${FGL_OPTIMIZATION_FLAGS_${UPPER_BUILD_TYPE}}" PARENT_SCOPE) # Child flags for adding optmization to anything we build ourselves but doesn't follow our standard
set(CMAKE_CXX_FLAGS "${FGL_CHILD_FLAGS}")
#set(CMAKE_CXX_FLAGS "${FGL_CHILD_FLAGS}")
endif ()
endfunction()
@@ -15,8 +15,8 @@ layout (set = 0, binding = 0) uniform CameraInfo {
mat4 inverse_view;
} ubo;

#define NEAR_PLANE 0.1f
#define FAR_PLANE 100.0f
#define NEAR_PLANE 0.01f
#define FAR_PLANE 10.0f

float linearDepth(float depth)
{
@@ -2,4 +2,5 @@
add_subdirectory(vma)
add_subdirectory(imgui)
add_subdirectory(core)
add_subdirectory(engine)
add_subdirectory(engine)
add_subdirectory(objectloaders)
@@ -21,7 +21,7 @@ target_precompile_headers(FGLEngine PRIVATE

target_compile_definitions(FGLEngine PUBLIC VULKAN_HPP_FLAGS_MASK_TYPE_AS_PUBLIC)

target_link_libraries(FGLEngine PUBLIC Vulkan::Vulkan glfw glm FGLImGui Tracy::TracyClient VMA)
target_link_libraries(FGLEngine PUBLIC Vulkan::Vulkan glfw glm FGLImGui Tracy::TracyClient VMA FGLLoader)
target_include_directories(FGLEngine PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
set_target_properties(FGLEngine PROPERTIES COMPILE_FLAGS ${FGL_FLAGS})
#target_compile_definitions(FGLEngine PRIVATE TRACY_ENABLE=1)
@@ -236,6 +236,23 @@ namespace fgl::engine
}
}*/

{
std::shared_ptr< Model > model { Model::createModel(
Device::getInstance(),
"models/khronos-sponza/Sponza.gltf",
m_entity_renderer.getVertexBuffer(),
m_entity_renderer.getIndexBuffer() ) };

auto sponza = GameObject::createGameObject();
sponza.model = model;
sponza.transform.translation = { 0.0f, 0.0f, 0.0f };
sponza.transform.scale = { 0.007f, 0.007f, 0.007f };

sponza.model->syncBuffers( command_buffer );

game_objects.emplace( sponza.getId(), std::move( sponza ) );
}

{
std::shared_ptr< Model > model { Model::createModel(
Device::getInstance(),
@@ -253,6 +270,8 @@ namespace fgl::engine

game_objects.emplace( smooth_vase.getId(), std::move( smooth_vase ) );
}
/*

{
std::shared_ptr< Model > flat_model { Model::createModel(
Device::getInstance(),
@@ -287,6 +306,7 @@ namespace fgl::engine

game_objects.emplace( floor.getId(), std::move( floor ) );
}
*/

Device::getInstance().endSingleTimeCommands( command_buffer );
@@ -7,15 +7,6 @@
#include <cassert>
#include <cstring>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Weffc++"
#pragma GCC diagnostic ignored "-Wuseless-cast"

#define TINYOBJLOADER_IMPLEMENTATION
#include "tinyobjloader/tiny_obj_loader.h"

#pragma GCC diagnostic pop

#define GLM_ENABLE_EXPERIMENTAL
#define GLM_FORCE_RADIANS
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
@@ -25,11 +16,19 @@
#include <unordered_map>

#include "engine/buffers/Buffer.hpp"
#include "engine/buffers/SuballocationView.hpp"

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#pragma GCC diagnostic ignored "-Weffc++"
#include "objectloaders/tiny_gltf.h"
#include "objectloaders/tiny_obj_loader.h"
#pragma GCC diagnostic pop

#include "utils.hpp"

namespace std
{

template <>
struct hash< fgl::engine::Vertex >
{
@@ -46,29 +45,48 @@ namespace std
namespace fgl::engine
{

vk::DrawIndexedIndirectCommand Model::
buildParameters( VertexBufferSuballocation& vertex_buffer, IndexBufferSuballocation& index_buffer )
std::vector< vk::DrawIndexedIndirectCommand > Model::buildParameters( const std::vector< Primitive >& primitives )
{
vk::DrawIndexedIndirectCommand cmd;
std::vector< vk::DrawIndexedIndirectCommand > draw_parameters;

cmd.indexCount = index_buffer.count();
cmd.firstIndex = index_buffer.getOffsetCount();
for ( const auto& primitive : primitives )
{
vk::DrawIndexedIndirectCommand cmd;
cmd.indexCount = primitive.m_index_buffer.count();
cmd.firstIndex = primitive.m_index_buffer.getOffsetCount();

cmd.vertexOffset = static_cast< std::int32_t >( vertex_buffer.getOffsetCount() );
cmd.vertexOffset = static_cast< std::int32_t >( primitive.m_vertex_buffer.getOffsetCount() );

cmd.firstInstance = 0;
cmd.instanceCount = 1;
cmd.firstInstance = 0;
cmd.instanceCount = 1;

return cmd;
draw_parameters.emplace_back( std::move( cmd ) );
}

return draw_parameters;
}

Model::Model( Device& device, const Builder& builder ) :
std::vector< vk::DrawIndexedIndirectCommand > Model::getDrawCommand( const std::uint32_t index ) const
{
std::vector< vk::DrawIndexedIndirectCommand > draw_commands;
draw_commands.reserve( m_primitives.size() );
for ( const auto& cmd : m_draw_parameters )
{
auto new_cmd { cmd };
new_cmd.firstInstance = index;

draw_commands.push_back( new_cmd );
}

return draw_commands;
}

Model::Model( Device& device, Builder& builder ) :
m_device( device ),
m_vertex_buffer( builder.m_vertex_buffer, builder.verts ),
has_index_buffer( builder.m_index_buffer.size() > 0 ),
m_index_buffer( builder.m_index_buffer, builder.indicies ),
m_draw_parameters( buildParameters( m_vertex_buffer, m_index_buffer ) )
{}
m_draw_parameters( buildParameters( builder.m_primitives ) )
{
m_primitives = std::move( builder.m_primitives );
}

std::unique_ptr< Model > Model::
createModel( Device& device, const std::filesystem::path& path, Buffer& vertex_buffer, Buffer& index_buffer )
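As a concrete reading of the new buildParameters(): if a primitive's index data begins 4096 indices into the shared index buffer and its vertex data begins 1024 vertices into the shared vertex buffer, its command ends up with firstIndex = 4096 and vertexOffset = 1024, so an index value i recorded for that primitive addresses vertex 1024 + i. That is what lets every primitive live inside the two large shared buffers while still getting its own vk::DrawIndexedIndirectCommand.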
@@ -81,28 +99,11 @@ namespace fgl::engine

void Model::syncBuffers( vk::CommandBuffer& cmd_buffer )
{
m_vertex_buffer.stage( cmd_buffer );

m_index_buffer.stage( cmd_buffer );
}

void Model::bind( vk::CommandBuffer& cmd_buffer )
{
std::vector< vk::Buffer > vertex_buffers { m_vertex_buffer.getVkBuffer() };

cmd_buffer.bindVertexBuffers( 0, vertex_buffers, { 0 } );

if ( has_index_buffer ) cmd_buffer.bindIndexBuffer( m_index_buffer.getVkBuffer(), 0, vk::IndexType::eUint32 );
}

void Model::draw( vk::CommandBuffer& cmd_buffer )
{
cmd_buffer.drawIndexed(
m_draw_parameters.indexCount,
m_draw_parameters.instanceCount,
m_draw_parameters.firstIndex,
m_draw_parameters.vertexOffset,
m_draw_parameters.firstInstance );
for ( auto& primitive : m_primitives )
{
primitive.m_vertex_buffer.stage( cmd_buffer );
primitive.m_index_buffer.stage( cmd_buffer );
}
}

std::vector< vk::VertexInputBindingDescription > Vertex::getBindingDescriptions()
@@ -143,8 +144,160 @@ namespace fgl::engine

void Model::Builder::loadModel( const std::filesystem::path& filepath )
{
verts.clear();
indicies.clear();
if ( filepath.extension() == ".obj" )
{
loadObj( filepath );
}
else if ( filepath.extension() == ".gltf" )
{
loadGltf( filepath );
}
else
//Dunno
throw std::runtime_error( "Unknown model file extension" );
}

void Model::Builder::loadGltf( const std::filesystem::path& filepath )
|
||||
{
|
||||
std::cout << "Loading gltf model " << filepath << std::endl;
|
||||
|
||||
if ( !std::filesystem::exists( filepath ) ) throw std::runtime_error( "File does not exist" );
|
||||
|
||||
m_primitives.clear();
|
||||
|
||||
tinygltf::Model model {};
|
||||
tinygltf::TinyGLTF loader {};
|
||||
std::string err;
|
||||
std::string warn;
|
||||
|
||||
loader.RemoveImageLoader();
|
||||
|
||||
loader.LoadASCIIFromFile( &model, &err, &warn, filepath );
|
||||
|
||||
if ( !err.empty() ) throw std::runtime_error( err );
|
||||
|
||||
if ( !warn.empty() )
|
||||
std::cout << "Warning while loading model \"" << filepath << "\"\nWarning:" << warn << std::endl;
|
||||
|
||||
for ( const tinygltf::Mesh& mesh : model.meshes )
|
||||
{
|
||||
for ( const tinygltf::Primitive& primitive : mesh.primitives )
|
||||
{
|
||||
//TODO: Implement modes
|
||||
|
||||
//Load indicies
|
||||
std::vector< std::uint32_t > indicies_data;
|
||||
{
|
||||
auto& indicies_accessor { model.accessors.at( primitive.indices ) };
|
||||
auto& buffer_view { model.bufferViews.at( indicies_accessor.bufferView ) };
|
||||
auto& buffer { model.buffers.at( buffer_view.buffer ) };
|
||||
|
||||
indicies_data.resize( static_cast< std::uint64_t >( indicies_accessor.count ) );
|
||||
|
||||
assert( indicies_accessor.type == TINYGLTF_TYPE_SCALAR );
|
||||
|
||||
if ( indicies_accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT )
|
||||
{
|
||||
unsigned short* data { reinterpret_cast< unsigned short* >(
|
||||
buffer.data.data() + buffer_view.byteOffset + indicies_accessor.byteOffset ) };
|
||||
for ( std::size_t i = 0; i < indicies_accessor.count; i++ )
|
||||
{
|
||||
indicies_data[ i ] = data[ i ];
|
||||
}
|
||||
}
|
||||
else if ( indicies_accessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT )
|
||||
{
|
||||
std::memcpy(
|
||||
indicies_data.data(),
|
||||
buffer.data.data() + buffer_view.byteOffset + indicies_accessor.byteOffset,
|
||||
static_cast< std::uint64_t >( indicies_accessor.count ) * sizeof( std::uint32_t ) );
|
||||
}
|
||||
else
|
||||
throw std::runtime_error( "Unknown index type" );
|
||||
}
|
||||
|
||||
//Load positions
|
||||
std::vector< glm::vec3 > position_data;
|
||||
{
|
||||
auto& position_accessor { model.accessors.at( primitive.attributes.at( "POSITION" ) ) };
|
||||
auto& buffer_view { model.bufferViews.at( position_accessor.bufferView ) };
|
||||
auto& buffer { model.buffers.at( buffer_view.buffer ) };
|
||||
|
||||
position_data.resize( static_cast< std::uint64_t >( position_accessor.count ) );
|
||||
|
||||
//Check the type
|
||||
assert( position_accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT );
|
||||
assert( position_accessor.type == TINYGLTF_TYPE_VEC3 );
|
||||
static_assert( sizeof( glm::vec3 ) == sizeof( float ) * 3, "glm::vec3 is not three floats" );
|
||||
|
||||
std::memcpy(
|
||||
position_data.data(),
|
||||
buffer.data.data() + buffer_view.byteOffset + position_accessor.byteOffset,
|
||||
static_cast< std::uint64_t >( position_accessor.count ) * sizeof( glm::vec3 ) );
|
||||
}
|
||||
|
||||
std::vector< glm::vec3 > normals;
|
||||
|
||||
if ( primitive.attributes.find( "NORMAL" ) != primitive.attributes.end() )
|
||||
{
|
||||
auto& normal_accessor { model.accessors.at( primitive.attributes.at( "NORMAL" ) ) };
|
||||
auto& buffer_view { model.bufferViews.at( normal_accessor.bufferView ) };
|
||||
auto& buffer { model.buffers.at( buffer_view.buffer ) };
|
||||
|
||||
normals.resize( static_cast< std::uint64_t >( normal_accessor.count ) );
|
||||
|
||||
//Check the type
|
||||
assert( normal_accessor.componentType == TINYGLTF_COMPONENT_TYPE_FLOAT );
|
||||
assert( normal_accessor.type == TINYGLTF_TYPE_VEC3 );
|
||||
|
||||
std::memcpy(
|
||||
normals.data(),
|
||||
buffer.data.data() + buffer_view.byteOffset + normal_accessor.byteOffset,
|
||||
static_cast< std::uint64_t >( normal_accessor.count ) * sizeof( glm::vec3 ) );
|
||||
}
|
||||
else
|
||||
normals.resize( position_data.size() );
|
||||
|
||||
std::vector< Vertex > verts;
|
||||
verts.resize( position_data.size() );
|
||||
for ( std::size_t i = 0; i < position_data.size(); i++ )
|
||||
{
|
||||
//Fix position to be -Z UP
|
||||
//verts[ i ].m_position = position_data[ i ];
|
||||
verts[ i ].m_position = { position_data[ i ].x, -position_data[ i ].y, position_data[ i ].z };
|
||||
verts[ i ].m_normal = normals[ i ];
|
||||
}
|
||||
|
||||
VertexBufferSuballocation vertex_buffer { m_vertex_buffer, verts };
|
||||
IndexBufferSuballocation index_buffer { m_index_buffer, indicies_data };
|
||||
|
||||
Primitive prim { std::move( vertex_buffer ), std::move( index_buffer ) };
|
||||
|
||||
m_primitives.emplace_back( std::move( prim ) );
|
||||
}
|
||||
|
||||
std::cout << "Mesh has " << mesh.primitives.size() << " primitives" << std::endl;
|
||||
}
|
||||
|
||||
for ( const tinygltf::Scene& scene : model.scenes )
|
||||
{
|
||||
std::cout << "Loading scene " << scene.name << std::endl;
|
||||
std::cout << "Scene has " << scene.nodes.size() << " nodes" << std::endl;
|
||||
|
||||
for ( auto child : scene.nodes )
|
||||
{
|
||||
std::cout << "Child: " << child << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
std::cout << "Scenes: " << model.scenes.size() << std::endl;
|
||||
|
||||
std::cout << "Meshes: " << model.meshes.size() << std::endl;
|
||||
}
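Every attribute read in loadGltf() follows the same accessor -> bufferView -> buffer chain. A minimal sketch of that pattern, separate from the commit itself and assuming tightly packed data (bufferView byteStride of 0) just like the loader above does:

// Illustrative helper only: resolve the raw bytes behind a tinygltf accessor.
// Assumes tightly packed data (byteStride == 0), matching the loader above.
#include "objectloaders/tiny_gltf.h"
#include <cstddef>

inline const unsigned char* accessorData( const tinygltf::Model& model, const int accessor_idx )
{
    const tinygltf::Accessor& accessor { model.accessors.at( static_cast< std::size_t >( accessor_idx ) ) };
    const tinygltf::BufferView& view { model.bufferViews.at( static_cast< std::size_t >( accessor.bufferView ) ) };
    const tinygltf::Buffer& buffer { model.buffers.at( static_cast< std::size_t >( view.buffer ) ) };

    // Element i then lives at the returned pointer + i * element size
    // (12 bytes for a float VEC3 position or normal, for example).
    return buffer.data.data() + view.byteOffset + accessor.byteOffset;
}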
|
||||
|
||||
void Model::Builder::loadObj( const std::filesystem::path& filepath )
|
||||
{
|
||||
m_primitives.clear();
|
||||
|
||||
tinyobj::attrib_t attrib {};
|
||||
std::vector< tinyobj::shape_t > shapes {};
|
||||
@@ -157,6 +310,9 @@ namespace fgl::engine
|
||||
|
||||
std::unordered_map< Vertex, std::uint32_t > unique_verts {};
|
||||
|
||||
std::vector< Vertex > verts;
|
||||
std::vector< std::uint32_t > indicies;
|
||||
|
||||
for ( const auto& shape : shapes )
|
||||
{
|
||||
for ( const auto& index : shape.mesh.indices )
|
||||
@@ -213,6 +369,10 @@ namespace fgl::engine
|
||||
}
|
||||
}
|
||||
|
||||
m_primitives.emplace_back(
|
||||
VertexBufferSuballocation( m_vertex_buffer, std::move( verts ) ),
|
||||
IndexBufferSuballocation( m_index_buffer, std::move( indicies ) ) );
|
||||
|
||||
std::cout << unique_verts.size() << " unique verts" << std::endl;
|
||||
}
|
||||
} // namespace fgl::engine
|
||||
@@ -33,6 +33,8 @@ namespace fgl::engine
|
||||
static std::vector< vk::VertexInputBindingDescription > getBindingDescriptions();
|
||||
static std::vector< vk::VertexInputAttributeDescription > getAttributeDescriptions();
|
||||
|
||||
Vertex() noexcept = default;
|
||||
|
||||
bool operator==( const Vertex& other ) const
|
||||
{
|
||||
return m_position == other.m_position && m_color == other.m_color && m_normal == other.m_normal
|
||||
@@ -54,28 +56,40 @@ namespace fgl::engine
|
||||
|
||||
using ModelMatrixInfoBufferSuballocation = HostVector< ModelMatrixInfo >;
|
||||
|
||||
struct Primitive
|
||||
{
|
||||
VertexBufferSuballocation m_vertex_buffer;
|
||||
IndexBufferSuballocation m_index_buffer;
|
||||
|
||||
Primitive( VertexBufferSuballocation&& vertex_buffer, IndexBufferSuballocation&& index_buffer ) :
|
||||
m_vertex_buffer( std::move( vertex_buffer ) ),
|
||||
m_index_buffer( std::move( index_buffer ) )
|
||||
{}
|
||||
|
||||
Primitive() = delete;
|
||||
Primitive( const Primitive& other ) = delete;
|
||||
Primitive( Primitive&& other ) = default;
|
||||
};
|
||||
|
||||
class Model
|
||||
{
|
||||
Device& m_device;
|
||||
VertexBufferSuballocation m_vertex_buffer;
|
||||
|
||||
bool has_index_buffer { false };
|
||||
IndexBufferSuballocation m_index_buffer;
|
||||
std::vector< ::fgl::engine::Primitive > m_primitives {};
|
||||
|
||||
vk::DrawIndexedIndirectCommand
|
||||
buildParameters( VertexBufferSuballocation& vertex_buffer, IndexBufferSuballocation& index_buffer );
|
||||
std::vector< vk::DrawIndexedIndirectCommand > buildParameters( const std::vector< Primitive >& primitives );
|
||||
|
||||
vk::DrawIndexedIndirectCommand m_draw_parameters;
|
||||
std::vector< vk::DrawIndexedIndirectCommand > m_draw_parameters;
|
||||
|
||||
public:
|
||||
|
||||
struct Builder
|
||||
{
|
||||
std::vector< Vertex > verts {};
|
||||
std::vector< std::uint32_t > indicies {};
|
||||
Buffer& m_vertex_buffer;
|
||||
Buffer& m_index_buffer;
|
||||
|
||||
std::vector< ::fgl::engine::Primitive > m_primitives {};
|
||||
|
||||
Builder() = delete;
|
||||
|
||||
Builder( Buffer& parent_vertex_buffer, Buffer& parent_index_buffer ) :
|
||||
@@ -84,19 +98,18 @@ namespace fgl::engine
|
||||
{}
|
||||
|
||||
void loadModel( const std::filesystem::path& filepath );
|
||||
void loadObj( const std::filesystem::path& filepath );
|
||||
void loadGltf( const std::filesystem::path& filepath );
|
||||
};
|
||||
|
||||
vk::DrawIndexedIndirectCommand getDrawCommand() const { return m_draw_parameters; }
|
||||
std::vector< vk::DrawIndexedIndirectCommand > getDrawCommand( const std::uint32_t index ) const;
|
||||
|
||||
static std::unique_ptr< Model > createModel(
|
||||
Device& device, const std::filesystem::path& path, Buffer& vertex_buffer, Buffer& index_buffer );
|
||||
|
||||
void syncBuffers( vk::CommandBuffer& cmd_buffer );
|
||||
|
||||
void bind( vk::CommandBuffer& cmd_buffer );
|
||||
void draw( vk::CommandBuffer& cmd_buffer );
|
||||
|
||||
Model( Device& device, const Builder& builder );
|
||||
Model( Device& device, Builder& builder );
|
||||
|
||||
~Model() = default;
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
#include "Buffer.hpp"
|
||||
|
||||
#include "BufferSuballocationHandle.hpp"
|
||||
#include "engine/Device.hpp"
|
||||
|
||||
namespace fgl::engine
|
||||
@@ -102,6 +103,146 @@ namespace fgl::engine
|
||||
return size;
|
||||
}
|
||||
|
||||
std::shared_ptr< BufferSuballocationHandle > Buffer::
|
||||
suballocate( vk::DeviceSize memory_size, std::uint32_t allignment )
|
||||
{
|
||||
ZoneScoped;
|
||||
//Calculate alignment from alignment, ubo_alignment, and atom_size_alignment
|
||||
memory_size = align( memory_size, alignment() );
|
||||
|
||||
//Find a free space.
|
||||
auto itter = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ this, memory_size, allignment ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto [ offset, size ] = pair;
|
||||
|
||||
const auto new_offset = align( offset, alignment(), allignment );
|
||||
size -= new_offset - offset;
|
||||
|
||||
return size >= memory_size;
|
||||
} );
|
||||
|
||||
if ( itter == m_free_blocks.end() )
|
||||
{
|
||||
std::cout << "====== !!! OOM !!! ======\n"
|
||||
"====== Allocated Blocks ======\n";
|
||||
|
||||
for ( auto [ offset, size ] : m_free_blocks )
|
||||
{
|
||||
std::cout << "Offset: " << std::hex << offset << " Size: " << std::dec << size << "\n";
|
||||
|
||||
std::cout << "Aligned offset: " << std::hex << align( offset, alignment(), allignment )
|
||||
<< " Size: " << std::dec << size << "\n"
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
std::cout << "====== Suballocations ======\n";
|
||||
|
||||
for ( auto [ offset, size ] : m_suballocations )
|
||||
{
|
||||
std::cout << "Offset: " << std::hex << offset << " Size: " << std::dec << size << "\n";
|
||||
}
|
||||
|
||||
std::cout << "=============================\n"
|
||||
<< "Attempted to allocate block of size: "
|
||||
<< fgl::literals::size_literals::to_string( memory_size ) << std::endl;
|
||||
|
||||
throw std::runtime_error( "Failed to find free space" );
|
||||
}
|
||||
|
||||
//Allocate
|
||||
auto [ offset, size ] = *itter;
|
||||
m_free_blocks.erase( itter );
|
||||
|
||||
const auto aligned_offset { align( offset, alignment(), allignment ) };
|
||||
|
||||
if ( aligned_offset != offset )
|
||||
{
|
||||
m_free_blocks.emplace_back( std::make_pair( offset, aligned_offset - offset ) );
|
||||
offset = aligned_offset;
|
||||
size -= aligned_offset - offset;
|
||||
}
|
||||
|
||||
m_suballocations.insert_or_assign( offset, memory_size );
|
||||
|
||||
if ( size - memory_size > 0 )
|
||||
m_free_blocks.emplace_back( std::make_pair( offset + memory_size, size - memory_size ) );
|
||||
|
||||
return std::make_shared< BufferSuballocationHandle >( *this, offset, memory_size );
|
||||
}
|
||||
|
||||
void Buffer::free( fgl::engine::BufferSuballocationHandle& info )
|
||||
{
|
||||
ZoneScoped;
|
||||
|
||||
{
|
||||
//Find the suballocation
|
||||
auto itter = m_suballocations.find( info.m_offset );
|
||||
|
||||
if ( itter == m_suballocations.end() ) throw std::runtime_error( "Failed to find suballocation" );
|
||||
|
||||
//Remove the suballocation
|
||||
m_suballocations.erase( itter );
|
||||
}
|
||||
|
||||
// Forward check
|
||||
{
|
||||
ZoneScopedN( "Forward check" );
|
||||
auto itter = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ &info ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto& [ offset, size ] = pair;
|
||||
return offset > info.m_offset && offset == info.m_offset + info.m_size;
|
||||
} );
|
||||
|
||||
//If itter is not end then we have found a block where itter->offset > offset
|
||||
|
||||
if ( itter != m_free_blocks.end() )
|
||||
{
|
||||
auto& [ free_offset, free_size ] = *itter;
|
||||
info.m_size += free_size; // Add their size to ours
|
||||
|
||||
//Nuke block
|
||||
m_free_blocks.erase( itter );
|
||||
}
|
||||
}
|
||||
|
||||
// Backwards check
|
||||
{
|
||||
ZoneScopedN( "Backwards check" );
|
||||
auto prev_block = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ &info ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto& [ offset, size ] = pair;
|
||||
return offset + size + 1 == info.m_offset;
|
||||
} );
|
||||
|
||||
if ( prev_block != m_free_blocks.end() )
|
||||
{
|
||||
auto& [ offset, size ] = *prev_block;
|
||||
size += info.m_size;
|
||||
}
|
||||
else
|
||||
{
|
||||
//No block before us. We are the free block
|
||||
m_free_blocks.push_back( { info.m_offset, info.m_size } );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void* Buffer::map( BufferSuballocationHandle& handle )
|
||||
{
|
||||
if ( m_handle->m_alloc_info.pMappedData == nullptr ) return nullptr;
|
||||
|
||||
return static_cast< std::byte* >( m_handle->m_alloc_info.pMappedData ) + handle.m_offset;
|
||||
}
|
||||
|
||||
Buffer::~Buffer()
|
||||
{}
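The suballocator above relies on an align() helper that is not part of this diff. A sketch of the behaviour the calls assume (round the value up to a multiple of the supplied alignments); the actual implementation in the repository may differ:

// Assumed behaviour of align(), shown for reference only.
#include <vulkan/vulkan.hpp>

constexpr vk::DeviceSize align( const vk::DeviceSize value, const vk::DeviceSize alignment )
{
    // Round up to the next multiple of `alignment`; 0 or 1 leaves the value untouched.
    if ( alignment <= 1 ) return value;
    return ( ( value + alignment - 1 ) / alignment ) * alignment;
}

constexpr vk::DeviceSize align( const vk::DeviceSize value, const vk::DeviceSize a, const vk::DeviceSize b )
{
    // For the power-of-two alignments Vulkan reports, aligning to the larger of the two satisfies both.
    return align( value, a > b ? a : b );
}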
|
||||
|
||||
|
||||
@@ -25,13 +25,8 @@ namespace fgl::engine
|
||||
{
|
||||
|
||||
class BufferHandle;
|
||||
|
||||
struct BufferSuballocationInfo
|
||||
{
|
||||
std::shared_ptr< BufferHandle > buffer;
|
||||
vk::DeviceSize offset;
|
||||
vk::DeviceSize size;
|
||||
};
|
||||
class BufferSuballocation;
|
||||
struct BufferSuballocationHandle;
|
||||
|
||||
//TODO: Dynamic/onDemand resizing of Buffer for suballocations
|
||||
//TODO: Defragmentation
|
||||
@@ -40,9 +35,9 @@ namespace fgl::engine
|
||||
|
||||
class BufferHandle
|
||||
{
|
||||
vk::Buffer m_buffer;
|
||||
vk::Buffer m_buffer { VK_NULL_HANDLE };
|
||||
VmaAllocation m_allocation {};
|
||||
VmaAllocationInfo m_alloc_info;
|
||||
VmaAllocationInfo m_alloc_info {};
|
||||
|
||||
vk::DeviceSize m_memory_size;
|
||||
|
||||
@@ -113,17 +108,7 @@ namespace fgl::engine
|
||||
//! Total memory size of the buffer
|
||||
vk::DeviceSize size() const { return m_handle->m_memory_size; }
|
||||
|
||||
void* map( BufferSuballocationInfo& info )
|
||||
{
|
||||
assert(
|
||||
info.offset + info.size <= m_handle->m_memory_size
|
||||
&& "BufferSuballocationT::map() called with invalid size" );
|
||||
assert(
|
||||
m_handle->m_alloc_info.pMappedData
|
||||
&& "BufferSuballocationT::map() called on buffer with no mapped data" );
|
||||
|
||||
return static_cast< std::byte* >( m_handle->m_alloc_info.pMappedData ) + info.offset;
|
||||
}
|
||||
void* map( BufferSuballocationHandle& info );
|
||||
|
||||
bool isMappable() const { return m_handle->m_alloc_info.pMappedData != nullptr; }
|
||||
|
||||
@@ -142,130 +127,10 @@ namespace fgl::engine
|
||||
* @par
|
||||
* @note Alignment for atom_size is 0 if buffer is not host visible
|
||||
*/
|
||||
BufferSuballocationInfo suballocate( vk::DeviceSize memory_size, std::uint32_t allignment )
|
||||
{
|
||||
ZoneScoped;
|
||||
//Calculate alignment from alignment, ubo_alignment, and atom_size_alignment
|
||||
memory_size = align( memory_size, alignment() );
|
||||
std::shared_ptr< BufferSuballocationHandle >
|
||||
suballocate( vk::DeviceSize memory_size, std::uint32_t allignment = 1 );
|
||||
|
||||
//Find a free space.
|
||||
auto itter = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ this, memory_size, allignment ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto [ offset, size ] = pair;
|
||||
|
||||
const auto new_offset = align( offset, alignment(), allignment );
|
||||
size -= new_offset - offset;
|
||||
|
||||
return size >= memory_size;
|
||||
} );
|
||||
|
||||
if ( itter == m_free_blocks.end() )
|
||||
{
|
||||
std::cout << "====== !!! OOM !!! ======\n"
|
||||
"====== Allocated Blocks ======\n";
|
||||
|
||||
for ( auto [ offset, size ] : m_free_blocks )
|
||||
{
|
||||
std::cout << "Offset: " << std::hex << offset << " Size: " << std::dec << size << "\n";
|
||||
|
||||
std::cout << "Aligned offset: " << std::hex << align( offset, alignment(), allignment )
|
||||
<< " Size: " << std::dec << size << "\n"
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
std::cout << "=============================\n"
|
||||
<< "Attempted to allocate block of size: "
|
||||
<< fgl::literals::size_literals::to_string( memory_size ) << std::endl;
|
||||
|
||||
throw std::runtime_error( "Failed to find free space" );
|
||||
}
|
||||
|
||||
//Allocate
|
||||
auto [ offset, size ] = *itter;
|
||||
m_free_blocks.erase( itter );
|
||||
|
||||
const auto aligned_offset { align( offset, alignment(), allignment ) };
|
||||
|
||||
if ( aligned_offset != offset )
|
||||
{
|
||||
m_free_blocks.emplace_back( std::make_pair( offset, aligned_offset - offset ) );
|
||||
offset = aligned_offset;
|
||||
size -= aligned_offset - offset;
|
||||
}
|
||||
|
||||
m_suballocations.insert_or_assign( offset, memory_size );
|
||||
|
||||
if ( size - memory_size > 0 )
|
||||
m_free_blocks.emplace_back( std::make_pair( offset + memory_size, size - memory_size ) );
|
||||
|
||||
return { m_handle, offset, memory_size };
|
||||
}
|
||||
|
||||
void free( BufferSuballocationInfo& info )
|
||||
{
|
||||
ZoneScoped;
|
||||
|
||||
{
|
||||
//Find the suballocation
|
||||
auto itter = m_suballocations.find( info.offset );
|
||||
|
||||
if ( itter == m_suballocations.end() ) throw std::runtime_error( "Failed to find suballocation" );
|
||||
|
||||
//Remove the suballocation
|
||||
m_suballocations.erase( itter );
|
||||
}
|
||||
|
||||
// Forward check
|
||||
{
|
||||
ZoneScopedN( "Forward check" );
|
||||
auto itter = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ &info ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto& [ offset, size ] = pair;
|
||||
return offset > info.offset && offset == info.offset + info.size;
|
||||
} );
|
||||
|
||||
//If itter is not end then we have found a block where itter->offset > offset
|
||||
|
||||
if ( itter != m_free_blocks.end() )
|
||||
{
|
||||
auto& [ free_offset, free_size ] = *itter;
|
||||
info.size += free_size; // Add their size to ours
|
||||
|
||||
//Nuke block
|
||||
m_free_blocks.erase( itter );
|
||||
}
|
||||
}
|
||||
|
||||
// Backwards check
|
||||
{
|
||||
ZoneScopedN( "Backwards check" );
|
||||
auto prev_block = std::find_if(
|
||||
m_free_blocks.begin(),
|
||||
m_free_blocks.end(),
|
||||
[ &info ]( const std::pair< vk::DeviceSize, vk::DeviceSize >& pair )
|
||||
{
|
||||
auto& [ offset, size ] = pair;
|
||||
return offset + size + 1 == info.offset;
|
||||
} );
|
||||
|
||||
if ( prev_block != m_free_blocks.end() )
|
||||
{
|
||||
auto& [ offset, size ] = *prev_block;
|
||||
size += info.size;
|
||||
}
|
||||
else
|
||||
{
|
||||
//No block before us. We are the free block
|
||||
m_free_blocks.push_back( { info.offset, info.size } );
|
||||
}
|
||||
}
|
||||
}
|
||||
void free( BufferSuballocationHandle& info );
|
||||
};
|
||||
|
||||
void initGlobalStagingBuffer( std::uint64_t size );
|
||||
|
||||
src/engine/buffers/BufferSuballocation.cpp (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#include "BufferSuballocation.hpp"
|
||||
|
||||
#include "Buffer.hpp"
|
||||
#include "BufferSuballocationHandle.hpp"
|
||||
#include "SuballocationView.hpp"
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
|
||||
BufferSuballocation& BufferSuballocation::operator=( BufferSuballocation&& other )
|
||||
{
|
||||
m_handle = std::move( other.m_handle );
|
||||
|
||||
m_offset = m_handle->m_offset;
|
||||
m_size = m_handle->m_size;
|
||||
|
||||
other.m_offset = 0;
|
||||
other.m_size = 0;
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
BufferSuballocation::BufferSuballocation( std::shared_ptr< BufferSuballocationHandle > handle ) :
|
||||
m_handle( std::move( handle ) )
|
||||
{
|
||||
m_offset = m_handle->m_offset;
|
||||
m_size = m_handle->m_size;
|
||||
}
|
||||
|
||||
BufferSuballocation::BufferSuballocation( BufferSuballocation&& other ) : m_handle( std::move( other.m_handle ) )
|
||||
{
|
||||
m_offset = m_handle->m_offset;
|
||||
m_size = m_handle->m_size;
|
||||
|
||||
other.m_offset = 0;
|
||||
other.m_size = 0;
|
||||
}
|
||||
|
||||
void* BufferSuballocation::ptr() const
|
||||
{
|
||||
return m_handle->mapped;
|
||||
}
|
||||
|
||||
void BufferSuballocation::flush( vk::DeviceSize beg, vk::DeviceSize end )
|
||||
{
|
||||
assert( m_handle->mapped != nullptr && "BufferSuballocationT::flush() called before map()" );
|
||||
vk::MappedMemoryRange range {};
|
||||
range.memory = m_handle->buffer.getMemory();
|
||||
range.offset = m_offset + beg;
|
||||
|
||||
const vk::DeviceSize min_atom_size { Device::getInstance().m_properties.limits.nonCoherentAtomSize };
|
||||
const auto size { end - beg };
|
||||
|
||||
range.size = align( size, min_atom_size );
|
||||
|
||||
if ( range.size > m_size ) range.size = VK_WHOLE_SIZE;
|
||||
|
||||
if ( Device::getInstance().device().flushMappedMemoryRanges( 1, &range ) != vk::Result::eSuccess )
|
||||
throw std::runtime_error( "Failed to flush memory" );
|
||||
}
|
||||
|
||||
Buffer& BufferSuballocation::getBuffer() const
|
||||
{
|
||||
return m_handle->buffer;
|
||||
}
|
||||
|
||||
vk::Buffer BufferSuballocation::getVkBuffer() const
|
||||
{
|
||||
return m_handle->buffer.getVkBuffer();
|
||||
}
|
||||
|
||||
vk::DescriptorBufferInfo BufferSuballocation::descriptorInfo() const
|
||||
{
|
||||
return vk::DescriptorBufferInfo( getVkBuffer(), m_offset, m_size );
|
||||
}
|
||||
|
||||
SuballocationView BufferSuballocation::view( const vk::DeviceSize offset, const vk::DeviceSize size ) const
|
||||
{
|
||||
assert( offset + size <= m_size && "BufferSuballocation::view() called with offset + size > m_size" );
|
||||
|
||||
return { m_handle, offset, size };
|
||||
}
|
||||
|
||||
} // namespace fgl::engine
|
||||
@@ -4,55 +4,29 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "Buffer.hpp"
|
||||
#include "engine/Device.hpp"
|
||||
#include "engine/concepts/is_suballocation.hpp"
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
class Buffer;
|
||||
class BufferHandle;
|
||||
class SuballocationView;
|
||||
|
||||
struct BufferSuballocationHandle;
|
||||
|
||||
class BufferSuballocation
|
||||
{
|
||||
protected:
|
||||
|
||||
Buffer& m_buffer;
|
||||
BufferSuballocationInfo m_info {};
|
||||
std::shared_ptr< BufferSuballocationHandle > m_handle;
|
||||
|
||||
void* m_mapped { nullptr };
|
||||
vk::DeviceSize m_offset { 0 };
|
||||
vk::DeviceSize m_size { 0 };
|
||||
|
||||
void flush( vk::DeviceSize beg, vk::DeviceSize end )
|
||||
{
|
||||
assert( m_mapped != nullptr && "BufferSuballocationT::flush() called before map()" );
|
||||
vk::MappedMemoryRange range {};
|
||||
range.memory = m_buffer.getMemory();
|
||||
range.offset = m_info.offset + beg;
|
||||
void flush( vk::DeviceSize beg, vk::DeviceSize end );
|
||||
|
||||
const vk::DeviceSize min_atom_size { Device::getInstance().m_properties.limits.nonCoherentAtomSize };
|
||||
const auto size { end - beg };
|
||||
|
||||
range.size = align( size, min_atom_size );
|
||||
|
||||
if ( range.size > m_info.size ) range.size = VK_WHOLE_SIZE;
|
||||
|
||||
if ( Device::getInstance().device().flushMappedMemoryRanges( 1, &range ) != vk::Result::eSuccess )
|
||||
throw std::runtime_error( "Failed to flush memory" );
|
||||
}
|
||||
|
||||
BufferSuballocation& operator=( BufferSuballocation&& other )
|
||||
{
|
||||
//Free ourselves if we are valid
|
||||
if ( this->m_info.offset != std::numeric_limits< decltype( m_info.offset ) >::max() )
|
||||
m_buffer.free( m_info );
|
||||
|
||||
//Take their info
|
||||
m_info = other.m_info;
|
||||
|
||||
//Set other to be invalid
|
||||
other.m_info.offset = std::numeric_limits< decltype( m_info.offset ) >::max();
|
||||
other.m_info.size = 0;
|
||||
|
||||
return *this;
|
||||
}
|
||||
BufferSuballocation& operator=( BufferSuballocation&& other );
|
||||
|
||||
public:
|
||||
|
||||
@@ -62,57 +36,26 @@ namespace fgl::engine
|
||||
BufferSuballocation( const BufferSuballocation& ) = delete;
|
||||
BufferSuballocation& operator=( const BufferSuballocation& ) = delete;
|
||||
|
||||
BufferSuballocation( BufferSuballocation&& other ) : m_buffer( other.m_buffer )
|
||||
{
|
||||
if ( this->m_info.offset != std::numeric_limits< decltype( m_info.offset ) >::max() )
|
||||
m_buffer.free( m_info );
|
||||
BufferSuballocation( std::shared_ptr< BufferSuballocationHandle > handle );
|
||||
BufferSuballocation( BufferSuballocation&& other );
|
||||
|
||||
m_info = other.m_info;
|
||||
m_mapped = other.m_mapped;
|
||||
SuballocationView view( const vk::DeviceSize offset, const vk::DeviceSize size ) const;
|
||||
|
||||
other.m_info.offset = std::numeric_limits< decltype( m_info.offset ) >::max();
|
||||
other.m_info.size = 0;
|
||||
other.m_mapped = nullptr;
|
||||
}
|
||||
void* ptr() const;
|
||||
|
||||
BufferSuballocation( Buffer& buffer, const std::size_t memory_size, const std::uint32_t alignment = 1 ) :
|
||||
m_buffer( buffer ),
|
||||
m_info( buffer.suballocate( memory_size, alignment ) ),
|
||||
m_mapped( m_buffer.isMappable() ? buffer.map( m_info ) : nullptr )
|
||||
{
|
||||
assert( memory_size != 0 && "BufferSuballocation::BufferSuballocation() called with memory_size == 0" );
|
||||
}
|
||||
vk::DeviceSize size() const { return m_size; }
|
||||
|
||||
BufferSuballocation(
|
||||
std::unique_ptr< Buffer >& buffer_ptr, const std::size_t memory_size, const std::uint32_t alignment = 1 ) :
|
||||
BufferSuballocation( *buffer_ptr.get(), memory_size, alignment )
|
||||
{
|
||||
assert( buffer_ptr != nullptr && "BufferSuballocation::BufferSuballocation() called with nullptr" );
|
||||
}
|
||||
Buffer& getBuffer() const;
|
||||
|
||||
vk::DeviceSize size() const { return m_info.size; }
|
||||
vk::Buffer getVkBuffer() const;
|
||||
|
||||
Buffer& getBuffer() const { return m_buffer; }
|
||||
vk::DeviceSize getOffset() const { return m_offset; }
|
||||
|
||||
vk::Buffer getVkBuffer() const { return m_buffer.getBuffer(); }
|
||||
vk::DeviceSize offset() const { return m_offset; }
|
||||
|
||||
vk::DeviceSize getOffset() const { return m_info.offset; }
|
||||
vk::DescriptorBufferInfo descriptorInfo() const;
|
||||
|
||||
vk::DeviceSize offset() const { return m_info.offset; }
|
||||
|
||||
vk::DescriptorBufferInfo descriptorInfo() const
|
||||
{
|
||||
vk::DescriptorBufferInfo info {};
|
||||
info.buffer = m_buffer.getBuffer();
|
||||
info.offset = m_info.offset;
|
||||
info.range = m_info.size;
|
||||
return info;
|
||||
}
|
||||
|
||||
~BufferSuballocation()
|
||||
{
|
||||
if ( m_info.offset != std::numeric_limits< decltype( m_info.offset ) >::max() ) m_buffer.free( m_info );
|
||||
}
|
||||
~BufferSuballocation() = default;
|
||||
};
|
||||
|
||||
//! Single element allocation of T
|
||||
@@ -126,20 +69,20 @@ namespace fgl::engine
|
||||
HostSingleT( HostSingleT&& ) = delete;
|
||||
HostSingleT& operator=( const HostSingleT& ) = delete;
|
||||
|
||||
HostSingleT( Buffer& buffer ) : BufferSuballocation( buffer, sizeof( T ), alignof( T ) ) {}
|
||||
HostSingleT( Buffer& buffer ) : BufferSuballocation( buffer.suballocate( sizeof( T ), alignof( T ) ) ) {}
|
||||
|
||||
HostSingleT& operator=( T& t )
|
||||
{
|
||||
ZoneScoped;
|
||||
|
||||
*static_cast< T* >( this->m_mapped ) = t;
|
||||
*static_cast< T* >( this->ptr() ) = t;
|
||||
|
||||
flush();
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
void flush() { BufferSuballocation::flush( 0, this->m_info.size ); }
|
||||
void flush() { BufferSuballocation::flush( 0, this->m_size ); }
|
||||
};
|
||||
|
||||
template < typename T >
|
||||
|
||||
src/engine/buffers/BufferSuballocationHandle.cpp (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#include "BufferSuballocationHandle.hpp"
|
||||
|
||||
#include "Buffer.hpp"
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
BufferSuballocationHandle::
|
||||
BufferSuballocationHandle( Buffer& buffer, vk::DeviceSize offset, vk::DeviceSize memory_size ) :
|
||||
buffer( buffer ),
|
||||
m_size( memory_size ),
|
||||
m_offset( offset ),
|
||||
mapped( buffer.map( *this ) )
|
||||
{
|
||||
assert( memory_size != 0 && "BufferSuballocation::BufferSuballocation() called with memory_size == 0" );
|
||||
}
|
||||
|
||||
vk::Buffer BufferSuballocationHandle::getVkBuffer() const
|
||||
{
|
||||
return buffer.getVkBuffer();
|
||||
}
|
||||
|
||||
BufferSuballocationHandle::~BufferSuballocationHandle()
|
||||
{
|
||||
buffer.free( *this );
|
||||
}
|
||||
|
||||
} // namespace fgl::engine
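Together with BufferSuballocation and SuballocationView, which both hold a shared_ptr to this handle, the destructor above gives suballocations shared-ownership RAII semantics. A usage sketch (illustrative only; it assumes the engine headers introduced in this commit):

#include "engine/buffers/Buffer.hpp"
#include "engine/buffers/BufferSuballocation.hpp"
#include "engine/buffers/SuballocationView.hpp"

void lifetimeExample( fgl::engine::Buffer& buffer )
{
    // The owning suballocation and the view share one BufferSuballocationHandle.
    fgl::engine::BufferSuballocation allocation { buffer.suballocate( 256 ) };
    fgl::engine::SuballocationView view { allocation.view( 0, 128 ) };

    // Buffer::free() runs inside ~BufferSuballocationHandle() only once the last
    // shared_ptr (held by whichever of the two objects is destroyed second) goes away.
}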
|
||||
src/engine/buffers/BufferSuballocationHandle.hpp (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vulkan/vulkan.hpp>
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
class Buffer;
|
||||
|
||||
struct BufferSuballocationHandle
|
||||
{
|
||||
Buffer& buffer;
|
||||
|
||||
//! Size of the buffer this suballocation is a part of
|
||||
vk::DeviceSize m_size;
|
||||
|
||||
//! Offset within buffer
|
||||
vk::DeviceSize m_offset;
|
||||
|
||||
void* mapped { nullptr };
|
||||
|
||||
public:
|
||||
|
||||
BufferSuballocationHandle() = delete;
|
||||
BufferSuballocationHandle( const BufferSuballocationHandle& ) = delete;
|
||||
BufferSuballocationHandle& operator=( const BufferSuballocationHandle& ) = delete;
|
||||
BufferSuballocationHandle( BufferSuballocationHandle&& ) = delete;
|
||||
BufferSuballocationHandle& operator=( BufferSuballocationHandle&& ) = delete;
|
||||
|
||||
BufferSuballocationHandle( Buffer& buffer, vk::DeviceSize memory_size, vk::DeviceSize offset );
|
||||
~BufferSuballocationHandle();
|
||||
|
||||
vk::Buffer getVkBuffer() const;
|
||||
|
||||
vk::DeviceSize getOffset() const { return m_offset; }
|
||||
};
|
||||
|
||||
} // namespace fgl::engine
|
||||
src/engine/buffers/SuballocationView.cpp (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#include "SuballocationView.hpp"
|
||||
|
||||
#include "BufferSuballocation.hpp"
|
||||
#include "BufferSuballocationHandle.hpp"
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
|
||||
vk::Buffer SuballocationView::getVkBuffer()
|
||||
{
|
||||
return m_suballocation->getVkBuffer();
|
||||
}
|
||||
|
||||
vk::DeviceSize SuballocationView::getOffset()
|
||||
{
|
||||
return m_offset + m_suballocation->getOffset();
|
||||
}
|
||||
|
||||
void SuballocationView::setOffset( vk::DeviceSize offset )
|
||||
{
|
||||
m_offset = offset;
|
||||
}
|
||||
|
||||
} // namespace fgl::engine
|
||||
src/engine/buffers/SuballocationView.hpp (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vulkan/vulkan.hpp>
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
|
||||
struct BufferSuballocationHandle;
|
||||
|
||||
class SuballocationView
|
||||
{
|
||||
std::shared_ptr< BufferSuballocationHandle > m_suballocation;
|
||||
vk::DeviceSize m_offset;
|
||||
vk::DeviceSize m_size;
|
||||
|
||||
public:
|
||||
|
||||
void setOffset( vk::DeviceSize offset );
|
||||
|
||||
SuballocationView(
|
||||
std::shared_ptr< BufferSuballocationHandle > handle, vk::DeviceSize offset, vk::DeviceSize size ) :
|
||||
m_suballocation( handle ),
|
||||
m_offset( offset ),
|
||||
m_size( size )
|
||||
{}
|
||||
|
||||
//! Returns the buffer
|
||||
vk::Buffer getVkBuffer();
|
||||
|
||||
//! Returns the offset of this view within the buffer
|
||||
vk::DeviceSize getOffset();
|
||||
};
|
||||
|
||||
} // namespace fgl::engine
|
||||
@@ -24,18 +24,21 @@ namespace fgl::engine
|
||||
BufferVector() = delete;
|
||||
|
||||
BufferVector( Buffer& buffer, std::uint32_t count, std::uint32_t stride ) :
|
||||
BufferSuballocation( buffer, count * stride, 1 ),
|
||||
BufferSuballocation( buffer.suballocate( count * stride ) ),
|
||||
m_count( count ),
|
||||
m_stride( stride )
|
||||
{}
|
||||
|
||||
BufferVector( const BufferVector& ) = delete;
|
||||
BufferVector( BufferVector&& ) = delete;
|
||||
BufferVector( BufferVector&& ) = default;
|
||||
|
||||
BufferVector& operator=( BufferVector&& other )
|
||||
{
|
||||
this->m_buffer.free( this->m_info );
|
||||
this->m_info = other.m_info;
|
||||
m_count = other.m_count;
|
||||
m_stride = other.m_stride;
|
||||
|
||||
BufferSuballocation::operator=( std::move( other ) );
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
@@ -44,9 +47,9 @@ namespace fgl::engine
|
||||
public:
|
||||
|
||||
//! Returns the offset count from the start of the buffer to the first element
|
||||
[[nodiscard]] std::uint32_t getOffsetCount()
|
||||
[[nodiscard]] std::uint32_t getOffsetCount() const
|
||||
{
|
||||
return static_cast< std::uint32_t >( this->m_info.offset / m_stride );
|
||||
return static_cast< std::uint32_t >( this->m_offset / m_stride );
|
||||
}
|
||||
|
||||
[[nodiscard]] std::uint32_t count() const noexcept { return m_count; }
|
||||
@@ -55,9 +58,9 @@ namespace fgl::engine
|
||||
|
||||
void resize( const std::uint32_t count )
|
||||
{
|
||||
BufferVector other { this->m_buffer, count, m_stride };
|
||||
BufferVector other { this->getBuffer(), count, m_stride };
|
||||
|
||||
Device::getInstance().copyBuffer( this->m_buffer, other.m_buffer, 0, 0, this->size() );
|
||||
Device::getInstance().copyBuffer( this->getBuffer(), other.getBuffer(), 0, 0, this->size() );
|
||||
|
||||
*this = std::move( other );
|
||||
}
|
||||
|
||||
@@ -36,9 +36,9 @@ namespace fgl::engine
|
||||
assert( m_staging_buffer && "DeviceVector::stage() called without staging buffer" );
|
||||
|
||||
//Copy
|
||||
vk::BufferCopy copy_region { m_staging_buffer->offset(), this->m_info.offset, this->m_info.size };
|
||||
vk::BufferCopy copy_region { m_staging_buffer->offset(), this->m_offset, this->m_size };
|
||||
|
||||
command_buffer.copyBuffer( m_staging_buffer->getVkBuffer(), this->m_buffer.getVkBuffer(), copy_region );
|
||||
command_buffer.copyBuffer( m_staging_buffer->getVkBuffer(), this->getVkBuffer(), copy_region );
|
||||
}
|
||||
|
||||
void dropStaging() { m_staging_buffer.reset(); }
|
||||
|
||||
@@ -40,7 +40,7 @@ namespace fgl::engine
|
||||
{
|
||||
if ( this->m_stride == sizeof( T ) )
|
||||
{
|
||||
std::memcpy( this->m_mapped, vec.data(), this->count() * sizeof( T ) );
|
||||
std::memcpy( this->ptr(), vec.data(), this->count() * sizeof( T ) );
|
||||
}
|
||||
else
|
||||
assert( "Stride must be equal to sizeof(T)" );
|
||||
@@ -73,7 +73,7 @@ namespace fgl::engine
|
||||
|
||||
if ( this->m_stride == sizeof( T ) )
|
||||
{
|
||||
std::memcpy( this->m_mapped, vec.data(), this->count() * sizeof( T ) );
|
||||
std::memcpy( this->ptr(), vec.data(), this->count() * sizeof( T ) );
|
||||
}
|
||||
else
|
||||
assert( "Stride must be equal to sizeof(T)" );
|
||||
|
||||
@@ -10,8 +10,6 @@
|
||||
|
||||
namespace fgl::engine
|
||||
{
|
||||
struct BufferSuballocationInfo;
|
||||
|
||||
template < typename T > concept is_buffer = std::same_as< T, Buffer >;
|
||||
|
||||
template < typename T > concept is_buffer_ref = is_buffer< std::remove_reference_t< T > >;
|
||||
|
||||
@@ -72,8 +72,6 @@ namespace fgl::engine
|
||||
std::vector< vk::DrawIndexedIndirectCommand > draw_commands;
|
||||
std::vector< ModelMatrixInfo > model_matrices;
|
||||
|
||||
GameObject* previous_obj { nullptr };
|
||||
|
||||
for ( auto& [ key, obj ] : info.game_objects )
|
||||
{
|
||||
TracyVkZone( info.tracy_ctx, command_buffer, "Render game object" );
|
||||
@@ -82,25 +80,20 @@ namespace fgl::engine
|
||||
ModelMatrixInfo matrix_info { .model_matrix = obj.transform.mat4(),
|
||||
.normal_matrix = obj.transform.normalMatrix() };
|
||||
|
||||
std::vector< vk::DrawIndexedIndirectCommand > cmds { obj.model->getDrawCommand( model_matrices.size() ) };
|
||||
|
||||
model_matrices.push_back( matrix_info );
|
||||
|
||||
//Does the previous draw_command use the same model as us?
|
||||
if ( !draw_commands.empty() && previous_obj && previous_obj->model == obj.model )
|
||||
{
|
||||
//If so, we can just increment the instance count
|
||||
draw_commands.back().instanceCount++;
|
||||
}
|
||||
else
|
||||
{
|
||||
//Otherwise we need to create a new draw command
|
||||
draw_commands.push_back( obj.model->getDrawCommand() );
|
||||
//TODO: Implement batching
|
||||
|
||||
draw_commands.back().firstInstance = static_cast< std::uint32_t >( model_matrices.size() - 1 );
|
||||
}
|
||||
//draw_commands.push_back( obj.model->getDrawCommand() );
|
||||
|
||||
previous_obj = &obj;
|
||||
//Push back draw commands
|
||||
draw_commands.insert( draw_commands.end(), cmds.begin(), cmds.end() );
|
||||
}
|
||||
|
||||
assert( draw_commands.size() > 0 && "No draw commands to render" );
|
||||
|
||||
auto& draw_parameter_buffer { m_draw_parameter_buffers[ info.frame_idx ] };
|
||||
|
||||
if ( draw_parameter_buffer == nullptr || draw_parameter_buffer->count() != draw_commands.size() )
|
||||
|
||||
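The hunk above only accumulates draw_commands and model_matrices; the submission itself is outside this diff. As a hedged sketch of how such a command list is typically consumed once it has been copied into the frame's draw parameter buffer (names here are illustrative, not the engine's):

// Sketch only: issue every accumulated command with one indirect multi-draw.
#include <vulkan/vulkan.hpp>
#include <cstdint>
#include <vector>

void submitIndirect(
    vk::CommandBuffer command_buffer,
    vk::Buffer draw_parameter_buffer,
    const std::vector< vk::DrawIndexedIndirectCommand >& draw_commands )
{
    // firstInstance in each command indexes the matching ModelMatrixInfo entry,
    // so the vertex shader can look its transform up by instance index.
    command_buffer.drawIndexedIndirect(
        draw_parameter_buffer,
        0,                                                    // byte offset of the first command
        static_cast< std::uint32_t >( draw_commands.size() ), // one draw per command
        sizeof( vk::DrawIndexedIndirectCommand ) );           // tightly packed stride
}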
src/objectloaders/CMakeLists.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
|
||||
file(GLOB_RECURSE CPP_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/**.cpp")
|
||||
|
||||
add_library(FGLLoader STATIC ${CPP_SOURCES})
|
||||
|
||||
target_include_directories(FGLLoader PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/..)
|
||||
set_target_properties(FGLLoader PROPERTIES COMPILE_FLAGS ${FGL_CHILD_FLAGS})
|
||||
|
||||
|
||||
src/objectloaders/gltf_impl.cpp (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#define TINYGLTF_IMPLEMENTATION
|
||||
#define STB_IMAGE_IMPLEMENTATION
|
||||
#define STB_IMAGE_WRITE_IMPLEMENTATION
|
||||
#define TINYGLTF_USE_CPP14
|
||||
#include "tiny_gltf.h"
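These implementation defines must appear in exactly one translation unit per library; every other file, such as Model.cpp above, includes objectloaders/tiny_gltf.h without them, so the header-only tinygltf, stb_image, and stb_image_write implementations are compiled once inside the new FGLLoader target.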
|
||||
src/objectloaders/json.hpp (new file, 26753 lines; diff suppressed because it is too large)
src/objectloaders/obj_impl.cpp (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
//
|
||||
// Created by kj16609 on 1/10/24.
|
||||
//
|
||||
|
||||
#define TINYOBJLOADER_IMPLEMENTATION
|
||||
#include "tiny_obj_loader.h"
|
||||
src/objectloaders/stb_image.h (new file, 9182 lines; diff suppressed because it is too large)
src/objectloaders/stb_image_write.h (new file, 2170 lines; diff suppressed because it is too large)
src/objectloaders/tiny_gltf.h (new file, 8508 lines; diff suppressed because it is too large)