Commit 0852c893 authored by Florian Oetke's avatar Florian Oetke
Browse files

resource management for descriptor-sets

parent 994cec54
......@@ -18,6 +18,7 @@ file(GLOB_RECURSE HEADER_FILES
add_library(mirrage_graphic STATIC
src/context.cpp
src/descriptor_sets.cpp
src/device.cpp
src/device_memory.cpp
src/ktx_parser.cpp
......
#pragma once
#include <vulkan/vulkan.hpp>
#include <initializer_list>
#include <memory>
#include <mutex>
#include <vector>
namespace mirrage::graphic {
// fwd:
class Device;
class Descriptor_pool;
using Descriptor_pool_chunk_index = std::int_fast32_t;
// Move-only RAII handle for a vk::DescriptorSet allocated from a Descriptor_pool.
// On destruction (or move-assignment over a live handle) the set and its reserved
// binding budget are returned to the owning pool. Default-constructed handles own nothing.
class DescriptorSet {
  public:
	DescriptorSet() = default;
	DescriptorSet(DescriptorSet&&) noexcept;
	DescriptorSet& operator=(DescriptorSet&&) noexcept;
	~DescriptorSet();

	// non-copyable: exactly one owner per descriptor set
	DescriptorSet(const DescriptorSet&) = delete;
	DescriptorSet& operator=(const DescriptorSet&) = delete;

	// access to the underlying Vulkan handle
	auto get() const noexcept { return _set; }
	auto get_ptr() const noexcept { return &_set; }
	auto operator*() const noexcept { return get(); }

	// true iff this handle currently owns a descriptor set
	explicit operator bool() const noexcept { return get(); }

  private:
	// only the pool may construct non-empty handles
	friend class Descriptor_pool;

	DescriptorSet(Descriptor_pool*,
	              Descriptor_pool_chunk_index,
	              vk::DescriptorSet,
	              std::uint32_t reserved_bindings);

	Descriptor_pool*            _pool = nullptr; // owning pool; nullptr => empty handle
	Descriptor_pool_chunk_index _chunk;          // index of the pool chunk the set came from
	vk::DescriptorSet           _set;
	std::uint32_t               _reserved_bindings; // budget returned to the chunk on destruction
};
// Thread-safe allocator for descriptor sets. Sets are allocated from a growing list of
// fixed-size VkDescriptorPool "chunks"; each chunk tracks how many bindings are still free.
// Constructed by Device; freed sets return their budget through ~DescriptorSet.
class Descriptor_pool {
  public:
	// Allocates a descriptor from the pool
	// \param bindings: The estimated number of bindings required by the layout
	auto create_descriptor(vk::DescriptorSetLayout, std::uint32_t bindings) -> DescriptorSet;

  private:
	friend class Device;
	friend class DescriptorSet;

	vk::Device                            _device;
	std::uint32_t                         _chunk_size; // binding capacity of each chunk
	std::vector<vk::DescriptorPoolSize>   _pool_sizes; // per-type sizes used for every new chunk
	std::vector<vk::UniqueDescriptorPool> _chunks;
	std::vector<std::uint32_t>            _chunks_free_count; // free bindings; parallel to _chunks
	mutable std::mutex                    _mutex;             // guards all of the above

	Descriptor_pool(vk::Device device,
	                std::uint32_t chunk_size,
	                std::initializer_list<vk::DescriptorType> types);
	// neither copyable nor movable: DescriptorSet keeps a stable pointer back to its pool
	Descriptor_pool(const Descriptor_pool&) = delete;
	Descriptor_pool(Descriptor_pool&&) = delete;
	Descriptor_pool& operator=(const Descriptor_pool&) = delete;
	Descriptor_pool& operator=(Descriptor_pool&&) = delete;

	// appends a new chunk and returns its handle
	auto _create_descriptor_pool() -> vk::DescriptorPool;

	// called by ~DescriptorSet: frees the set and returns its binding budget to the chunk
	void _free_descriptor_set(vk::DescriptorSet& set,
	                          Descriptor_pool_chunk_index chunk,
	                          std::uint32_t reserved_bindings);
};
// Descriptor-set layout consisting of image_number combined-image-sampler bindings that
// all use the same immutable sampler; provides helpers to allocate and update matching sets.
class Image_descriptor_set_layout {
  public:
	Image_descriptor_set_layout(graphic::Device& device,
	                            vk::Sampler sampler,
	                            std::uint32_t image_number,
	                            vk::ShaderStageFlags = vk::ShaderStageFlagBits::eFragment);

	auto layout() const noexcept { return *_layout; }
	auto operator*() const noexcept { return *_layout; }

	// allocates a set from the pool and writes the given image views into it
	auto create_set(Descriptor_pool& pool, std::initializer_list<vk::ImageView> images) -> DescriptorSet;
	// writes the given image views into an existing set (binding i = i-th image)
	void update_set(vk::DescriptorSet, std::initializer_list<vk::ImageView>);

  private:
	graphic::Device&              _device;
	vk::Sampler                   _sampler;
	std::uint32_t                 _image_number; // number of bindings in the layout
	vk::UniqueDescriptorSetLayout _layout;
};
} // namespace mirrage::graphic
#pragma once
#include <mirrage/graphic/descriptor_sets.hpp>
#include <mirrage/utils/maybe.hpp>
#include <mirrage/utils/purgatory.hpp>
#include <mirrage/utils/ring_buffer.hpp>
......@@ -26,7 +28,6 @@ namespace mirrage::graphic {
// fwd:
class Device;
class Render_pass_builder;
class Device;
class Render_pass;
class Render_pass_builder;
class Subpass_builder;
......@@ -105,83 +106,6 @@ namespace mirrage::graphic {
Command_buffer_pool(const vk::Device& device, vk::UniqueCommandPool pool);
};
// (pre-refactor version) Move-only RAII wrapper around a vk::DescriptorSet; frees the
// set on destruction. The deletion mutex guards the shared VkDescriptorPool.
class DescriptorSet {
  public:
	DescriptorSet() = default;
	DescriptorSet(vk::Device, vk::DescriptorPool, vk::DescriptorSet, std::mutex&);
	DescriptorSet(DescriptorSet&&) noexcept;
	DescriptorSet& operator=(DescriptorSet&&) noexcept;
	~DescriptorSet();

	// non-copyable: exactly one owner per descriptor set
	DescriptorSet(const DescriptorSet&) = delete;
	DescriptorSet& operator=(const DescriptorSet&) = delete;

	auto get() const noexcept { return _set; }
	auto get_ptr() const noexcept { return &_set; }
	auto operator*() const noexcept { return get(); }
	explicit operator bool() const noexcept { return get(); }

  private:
	vk::Device         _device;
	vk::DescriptorPool _pool;
	vk::DescriptorSet  _set;
	std::mutex*        _deletion_mutex = nullptr; // nullptr => empty handle, nothing to free

	void _destroy();
};
// TODO: actual management of DescriptorSet-Objects
// (pre-refactor version) Allocates descriptor sets from a growing list of fixed-size
// VkDescriptorPools; only tracks a per-pool count of free sets, not per-binding budgets.
class Descriptor_pool {
  public:
	// allocates one descriptor set matching the given layout
	auto create_descriptor(vk::DescriptorSetLayout) -> DescriptorSet;

  private:
	friend class Device;

	vk::Device                            _device;
	std::uint32_t                         _chunk_size; // sets per pool
	std::vector<vk::DescriptorPoolSize>   _pool_sizes; // per-type sizes used for every new pool
	std::vector<vk::UniqueDescriptorPool> _pools;
	std::vector<std::uint32_t>            _free;  // free set count; parallel to _pools
	mutable std::mutex                    _mutex; // guards all of the above

	Descriptor_pool(vk::Device device,
	                std::uint32_t chunk_size,
	                std::initializer_list<vk::DescriptorType> types);
	// neither copyable nor movable
	Descriptor_pool(const Descriptor_pool&) = delete;
	Descriptor_pool(Descriptor_pool&&) = delete;
	Descriptor_pool& operator=(const Descriptor_pool&) = delete;
	Descriptor_pool& operator=(Descriptor_pool&&) = delete;

	// appends a new pool and returns its handle
	auto create_descriptor_pool() -> vk::DescriptorPool;
};
// (pre-refactor version) Descriptor-set layout with image_number combined-image-sampler
// bindings that all use the same immutable sampler.
class Image_descriptor_set_layout {
  public:
	Image_descriptor_set_layout(graphic::Device& device,
	                            vk::Sampler sampler,
	                            std::uint32_t image_number,
	                            vk::ShaderStageFlags = vk::ShaderStageFlagBits::eFragment);

	auto layout() const noexcept { return *_layout; }
	auto operator*() const noexcept { return *_layout; }

	// allocates a set from the pool and writes the given image views into it
	auto create_set(Descriptor_pool& pool, std::initializer_list<vk::ImageView> images) -> DescriptorSet {
		auto set = pool.create_descriptor(layout());
		update_set(*set, images);
		return set;
	}
	// writes the given image views into an existing set (binding i = i-th image)
	void update_set(vk::DescriptorSet, std::initializer_list<vk::ImageView>);

  private:
	graphic::Device&              _device;
	vk::Sampler                   _sampler;
	std::uint32_t                 _image_number; // number of bindings in the layout
	vk::UniqueDescriptorSetLayout _layout;
};
class Fence {
public:
......
#include <mirrage/graphic/descriptor_sets.hpp>
#include <mirrage/graphic/device.hpp>
#include <mirrage/utils/log.hpp>
#include <gsl/gsl>
namespace mirrage::graphic {
// Private constructor used by Descriptor_pool: binds a freshly allocated set to the
// pool/chunk it came from, remembering the binding budget to give back later.
DescriptorSet::DescriptorSet(Descriptor_pool*            owner,
                             Descriptor_pool_chunk_index chunk,
                             vk::DescriptorSet           handle,
                             std::uint32_t               bindings)
  : _pool(owner), _chunk(chunk), _set(handle), _reserved_bindings(bindings) {}
// Move construction: steal rhs' state and leave rhs empty so its destructor
// does not return the set to the pool a second time.
DescriptorSet::DescriptorSet(DescriptorSet&& rhs) noexcept
  : _pool(rhs._pool), _chunk(rhs._chunk), _set(rhs._set), _reserved_bindings(rhs._reserved_bindings) {
	rhs._pool = nullptr;
}
// Move assignment: release the currently owned set (if any), then take over rhs' state.
DescriptorSet& DescriptorSet::operator=(DescriptorSet&& rhs) noexcept {
	MIRRAGE_INVARIANT(this != &rhs, "move to self");

	// return our current set to its pool before overwriting it
	if(_pool != nullptr) {
		_pool->_free_descriptor_set(_set, _chunk, _reserved_bindings);
	}

	_pool              = rhs._pool;
	_chunk             = rhs._chunk;
	_set               = rhs._set;
	_reserved_bindings = rhs._reserved_bindings;

	// rhs no longer owns anything
	rhs._pool = nullptr;

	return *this;
}
// Returns the set (and its binding budget) to the owning pool; empty handles are a no-op.
DescriptorSet::~DescriptorSet() {
	if(_pool != nullptr)
		_pool->_free_descriptor_set(_set, _chunk, _reserved_bindings);
}
// Allocates one descriptor set for the given layout, reserving `bindings` from the
// first chunk that can satisfy it (first-fit). Thread-safe.
// \param layout:   the descriptor-set layout to allocate for
// \param bindings: estimated number of bindings the layout requires (> 0)
// \returns an owning DescriptorSet handle; aborts via MIRRAGE_FAIL if allocation fails twice
auto Descriptor_pool::create_descriptor(vk::DescriptorSetLayout layout, std::uint32_t bindings)
        -> DescriptorSet {
	MIRRAGE_INVARIANT(bindings > 0, "No bindings required, really?!?!");
	// A fresh chunk provides exactly _chunk_size bindings per type, so a request of
	// _chunk_size itself is satisfiable; the original '<' was off by one. The message
	// also printed '<' where the violated relation is '>'.
	MIRRAGE_INVARIANT(bindings <= _chunk_size,
	                  "Requested number of bindings is higher than chunk size ("
	                          << bindings << " > " << _chunk_size << "). Reevaluate life choices?");

	auto lock = std::scoped_lock{_mutex};

	// Two attempts: if allocation from an existing chunk fails (per-type exhaustion or
	// fragmentation the coarse free-count can't see), the chunk is marked full and the
	// retry allocates from a brand-new chunk.
	auto retries = 2;
	do {
		retries--;

		// first chunk with enough free bindings (first-fit)
		auto free_iter = std::find_if(begin(_chunks_free_count),
		                              end(_chunks_free_count),
		                              [&](auto count) { return count >= bindings; });
		if(free_iter == _chunks_free_count.end()) {
			// no chunk can satisfy the request => create a new one
			_chunks_free_count.emplace_back(_chunk_size);
			_create_descriptor_pool();
			free_iter = _chunks_free_count.end() - 1;
		}

		*free_iter -= bindings;
		auto  pool_idx = std::distance(_chunks_free_count.begin(), free_iter);
		auto& pool     = _chunks.at(gsl::narrow<std::size_t>(pool_idx));

		auto alloc_info   = vk::DescriptorSetAllocateInfo{*pool, 1, &layout};
		auto c_alloc_info = VkDescriptorSetAllocateInfo(alloc_info);
		auto desc_set     = VkDescriptorSet{};
		auto ret          = vkAllocateDescriptorSets(_device, &c_alloc_info, &desc_set);
		if(ret == VK_SUCCESS) {
			return DescriptorSet(
			        this, gsl::narrow<Descriptor_pool_chunk_index>(pool_idx), desc_set, bindings);
		} else {
			// This branch is the FAILURE path (the old message claimed a pool was allocated).
			// Mark the chunk exhausted so the retry falls through to a fresh chunk.
			MIRRAGE_WARN("Descriptor set allocation from an existing chunk failed; "
			             "marking it full and retrying with a new chunk.");
			*free_iter = 0;
		}
	} while(retries > 0);

	MIRRAGE_FAIL("Unable to allocate descriptor set!");
}
// Sets up the per-type pool sizes (one entry per requested descriptor type, each sized
// to a full chunk) and eagerly creates the first chunk.
Descriptor_pool::Descriptor_pool(vk::Device device,
                                 std::uint32_t chunk_size,
                                 std::initializer_list<vk::DescriptorType> types)
  : _device(device), _chunk_size(chunk_size) {

	_pool_sizes.reserve(types.size());
	for(auto descriptor_type : types)
		_pool_sizes.emplace_back(descriptor_type, chunk_size);

	// first chunk up-front, so the first allocation doesn't pay for pool creation
	_create_descriptor_pool();
}
// Appends a new chunk (its own VkDescriptorPool allowing individual frees) and
// returns the non-owning handle of the freshly created pool.
auto Descriptor_pool::_create_descriptor_pool() -> vk::DescriptorPool {
	auto create_info = vk::DescriptorPoolCreateInfo{
	        vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
	        _chunk_size,
	        gsl::narrow<std::uint32_t>(_pool_sizes.size()),
	        _pool_sizes.data()};

	auto pool   = _device.createDescriptorPoolUnique(create_info);
	auto handle = *pool;
	_chunks.emplace_back(std::move(pool));

	return handle;
}
// Called from ~DescriptorSet: frees the Vulkan set, clears the caller's handle and
// returns the reserved binding budget to the chunk it was taken from. Thread-safe.
void Descriptor_pool::_free_descriptor_set(vk::DescriptorSet& set,
                                           Descriptor_pool_chunk_index chunk,
                                           std::uint32_t reserved_bindings) {
	auto lock = std::scoped_lock{_mutex};

	auto chunk_idx = gsl::narrow<std::size_t>(chunk);

	// give the set back to the VkDescriptorPool it came from and empty the handle
	auto c_set = VkDescriptorSet(set);
	vkFreeDescriptorSets(_device, *_chunks.at(chunk_idx), 1, &c_set);
	set = vk::DescriptorSet{};

	// return the binding budget; exceeding the chunk capacity indicates double-free/corruption
	auto& free_count = _chunks_free_count.at(chunk_idx);
	free_count += reserved_bindings;
	if(free_count > _chunk_size) {
		MIRRAGE_ERROR("Free-count of descriptor pool chunk is higher than max (memory corruption?)!");
	}
}
namespace {
	// Builds a layout with image_number combined-image-sampler bindings (0..image_number-1),
	// each using the given sampler as an immutable sampler visible to the given stages.
	auto create_layout(graphic::Device& device,
	                   vk::Sampler sampler,
	                   std::uint32_t image_number,
	                   vk::ShaderStageFlags stages) {
		// pImmutableSamplers points into this vector; it only has to live until the
		// create_descriptor_set_layout call below returns
		auto immutable_samplers = std::vector<vk::Sampler>(image_number, sampler);

		auto bindings = std::vector<vk::DescriptorSetLayoutBinding>();
		bindings.reserve(image_number);
		for(auto binding_idx = std::uint32_t(0); binding_idx < image_number; binding_idx++) {
			bindings.emplace_back(binding_idx,
			                      vk::DescriptorType::eCombinedImageSampler,
			                      1,
			                      stages,
			                      immutable_samplers.data());
		}

		return device.create_descriptor_set_layout(bindings);
	}
} // namespace
// Stores the sampler/stage configuration and immediately creates the matching
// vk::DescriptorSetLayout (one combined-image-sampler binding per image).
Image_descriptor_set_layout::Image_descriptor_set_layout(graphic::Device& device,
                                                         vk::Sampler sampler,
                                                         std::uint32_t image_number,
                                                         vk::ShaderStageFlags stages)
  : _device(device)
  , _sampler(sampler)
  , _image_number(image_number)
  , _layout(create_layout(device, sampler, image_number, stages)) {}
// Writes the given image views into an existing descriptor set of this layout.
// Binding i receives the i-th image; fewer images than bindings is allowed
// (remaining bindings are left untouched).
void Image_descriptor_set_layout::update_set(vk::DescriptorSet set,
                                             std::initializer_list<vk::ImageView> images) {
	// the check is '<=', so the message must say "exceeds", not "doesn't match"
	MIRRAGE_INVARIANT(images.size() <= _image_number,
	                  "Number of images (" << images.size()
	                          << ") exceeds the size of the descriptor set (" << _image_number
	                          << ")");

	// desc_writes stores pointers into desc_images, so the vector must not reallocate
	// (hence the reserve) and must outlive the updateDescriptorSets call
	auto desc_images = std::vector<vk::DescriptorImageInfo>();
	desc_images.reserve(images.size());
	for(auto& image : images) {
		desc_images.emplace_back(_sampler, image, vk::ImageLayout::eShaderReadOnlyOptimal);
	}

	auto desc_writes = std::vector<vk::WriteDescriptorSet>();
	desc_writes.reserve(images.size());
	for(auto& desc_image : desc_images) {
		// binding index == position of the image in the list; checked narrowing
		// instead of the implicit size_t -> uint32_t conversion
		desc_writes.emplace_back(set,
		                         gsl::narrow<std::uint32_t>(desc_writes.size()),
		                         0,
		                         1,
		                         vk::DescriptorType::eCombinedImageSampler,
		                         &desc_image,
		                         nullptr);
	}

	_device.vk_device()->updateDescriptorSets(
	        gsl::narrow<std::uint32_t>(desc_writes.size()), desc_writes.data(), 0, nullptr);
}
// Allocates a descriptor set sized for all bindings of this layout and immediately
// writes the given image views into it.
auto Image_descriptor_set_layout::create_set(Descriptor_pool& pool,
                                             std::initializer_list<vk::ImageView> images)
        -> DescriptorSet {
	auto descriptor_set = pool.create_descriptor(layout(), _image_number);
	update_set(*descriptor_set, images);
	return descriptor_set;
}
} // namespace mirrage::graphic
......@@ -233,11 +233,14 @@ namespace mirrage::graphic {
auto image = _device->createImageUnique(info);
auto mem = [&] {
// called first because bindImageMemory without getImageMemoryRequirements is a warning
// in the validation layers
auto mem_req = _device->getImageMemoryRequirements(*image);
if(dedicated) {
return _memory_allocator.alloc_dedicated(*image, host_visible).get_or_throw();
}
auto mem_req = _device->getImageMemoryRequirements(*image);
return _memory_allocator
.alloc(mem_req.size, mem_req.alignment, mem_req.memoryTypeBits, host_visible, lifetime)
.get_or_throw();
......
......@@ -20,160 +20,6 @@ namespace mirrage::graphic {
}
// (pre-refactor version) Takes ownership of an already allocated descriptor set;
// deletion_mutex guards the shared pool during the eventual vkFreeDescriptorSets call.
DescriptorSet::DescriptorSet(vk::Device device,
                             vk::DescriptorPool pool,
                             vk::DescriptorSet set,
                             std::mutex& deletion_mutex)
  : _device(device), _pool(pool), _set(set), _deletion_mutex(&deletion_mutex) {}
// (pre-refactor version) Steals rhs' state; rhs is left with a null deletion mutex
// so its destructor will not free the set.
DescriptorSet::DescriptorSet(DescriptorSet&& rhs) noexcept
  : _device(std::move(rhs._device))
  , _pool(std::move(rhs._pool))
  , _set(std::move(rhs._set))
  , _deletion_mutex(std::move(rhs._deletion_mutex)) {
	rhs._deletion_mutex = nullptr;
}
// (pre-refactor version) Frees the currently owned set, then takes over rhs' state.
DescriptorSet& DescriptorSet::operator=(DescriptorSet&& rhs) noexcept {
	MIRRAGE_INVARIANT(this != &rhs, "move to self");
	// release our current set before overwriting the members
	_destroy();
	_device         = std::move(rhs._device);
	_pool           = std::move(rhs._pool);
	_set            = std::move(rhs._set);
	_deletion_mutex = std::move(rhs._deletion_mutex);
	// rhs no longer owns anything
	rhs._deletion_mutex = nullptr;
	return *this;
}
// Frees the owned descriptor set (if any) when the handle goes out of scope.
DescriptorSet::~DescriptorSet() { _destroy(); }
// Returns the set to its pool under the pool's deletion mutex and marks this handle
// as empty. Idempotent: subsequent calls are no-ops.
void DescriptorSet::_destroy() {
	if(_deletion_mutex) {
		auto lock = std::scoped_lock{*_deletion_mutex};
		vkFreeDescriptorSets(_device, _pool, 1, reinterpret_cast<VkDescriptorSet*>(&_set));
		_set            = vk::DescriptorSet{};
		_deletion_mutex = nullptr;
	}
}
// (pre-refactor version) Allocates one descriptor set for the given layout from the
// first pool that still has a free slot. Thread-safe; aborts if allocation fails twice.
auto Descriptor_pool::create_descriptor(vk::DescriptorSetLayout layout) -> DescriptorSet {
	auto lock = std::scoped_lock{_mutex};

	// two attempts: if allocation from the chosen pool fails, mark it full and retry,
	// which falls through to creating a fresh pool
	auto retries = 2;
	do {
		retries--;

		// first pool with at least one free set (first-fit)
		auto free_iter = std::find_if(begin(_free), end(_free), [](auto count) { return count > 0; });
		if(free_iter == _free.end()) {
			_free.emplace_back(_chunk_size);
			create_descriptor_pool();
			free_iter = _free.end() - 1;
		}

		*free_iter -= 1;
		auto& pool = _pools.at(gsl::narrow<std::size_t>(std::distance(_free.begin(), free_iter)));

		auto alloc_info = vk::DescriptorSetAllocateInfo{*pool, 1, &layout};
		auto desc_set   = VkDescriptorSet{};
		auto ret        = vkAllocateDescriptorSets(
		        _device, reinterpret_cast<VkDescriptorSetAllocateInfo*>(&alloc_info), &desc_set);
		if(ret == VK_SUCCESS) {
			return DescriptorSet(_device, *pool, desc_set, _mutex);
		} else { // was 'else if(ret != VK_SUCCESS)': the condition was always true here
			MIRRAGE_INFO("Allocated a new descriptorSetPool (shouldn't happen too often!).");
			*free_iter = 0;
		}
	} while(retries > 0);

	MIRRAGE_FAIL("Unable to allocate descriptor set!");
}
// (pre-refactor version) Sets up the per-type pool sizes and eagerly creates the first pool.
Descriptor_pool::Descriptor_pool(vk::Device device,
                                 std::uint32_t chunk_size,
                                 std::initializer_list<vk::DescriptorType> types)
  : _device(device), _chunk_size(chunk_size) {

	_pool_sizes.reserve(types.size());
	for(auto descriptor_type : types)
		_pool_sizes.emplace_back(descriptor_type, chunk_size);

	// first pool up-front, so the first allocation doesn't pay for pool creation
	create_descriptor_pool();
}
// (pre-refactor version) Appends a new VkDescriptorPool (allowing individual frees)
// and returns its non-owning handle.
auto Descriptor_pool::create_descriptor_pool() -> vk::DescriptorPool {
	auto create_info = vk::DescriptorPoolCreateInfo{
	        vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
	        _chunk_size,
	        gsl::narrow<std::uint32_t>(_pool_sizes.size()),
	        _pool_sizes.data()};

	auto pool   = _device.createDescriptorPoolUnique(create_info);
	auto handle = *pool;
	_pools.emplace_back(std::move(pool));

	return handle;
}
namespace {
	// Builds a layout with image_number combined-image-sampler bindings (0..image_number-1),
	// each using the given sampler as an immutable sampler visible to the given stages.
	auto create_layout(graphic::Device& device,
	                   vk::Sampler sampler,
	                   std::uint32_t image_number,
	                   vk::ShaderStageFlags stages) {
		// pImmutableSamplers points into this vector; it only has to live until the
		// create_descriptor_set_layout call below returns
		auto immutable_samplers = std::vector<vk::Sampler>(image_number, sampler);

		auto bindings = std::vector<vk::DescriptorSetLayoutBinding>();
		bindings.reserve(image_number);
		for(auto binding_idx = std::uint32_t(0); binding_idx < image_number; binding_idx++) {
			bindings.emplace_back(binding_idx,
			                      vk::DescriptorType::eCombinedImageSampler,
			                      1,
			                      stages,
			                      immutable_samplers.data());
		}

		return device.create_descriptor_set_layout(bindings);
	}
} // namespace
// Stores the sampler/stage configuration and immediately creates the matching
// vk::DescriptorSetLayout (one combined-image-sampler binding per image).
Image_descriptor_set_layout::Image_descriptor_set_layout(graphic::Device& device,
                                                         vk::Sampler sampler,
                                                         std::uint32_t image_number,
                                                         vk::ShaderStageFlags stages)
  : _device(device)
  , _sampler(sampler)
  , _image_number(image_number)
  , _layout(create_layout(device, sampler, image_number, stages)) {}
// (pre-refactor version) Writes the given image views into an existing descriptor set
// of this layout. Binding i receives the i-th image; fewer images than bindings is allowed.
void Image_descriptor_set_layout::update_set(vk::DescriptorSet set,
                                             std::initializer_list<vk::ImageView> images) {
	// the check is '<=', so the message must say "exceeds", not "doesn't match"
	MIRRAGE_INVARIANT(images.size() <= _image_number,
	                  "Number of images (" << images.size()
	                          << ") exceeds the size of the descriptor set (" << _image_number
	                          << ")");

	// desc_writes stores pointers into desc_images, so the vector must not reallocate
	// (hence the reserve) and must outlive the updateDescriptorSets call
	auto desc_images = std::vector<vk::DescriptorImageInfo>();
	desc_images.reserve(images.size());
	for(auto& image : images) {
		desc_images.emplace_back(_sampler, image, vk::ImageLayout::eShaderReadOnlyOptimal);
	}

	auto desc_writes = std::vector<vk::WriteDescriptorSet>();
	desc_writes.reserve(images.size());
	for(auto& desc_image : desc_images) {
		// binding index == position of the image in the list
		desc_writes.emplace_back(set,
		                         desc_writes.size(),
		                         0,
		                         1,
		                         vk::DescriptorType::eCombinedImageSampler,
		                         &desc_image,
		                         nullptr);
	}

	// checked narrowing instead of the implicit size_t -> uint32_t conversion
	_device.vk_device()->updateDescriptorSets(
	        gsl::narrow<std::uint32_t>(desc_writes.size()), desc_writes.data(), 0, nullptr);
}
// True while the fence is signalled on the device.
Fence::operator bool() const { return _device.getFenceStatus(*_fence) == vk::Result::eSuccess; }
// Moves the fence back to the unsignalled state.
void Fence::reset() { _device.resetFences({*_fence}); }