@@ -6,6 +6,7 @@
 #include <optional>
 #include <tuple>
 #include <vector>
 
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
@@ -16,34 +17,32 @@
 
 namespace Vulkan {
 
-// TODO(Rodrigo): Fine tune this number
-constexpr u64 ALLOC_CHUNK_SIZE = 64 * 1024 * 1024;
+namespace {
+
+u64 GetAllocationChunkSize(u64 required_size) {
+    static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
+    auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
+    return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
+}
+
+} // Anonymous namespace
 
 class VKMemoryAllocation final {
 public:
     explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
-                                vk::MemoryPropertyFlags properties, u64 alloc_size, u32 type)
-        : device{device}, memory{memory}, properties{properties}, alloc_size{alloc_size},
-          shifted_type{ShiftType(type)}, is_mappable{properties &
-                                                     vk::MemoryPropertyFlagBits::eHostVisible} {
-        if (is_mappable) {
-            const auto dev = device.GetLogical();
-            const auto& dld = device.GetDispatchLoader();
-            base_address = static_cast<u8*>(dev.mapMemory(memory, 0, alloc_size, {}, dld));
-        }
-    }
+                                vk::MemoryPropertyFlags properties, u64 allocation_size, u32 type)
+        : device{device}, memory{memory}, properties{properties}, allocation_size{allocation_size},
+          shifted_type{ShiftType(type)} {}
 
     ~VKMemoryAllocation() {
         const auto dev = device.GetLogical();
         const auto& dld = device.GetDispatchLoader();
-        if (is_mappable)
-            dev.unmapMemory(memory, dld);
         dev.free(memory, nullptr, dld);
     }
 
     VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) {
-        auto found = TryFindFreeSection(free_iterator, alloc_size, static_cast<u64>(commit_size),
-                                        static_cast<u64>(alignment));
+        auto found = TryFindFreeSection(free_iterator, allocation_size,
+                                        static_cast<u64>(commit_size), static_cast<u64>(alignment));
         if (!found) {
             found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
                                        static_cast<u64>(alignment));
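
Note on the hunk above: `GetAllocationChunkSize` replaces the fixed 64 MiB `ALLOC_CHUNK_SIZE` with size buckets, so small commits share small chunks and oversized requests no longer trip the old `ASSERT(reqs.size < ALLOC_CHUNK_SIZE)` removed later in this patch. A minimal standalone sketch of the bucketing, with a local `AlignUp` standing in for `Common::AlignUp`:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

using u64 = std::uint64_t;

// Local stand-in for Common::AlignUp: round value up to the next multiple of align.
constexpr u64 AlignUp(u64 value, u64 align) {
    return (value + align - 1) / align * align;
}

u64 GetAllocationChunkSize(u64 required_size) {
    static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
    auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
    return it != std::end(sizes) ? *it : AlignUp(required_size, 256ULL << 20);
}

int main() {
    assert(GetAllocationChunkSize(1ULL << 20) == 16ULL << 20);    // tiny requests get a 16 MiB chunk
    assert(GetAllocationChunkSize(20ULL << 20) == 32ULL << 20);   // 20 MiB rounds up to the 32 MiB bucket
    assert(GetAllocationChunkSize(128ULL << 20) == 128ULL << 20); // exact bucket boundary
    assert(GetAllocationChunkSize(300ULL << 20) == 512ULL << 20); // past the buckets: aligned to 256 MiB
}
```
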
@@ -52,8 +51,7 @@ public:
                 return nullptr;
             }
         }
-        u8* address = is_mappable ? base_address + *found : nullptr;
-        auto commit = std::make_unique<VKMemoryCommitImpl>(this, memory, address, *found,
+        auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
                                                            *found + commit_size);
         commits.push_back(commit.get());
 
@@ -65,12 +63,10 @@ public:
 
     void Free(const VKMemoryCommitImpl* commit) {
         ASSERT(commit);
-        const auto it =
-            std::find_if(commits.begin(), commits.end(),
-                         [&](const auto& stored_commit) { return stored_commit == commit; });
+
+        const auto it = std::find(std::begin(commits), std::end(commits), commit);
         if (it == commits.end()) {
-            LOG_CRITICAL(Render_Vulkan, "Freeing unallocated commit!");
-            UNREACHABLE();
+            UNREACHABLE_MSG("Freeing unallocated commit!");
             return;
         }
         commits.erase(it);
@@ -88,11 +84,11 @@ private:
     }
 
     /// A memory allocator, it may return a free region between "start" and "end" with the solicited
-    /// requeriments.
+    /// requirements.
    std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
-        u64 iterator = start;
-        while (iterator + size < end) {
-            const u64 try_left = Common::AlignUp(iterator, alignment);
+        u64 iterator = Common::AlignUp(start, alignment);
+        while (iterator + size <= end) {
+            const u64 try_left = iterator;
             const u64 try_right = try_left + size;
 
             bool overlap = false;
@@ -100,7 +96,7 @@ private:
                 const auto [commit_left, commit_right] = commit->interval;
                 if (try_left < commit_right && commit_left < try_right) {
                     // There's an overlap, continue the search where the overlapping commit ends.
-                    iterator = commit_right;
+                    iterator = Common::AlignUp(commit_right, alignment);
                     overlap = true;
                     break;
                 }
@@ -110,6 +106,7 @@ private:
                 return try_left;
             }
         }
+
         // No free regions where found, return an empty optional.
         return std::nullopt;
     }
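
The three hunks above change `TryFindFreeSection` in two ways: the candidate offset is now aligned once up front and re-aligned after skipping an overlapping commit (instead of aligning an otherwise unaligned iterator inside the loop), and `iterator + size <= end` accepts a region ending exactly at `end`, which the old `<` comparison rejected. A self-contained sketch of the patched search over plain `(left, right)` intervals, with `AlignUp` again standing in for `Common::AlignUp`:

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using u64 = std::uint64_t;

constexpr u64 AlignUp(u64 value, u64 align) {
    return (value + align - 1) / align * align;
}

// First-fit search mirroring the patched TryFindFreeSection. `commits` holds occupied
// [left, right) intervals; returns an aligned offset for `size` bytes in [start, end),
// or nullopt when nothing fits.
std::optional<u64> FindFree(const std::vector<std::pair<u64, u64>>& commits, u64 start, u64 end,
                            u64 size, u64 alignment) {
    u64 iterator = AlignUp(start, alignment);
    while (iterator + size <= end) {
        const u64 try_left = iterator;
        const u64 try_right = try_left + size;

        bool overlap = false;
        for (const auto& [commit_left, commit_right] : commits) {
            if (try_left < commit_right && commit_left < try_right) {
                // Overlap: resume the search where the overlapping commit ends, re-aligned.
                iterator = AlignUp(commit_right, alignment);
                overlap = true;
                break;
            }
        }
        if (!overlap) {
            return try_left;
        }
    }
    return std::nullopt;
}

int main() {
    // One commit occupies [0, 32). A 32-byte request in [0, 64) fits exactly at offset 32;
    // the old "iterator + size < end" comparison rejected this exact fit.
    return FindFree({{0, 32}}, 0, 64, 32, 16) == std::optional<u64>{32} ? 0 : 1;
}
```
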
@@ -117,12 +114,8 @@ private:
     const VKDevice& device;                   ///< Vulkan device.
     const vk::DeviceMemory memory;            ///< Vulkan memory allocation handler.
     const vk::MemoryPropertyFlags properties; ///< Vulkan properties.
-    const u64 alloc_size;                     ///< Size of this allocation.
+    const u64 allocation_size;                ///< Size of this allocation.
     const u32 shifted_type;                   ///< Stored Vulkan type of this allocation, shifted.
-    const bool is_mappable;                   ///< Whether the allocation is mappable.
-
-    /// Base address of the mapped pointer.
-    u8* base_address{};
 
     /// Hints where the next free region is likely going to be.
     u64 free_iterator{};
@@ -132,13 +125,15 @@ private:
 };
 
 VKMemoryManager::VKMemoryManager(const VKDevice& device)
-    : device{device}, props{device.GetPhysical().getMemoryProperties(device.GetDispatchLoader())},
-      is_memory_unified{GetMemoryUnified(props)} {}
+    : device{device}, properties{device.GetPhysical().getMemoryProperties(
+                          device.GetDispatchLoader())},
+      is_memory_unified{GetMemoryUnified(properties)} {}
 
 VKMemoryManager::~VKMemoryManager() = default;
 
-VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool host_visible) {
-    ASSERT(reqs.size < ALLOC_CHUNK_SIZE);
+VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirements,
+                                       bool host_visible) {
+    const u64 chunk_size = GetAllocationChunkSize(requirements.size);
 
     // When a host visible commit is asked, search for host visible and coherent, otherwise search
     // for a fast device local type.
@@ -147,32 +142,21 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool
             ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
             : vk::MemoryPropertyFlagBits::eDeviceLocal;
 
-    const auto TryCommit = [&]() -> VKMemoryCommit {
-        for (auto& alloc : allocs) {
-            if (!alloc->IsCompatible(wanted_properties, reqs.memoryTypeBits))
-                continue;
-            if (auto commit = alloc->Commit(reqs.size, reqs.alignment); commit) {
-                return commit;
-            }
-        }
-        return {};
-    };
-
-    if (auto commit = TryCommit(); commit) {
+    if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
         return commit;
     }
 
     // Commit has failed, allocate more memory.
-    if (!AllocMemory(wanted_properties, reqs.memoryTypeBits, ALLOC_CHUNK_SIZE)) {
-        // TODO(Rodrigo): Try to use host memory.
-        LOG_CRITICAL(Render_Vulkan, "Ran out of memory!");
-        UNREACHABLE();
+    if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
+        // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
+        // Allocation has failed, panic.
+        UNREACHABLE_MSG("Ran out of VRAM!");
         return {};
     }
 
     // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
     // there's a bug.
-    auto commit = TryCommit();
+    auto commit = TryAllocCommit(requirements, wanted_properties);
     ASSERT(commit);
     return commit;
 }
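
Control-flow note on `VKMemoryManager::Commit` as patched above: try to sub-allocate from existing chunks, grow the pool by one chunk on a miss, then retry exactly once; the retry can only fail on a bug. A toy, Vulkan-free sketch of that shape. Every name here is local to the example, and the chunks are plain bump allocators rather than the free-section search the real code uses:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

using u64 = std::uint64_t;

class ToyPool {
public:
    // Mirrors the patched Commit: fast path, grow, retry once.
    std::pair<std::size_t, u64> Commit(u64 size) {
        if (const auto found = TryAllocCommit(size)) {
            return *found; // an existing chunk had room
        }
        // Miss: grow by one chunk. The real code sizes it with GetAllocationChunkSize and
        // bails out with UNREACHABLE_MSG("Ran out of VRAM!") when the device allocation fails.
        chunks.push_back({std::max(size, min_chunk), 0});
        const auto found = TryAllocCommit(size);
        assert(found); // can only fail on a bug: the fresh chunk fits the request
        return *found;
    }

private:
    struct Chunk {
        u64 capacity;
        u64 used;
    };

    // Bump-allocate from the first chunk with room: (chunk index, offset within chunk).
    std::optional<std::pair<std::size_t, u64>> TryAllocCommit(u64 size) {
        for (std::size_t i = 0; i < chunks.size(); ++i) {
            if (chunks[i].used + size <= chunks[i].capacity) {
                const u64 offset = chunks[i].used;
                chunks[i].used += size;
                return std::make_pair(i, offset);
            }
        }
        return std::nullopt;
    }

    static constexpr u64 min_chunk = 64;
    std::vector<Chunk> chunks;
};

int main() {
    ToyPool pool;
    const auto a = pool.Commit(48); // first commit grows the pool
    const auto b = pool.Commit(16); // fits in the remainder of chunk 0
    assert(a.first == 0 && b.first == 0 && b.second == 48);
}
```
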
@@ -180,8 +164,7 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& reqs, bool
 VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
     const auto dev = device.GetLogical();
     const auto& dld = device.GetDispatchLoader();
-    const auto requeriments = dev.getBufferMemoryRequirements(buffer, dld);
-    auto commit = Commit(requeriments, host_visible);
+    auto commit = Commit(dev.getBufferMemoryRequirements(buffer, dld), host_visible);
     dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
     return commit;
 }
@@ -189,25 +172,23 @@ VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) {
 VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) {
     const auto dev = device.GetLogical();
     const auto& dld = device.GetDispatchLoader();
-    const auto requeriments = dev.getImageMemoryRequirements(image, dld);
-    auto commit = Commit(requeriments, host_visible);
+    auto commit = Commit(dev.getImageMemoryRequirements(image, dld), host_visible);
     dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
     return commit;
 }
 
 bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask,
                                   u64 size) {
-    const u32 type = [&]() {
-        for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
-            const auto flags = props.memoryTypes[type_index].propertyFlags;
+    const u32 type = [&] {
+        for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
+            const auto flags = properties.memoryTypes[type_index].propertyFlags;
             if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
                 // The type matches in type and in the wanted properties.
                 return type_index;
             }
         }
-        LOG_CRITICAL(Render_Vulkan, "Couldn't find a compatible memory type!");
-        UNREACHABLE();
-        return 0u;
+        UNREACHABLE_MSG("Couldn't find a compatible memory type!");
+        return 0U;
     }();
 
     const auto dev = device.GetLogical();
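
The lambda above picks the first memory type whose bit is set in `type_mask` (which comes from `vk::MemoryRequirements::memoryTypeBits`) and whose property flags overlap the wanted ones. A plain-bitmask version of the same search; the flag constants are hypothetical stand-ins for `vk::MemoryPropertyFlagBits`, and note that, as in the hunk, `flags & wanted_properties` is an any-bit overlap test rather than an all-bits subset test:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using u32 = std::uint32_t;

// Hypothetical flag values for the sketch only.
constexpr u32 DEVICE_LOCAL = 1U << 0;
constexpr u32 HOST_VISIBLE = 1U << 1;
constexpr u32 HOST_COHERENT = 1U << 2;

// flags[i] stands in for memoryTypes[i].propertyFlags; bit i of type_mask means the
// resource can live in memory type i.
std::optional<u32> FindMemoryType(const std::vector<u32>& flags, u32 type_mask,
                                  u32 wanted_properties) {
    for (u32 type_index = 0; type_index < flags.size(); ++type_index) {
        if ((type_mask & (1U << type_index)) && (flags[type_index] & wanted_properties)) {
            return type_index;
        }
    }
    return std::nullopt; // AllocMemory treats this as unreachable
}

int main() {
    const std::vector<u32> flags{DEVICE_LOCAL, DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT};
    // Type 0 is masked out for this resource, so the host-visible type 1 is chosen.
    assert(FindMemoryType(flags, 0b10, HOST_VISIBLE | HOST_COHERENT) == std::optional<u32>{1});
}
```
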
@@ -216,19 +197,33 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
     // Try to allocate found type.
     const vk::MemoryAllocateInfo memory_ai(size, type);
     vk::DeviceMemory memory;
-    if (const vk::Result res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
+    if (const auto res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld);
         res != vk::Result::eSuccess) {
         LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res));
         return false;
     }
-    allocs.push_back(
+    allocations.push_back(
         std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type));
     return true;
 }
 
-/*static*/ bool VKMemoryManager::GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& props) {
-    for (u32 heap_index = 0; heap_index < props.memoryHeapCount; ++heap_index) {
-        if (!(props.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
+VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& requirements,
+                                               vk::MemoryPropertyFlags wanted_properties) {
+    for (auto& allocation : allocations) {
+        if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
+            continue;
+        }
+        if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
+            return commit;
+        }
+    }
+    return {};
+}
+
+/*static*/ bool VKMemoryManager::GetMemoryUnified(
+    const vk::PhysicalDeviceMemoryProperties& properties) {
+    for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
+        if (!(properties.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) {
             // Memory is considered unified when heaps are device local only.
             return false;
         }
@@ -236,17 +231,28 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
     return true;
 }
 
-VKMemoryCommitImpl::VKMemoryCommitImpl(VKMemoryAllocation* allocation, vk::DeviceMemory memory,
-                                       u8* data, u64 begin, u64 end)
-    : interval(std::make_pair(begin, end)), memory{memory}, allocation{allocation}, data{data} {}
+VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
+                                       vk::DeviceMemory memory, u64 begin, u64 end)
+    : device{device}, interval{begin, end}, memory{memory}, allocation{allocation} {}
 
 VKMemoryCommitImpl::~VKMemoryCommitImpl() {
     allocation->Free(this);
 }
 
-u8* VKMemoryCommitImpl::GetData() const {
-    ASSERT_MSG(data != nullptr, "Trying to access an unmapped commit.");
-    return data;
+MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
+    const auto dev = device.GetLogical();
+    const auto address = reinterpret_cast<u8*>(
+        dev.mapMemory(memory, interval.first + offset_, size, {}, device.GetDispatchLoader()));
+    return MemoryMap{this, address};
+}
+
+void VKMemoryCommitImpl::Unmap() const {
+    const auto dev = device.GetLogical();
+    dev.unmapMemory(memory, device.GetDispatchLoader());
+}
+
+MemoryMap VKMemoryCommitImpl::Map() const {
+    return Map(interval.second - interval.first);
 }
 
 } // namespace Vulkan
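
The `Map`/`Unmap` pair added in the last hunk replaces the persistent mapping (`GetData` over a constructor-time `mapMemory`) with scoped mappings through a `MemoryMap` handle, whose declaration lives in the memory manager's header and is not part of this diff. A toy RAII sketch of what such a handle plausibly amounts to, with a stand-in commit type that "maps" a local buffer:

```cpp
#include <array>
#include <cstdint>
#include <cstring>

using u8 = std::uint8_t;
using u64 = std::uint64_t;

// Stand-in for VKMemoryCommitImpl: "mapping" hands out a view into a local buffer.
class ToyCommit {
public:
    u8* MapRange(u64 offset, u64 /*size*/) {
        return storage.data() + offset;
    }
    void Unmap() {} // the real commit calls unmapMemory on the device here

private:
    std::array<u8, 256> storage{};
};

// RAII handle in the spirit of the MemoryMap returned by VKMemoryCommitImpl::Map():
// exposes the mapped pointer while alive, unmaps on scope exit.
class ScopedMap {
public:
    ScopedMap(ToyCommit& commit, u8* address) : commit{commit}, address{address} {}
    ~ScopedMap() {
        commit.Unmap();
    }
    ScopedMap(const ScopedMap&) = delete;
    ScopedMap& operator=(const ScopedMap&) = delete;

    u8* GetAddress() const {
        return address;
    }

private:
    ToyCommit& commit;
    u8* address;
};

int main() {
    ToyCommit commit;
    {
        ScopedMap map{commit, commit.MapRange(0, 64)};
        std::memset(map.GetAddress(), 0xCC, 64); // write through the mapping
    } // unmapped here when the handle leaves scope
}
```
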