/*
* Copyright (c) 2023-2025 The Khronos Group Inc.
* Copyright (c) 2023-2025 Valve Corporation
* Copyright (c) 2023-2025 LunarG, Inc.
* Copyright (c) 2023-2025 Collabora, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*/
#include <cstring>
#include <thread>
#include <vector>

#include "../framework/layer_validation_tests.h"

#ifndef VK_USE_PLATFORM_WIN32_KHR
#include <sys/mman.h>
#endif

class PositiveMemory : public VkLayerTest {};

TEST_F(PositiveMemory, MemoryDecompression) {
    AddRequiredExtensions(VK_EXT_MEMORY_DECOMPRESSION_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::synchronization2);
    AddRequiredExtensions(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::memoryDecompression);
    AddRequiredFeature(vkt::Feature::bufferDeviceAddress);
    RETURN_IF_SKIP(Init());

    VkPhysicalDeviceMemoryDecompressionPropertiesEXT mem_decomp_props = vku::InitStructHelper();
    GetPhysicalDeviceProperties2(mem_decomp_props);
    if ((mem_decomp_props.decompressionMethods & VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_EXT) == 0) {
        GTEST_SKIP() << "GDeflate decompression method not supported";
    }
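    // Small hand-crafted blob standing in for a GDeflate stream; the first four bytes spell the ASCII tag "GDFL"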
    static const uint8_t kGdeflate_data[] = {0x47, 0x44, 0x46, 0x4c, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00,
                                             0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    const std::vector<uint8_t> compressed(std::begin(kGdeflate_data), std::end(kGdeflate_data));
    VkDeviceSize buffer_size = static_cast<VkDeviceSize>(compressed.size());
    VkDeviceSize decompressed_size = 64;

    VkBufferUsageFlags2CreateInfo src_usage2 = vku::InitStructHelper();
    src_usage2.usage = VK_BUFFER_USAGE_2_MEMORY_DECOMPRESSION_BIT_EXT;
    vkt::Buffer src_buffer(*m_device, buffer_size, src_usage2, vkt::device_address);

    VkBufferUsageFlags2CreateInfo dst_usage2 = vku::InitStructHelper();
    dst_usage2.usage = VK_BUFFER_USAGE_2_MEMORY_DECOMPRESSION_BIT_EXT;
    vkt::Buffer dst_buffer(*m_device, decompressed_size, dst_usage2, vkt::device_address);

    void* p = src_buffer.Memory().Map();
    std::memcpy(p, compressed.data(), compressed.size());

    VkDecompressMemoryRegionEXT decompress_region = {};
    decompress_region.srcAddress = src_buffer.Address();
    decompress_region.dstAddress = dst_buffer.Address();
    decompress_region.compressedSize = buffer_size;
    decompress_region.decompressedSize = decompressed_size;

    VkDecompressMemoryInfoEXT decompress_info = vku::InitStructHelper();
    decompress_info.decompressionMethod = VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_EXT;
    decompress_info.regionCount = 1;
    decompress_info.pRegions = &decompress_region;

    m_command_buffer.Begin();
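    // Make the host write of the compressed payload visible to the decompression stage before it reads and writes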
    VkMemoryBarrier2 mem_barrier = vku::InitStructHelper();
    mem_barrier.srcAccessMask = VK_ACCESS_2_MEMORY_WRITE_BIT;
    mem_barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
    mem_barrier.dstAccessMask = VK_ACCESS_2_MEMORY_DECOMPRESSION_READ_BIT_EXT | VK_ACCESS_2_MEMORY_DECOMPRESSION_WRITE_BIT_EXT;
    mem_barrier.dstStageMask = VK_PIPELINE_STAGE_2_MEMORY_DECOMPRESSION_BIT_EXT;
    m_command_buffer.BarrierKHR(mem_barrier);
    vk::CmdDecompressMemoryEXT(m_command_buffer, &decompress_info);
    m_command_buffer.End();
    m_default_queue->SubmitAndWait(m_command_buffer);
}

TEST_F(PositiveMemory, MemoryDecompressionIndirectCount) {
    AddRequiredExtensions(VK_EXT_MEMORY_DECOMPRESSION_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::synchronization2);
    AddRequiredExtensions(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::memoryDecompression);
    AddRequiredFeature(vkt::Feature::bufferDeviceAddress);
    RETURN_IF_SKIP(Init());

    VkPhysicalDeviceMemoryDecompressionPropertiesEXT mem_decomp_props2 = vku::InitStructHelper();
    GetPhysicalDeviceProperties2(mem_decomp_props2);
    if ((mem_decomp_props2.decompressionMethods & VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_EXT) == 0) {
        GTEST_SKIP() << "GDeflate decompression method not supported";
    }
    static const uint8_t kGdeflate_data[] = {0x47, 0x44, 0x46, 0x4c, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00,
                                             0x00, 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
    const std::vector<uint8_t> compressed(std::begin(kGdeflate_data), std::end(kGdeflate_data));
    VkDeviceSize buffer_size = static_cast<VkDeviceSize>(compressed.size());
    VkDeviceSize decompressed_size = 64;

    VkBufferUsageFlags2CreateInfo src_usage2_ind = vku::InitStructHelper();
    src_usage2_ind.usage = VK_BUFFER_USAGE_2_MEMORY_DECOMPRESSION_BIT_EXT | VK_BUFFER_USAGE_2_INDIRECT_BUFFER_BIT;
    vkt::Buffer src_buffer(*m_device, buffer_size, src_usage2_ind, vkt::device_address);

    VkBufferUsageFlags2CreateInfo dst_usage2_ind = vku::InitStructHelper();
    dst_usage2_ind.usage = VK_BUFFER_USAGE_2_MEMORY_DECOMPRESSION_BIT_EXT;
    vkt::Buffer dst_buffer(*m_device, decompressed_size, dst_usage2_ind, vkt::device_address);

    void* p = src_buffer.Memory().Map();
    std::memcpy(p, compressed.data(), compressed.size());

    VkDecompressMemoryRegionEXT decompress_region = {};
    decompress_region.srcAddress = src_buffer.Address();
    decompress_region.dstAddress = dst_buffer.Address();
    decompress_region.compressedSize = buffer_size;
    decompress_region.decompressedSize = decompressed_size;
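    // Duplicate the region so there are two entries: the region array goes in one indirect buffer and the
    // region count in another, both consumed by the device through their device addresses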
    VkDecompressMemoryRegionEXT cmds[2] = {decompress_region, decompress_region};
    vkt::Buffer ic_buffer(*m_device, sizeof(cmds), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, vkt::device_address);
    void* p_ic = ic_buffer.Memory().Map();
    std::memcpy(p_ic, cmds, sizeof(cmds));

    vkt::Buffer icc_buffer(*m_device, sizeof(uint32_t), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, vkt::device_address);
    uint32_t count = sizeof(cmds) / sizeof(VkDecompressMemoryRegionEXT);
    void* p_cnt = icc_buffer.Memory().Map();
    std::memcpy(p_cnt, &count, sizeof(count));

    VkMemoryDecompressionMethodFlagsEXT decompression_method = VK_MEMORY_DECOMPRESSION_METHOD_GDEFLATE_1_0_BIT_EXT;
    uint32_t max_decompression_count = count;
    uint32_t stride = static_cast<uint32_t>(sizeof(VkDecompressMemoryRegionEXT));

    m_command_buffer.Begin();
    VkMemoryBarrier2 mem_barrier = vku::InitStructHelper();
    mem_barrier.srcAccessMask = VK_ACCESS_2_MEMORY_WRITE_BIT;
    mem_barrier.srcStageMask = VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
    mem_barrier.dstAccessMask = VK_ACCESS_2_MEMORY_DECOMPRESSION_READ_BIT_EXT | VK_ACCESS_2_MEMORY_DECOMPRESSION_WRITE_BIT_EXT;
    mem_barrier.dstStageMask = VK_PIPELINE_STAGE_2_MEMORY_DECOMPRESSION_BIT_EXT;
    m_command_buffer.BarrierKHR(mem_barrier);
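    // The device reads the actual region count from icc_buffer at execution time; max_decompression_count
    // only caps how many regions may be consumed from ic_buffer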
    vk::CmdDecompressMemoryIndirectCountEXT(m_command_buffer, decompression_method, ic_buffer.Address(), icc_buffer.Address(),
                                            max_decompression_count, stride);
    m_command_buffer.End();
    m_default_queue->SubmitAndWait(m_command_buffer);
}

TEST_F(PositiveMemory, MapMemory2) {
    TEST_DESCRIPTION("Validate vkMapMemory2 and vkUnmapMemory2");
    AddRequiredExtensions(VK_KHR_MAP_MEMORY_2_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    /* Vulkan doesn't have any requirements on what allocationSize can be
     * other than that it must be non-zero. Pick 64KB because that should
     * work out to an even number of pages on basically any GPU.
     */
    const VkDeviceSize allocation_size = 64 << 10;

    VkMemoryAllocateInfo memory_info = vku::InitStructHelper();
    memory_info.allocationSize = allocation_size;

    bool pass = m_device->Physical().SetMemoryType(vvl::kU32Max, &memory_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    ASSERT_TRUE(pass);

    vkt::DeviceMemory memory(*m_device, memory_info);

    VkMemoryMapInfo map_info = vku::InitStructHelper();
    map_info.memory = memory;
    map_info.offset = 0;
    map_info.size = memory_info.allocationSize;

    VkMemoryUnmapInfoKHR unmap_info = vku::InitStructHelper();
    unmap_info.memory = memory;

    uint32_t *pData = nullptr;
    VkResult err = vk::MapMemory2KHR(device(), &map_info, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    ASSERT_TRUE(pData != nullptr);
    err = vk::UnmapMemory2KHR(device(), &unmap_info);
    ASSERT_EQ(VK_SUCCESS, err);
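    // Map a second time, now using VK_WHOLE_SIZE instead of the explicit allocation size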
    map_info.size = VK_WHOLE_SIZE;
    pData = nullptr;
    err = vk::MapMemory2KHR(device(), &map_info, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    ASSERT_TRUE(pData != nullptr);
    err = vk::UnmapMemory2KHR(device(), &unmap_info);
    ASSERT_EQ(VK_SUCCESS, err);
}

#ifndef VK_USE_PLATFORM_WIN32_KHR
TEST_F(PositiveMemory, MapMemoryPlaced) {
    TEST_DESCRIPTION("Validate placed memory maps");
    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_MAP_MEMORY_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_EXT_MAP_MEMORY_PLACED_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::memoryMapPlaced);
    AddRequiredFeature(vkt::Feature::memoryUnmapReserve);
    RETURN_IF_SKIP(Init());

    VkPhysicalDeviceMapMemoryPlacedPropertiesEXT map_placed_props = vku::InitStructHelper();
    GetPhysicalDeviceProperties2(map_placed_props);

    /* Vulkan doesn't have any requirements on what allocationSize can be
     * other than that it must be non-zero. Pick a multiple of
     * minPlacedMemoryMapAlignment so the whole allocation can be mapped at
     * an aligned placed address.
     */
    const VkDeviceSize allocation_size = map_placed_props.minPlacedMemoryMapAlignment * 16;

    VkMemoryAllocateInfo memory_info = vku::InitStructHelper();
    memory_info.allocationSize = allocation_size;

    bool pass = m_device->Physical().SetMemoryType(vvl::kU32Max, &memory_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    ASSERT_TRUE(pass);

    vkt::DeviceMemory memory(*m_device, memory_info);

    /* Reserve one more page in case we need to deal with any alignment weirdness. */
    size_t reservation_size = allocation_size + map_placed_props.minPlacedMemoryMapAlignment;
    void *reservation = mmap(NULL, reservation_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    ASSERT_TRUE(reservation != MAP_FAILED);

    /* Align up to minPlacedMemoryMapAlignment */
    uintptr_t align_1 = map_placed_props.minPlacedMemoryMapAlignment - 1;
    void *addr = reinterpret_cast<void *>((reinterpret_cast<uintptr_t>(reservation) + align_1) & ~align_1);

    VkMemoryMapInfo map_info = vku::InitStructHelper();
    map_info.memory = memory;
    map_info.flags = VK_MEMORY_MAP_PLACED_BIT_EXT;
    map_info.offset = 0;
    map_info.size = VK_WHOLE_SIZE;

    VkMemoryMapPlacedInfoEXT placed_info = vku::InitStructHelper();
    placed_info.pPlacedAddress = addr;
    map_info.pNext = &placed_info;

    void *pData;
    VkResult res = vk::MapMemory2KHR(device(), &map_info, &pData);
    ASSERT_EQ(VK_SUCCESS, res);
    if (IsPlatformMockICD()) {
        return;  // currently can only validate the output with a real driver
    }
    ASSERT_EQ(pData, addr);
    /* Write some data and make sure we don't fault */
    memset(pData, 0x5c, allocation_size);

    VkMemoryUnmapInfo unmap_info = vku::InitStructHelper();
    unmap_info.memory = memory;
    unmap_info.flags = VK_MEMORY_UNMAP_RESERVE_BIT_EXT;
    res = vk::UnmapMemory2KHR(device(), &unmap_info);
    ASSERT_EQ(VK_SUCCESS, res);

    /* Test mapping with the whole size but not VK_WHOLE_SIZE */
    map_info.size = allocation_size;
    res = vk::MapMemory2KHR(device(), &map_info, &pData);
    ASSERT_EQ(VK_SUCCESS, res);
    res = vk::UnmapMemory2KHR(device(), &unmap_info);
    ASSERT_EQ(VK_SUCCESS, res);

    map_info.flags = 0;
    res = vk::MapMemory2KHR(device(), &map_info, &pData);
    ASSERT_EQ(VK_SUCCESS, res);
    /* We unmapped with RESERVE above so this should be different */
    ASSERT_NE(pData, addr);
    ASSERT_EQ(static_cast<uint8_t *>(pData)[0], 0x5c);

    unmap_info.flags = 0;
    res = vk::UnmapMemory2KHR(device(), &unmap_info);
    ASSERT_EQ(VK_SUCCESS, res);
}
#endif

TEST_F(PositiveMemory, GetMemoryRequirements2) {
    TEST_DESCRIPTION(
        "Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit "
        "errors when objects are bound and used");
    AddRequiredExtensions(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    vkt::Buffer buffer(
        *m_device, vkt::Buffer::CreateInfo(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT), vkt::no_mem);

    VkBufferMemoryRequirementsInfo2 buffer_info = vku::InitStructHelper();
    buffer_info.buffer = buffer;
    VkMemoryRequirements2 buffer_reqs = vku::InitStructHelper();
    vk::GetBufferMemoryRequirements2KHR(device(), &buffer_info, &buffer_reqs);

    vkt::DeviceMemory buffer_memory(*m_device,
                                    vkt::DeviceMemory::GetResourceAllocInfo(*m_device, buffer_reqs.memoryRequirements, 0));
    vk::BindBufferMemory(device(), buffer, buffer_memory, 0);

    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
    vkt::Image image(*m_device, image_ci, vkt::no_mem);

    VkImageMemoryRequirementsInfo2 image_info = vku::InitStructHelper();
    image_info.image = image;
    VkMemoryRequirements2 image_reqs = vku::InitStructHelper();
    vk::GetImageMemoryRequirements2KHR(device(), &image_info, &image_reqs);

    vkt::DeviceMemory image_memory(*m_device, vkt::DeviceMemory::GetResourceAllocInfo(*m_device, image_reqs.memoryRequirements, 0));
    vk::BindImageMemory(device(), image, image_memory, 0);

    // Now execute arbitrary commands that use the test buffer and image
    m_command_buffer.Begin();
    // Fill buffer with 0
    vk::CmdFillBuffer(m_command_buffer, buffer, 0, VK_WHOLE_SIZE, 0);
    // Transition and clear image
    const VkImageSubresourceRange subresource_range = image.SubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT);
    const auto barrier = image.ImageMemoryBarrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                  VK_IMAGE_LAYOUT_GENERAL, subresource_range);
    vk::CmdPipelineBarrier(m_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr,
                           0, nullptr, 1, &barrier);
    const VkClearColorValue color = {};
    vk::CmdClearColorImage(m_command_buffer, image, VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range);
    // Submit and verify no validation errors
    m_command_buffer.End();
    m_default_queue->SubmitAndWait(m_command_buffer);
}

TEST_F(PositiveMemory, BindMemory2) {
    TEST_DESCRIPTION(
        "Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are "
        "used");
    AddRequiredExtensions(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    vkt::Buffer buffer(*m_device, vkt::Buffer::CreateInfo(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT), vkt::no_mem);
    vkt::DeviceMemory buffer_memory(*m_device, vkt::DeviceMemory::GetResourceAllocInfo(*m_device, buffer.MemoryRequirements(), 0));
    VkBindBufferMemoryInfo buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, buffer, buffer_memory, 0};
    vk::BindBufferMemory2KHR(device(), 1, &buffer_bind_info);

    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
    vkt::Image image(*m_device, image_ci, vkt::no_mem);
    vkt::DeviceMemory image_memory(*m_device, vkt::DeviceMemory::GetResourceAllocInfo(*m_device, image.MemoryRequirements(), 0));
    VkBindImageMemoryInfo image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, image, image_memory, 0};
    vk::BindImageMemory2KHR(device(), 1, &image_bind_info);

    // Now execute arbitrary commands that use the test buffer and image
    m_command_buffer.Begin();
    // Fill buffer with 0
    vk::CmdFillBuffer(m_command_buffer, buffer, 0, VK_WHOLE_SIZE, 0);
    // Transition and clear image
    const VkImageSubresourceRange subresource_range = image.SubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT);
    const auto barrier = image.ImageMemoryBarrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                  VK_IMAGE_LAYOUT_GENERAL, subresource_range);
    vk::CmdPipelineBarrier(m_command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr,
                           0, nullptr, 1, &barrier);
    const VkClearColorValue color = {};
    vk::CmdClearColorImage(m_command_buffer, image, VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range);
    // Submit and verify no validation errors
    m_command_buffer.End();
    m_default_queue->SubmitAndWait(m_command_buffer);
}

TEST_F(PositiveMemory, NonCoherentMapping) {
    TEST_DESCRIPTION(
        "Ensure that validation's handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access "
        "violations");
    VkResult err;
    uint8_t *pData;
    RETURN_IF_SKIP(Init());

    VkMemoryRequirements mem_reqs;
    mem_reqs.memoryTypeBits = 0xFFFFFFFF;
    const VkDeviceSize atom_size = m_device->Physical().limits_.nonCoherentAtomSize;

    VkMemoryAllocateInfo alloc_info = vku::InitStructHelper();
    alloc_info.memoryTypeIndex = 0;
    static const VkDeviceSize allocation_size = 32 * atom_size;
    alloc_info.allocationSize = allocation_size;

    // Find a memory configuration WITHOUT a COHERENT bit, otherwise skip
    bool pass = m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                   VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    if (!pass) {
        pass = m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &alloc_info,
                                                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                  VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
        if (!pass) {
            pass = m_device->Physical().SetMemoryType(
                mem_reqs.memoryTypeBits, &alloc_info,
                VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
            if (!pass) {
                GTEST_SKIP() << "Couldn't find a memory type without a COHERENT bit";
            }
        }
    }
    vkt::DeviceMemory mem(*m_device, alloc_info);

    // Map/Flush/Invalidate using WHOLE_SIZE and zero offset, covering the entire mapped range
    err = vk::MapMemory(device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    VkMappedMemoryRange mmr = vku::InitStructHelper();
    mmr.memory = mem;
    mmr.offset = 0;
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    err = vk::InvalidateMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    vk::UnmapMemory(device(), mem);

    // Map/Flush/Invalidate using WHOLE_SIZE and an offset, covering the entire mapped range
    err = vk::MapMemory(device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    mmr.memory = mem;
    mmr.offset = 6 * atom_size;
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    err = vk::InvalidateMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    vk::UnmapMemory(device(), mem);

    // Map with offset and size
    // Flush/Invalidate subrange of mapped area with offset and size
    err = vk::MapMemory(device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    mmr.memory = mem;
    mmr.offset = 4 * atom_size;
    mmr.size = 2 * atom_size;
    err = vk::FlushMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    err = vk::InvalidateMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    vk::UnmapMemory(device(), mem);

    // Map without offset and flush WHOLE_SIZE with two separate offsets
    err = vk::MapMemory(device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_EQ(VK_SUCCESS, err);
    mmr.memory = mem;
    mmr.offset = allocation_size - (4 * atom_size);
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    mmr.offset = allocation_size - (6 * atom_size);
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(device(), 1, &mmr);
    ASSERT_EQ(VK_SUCCESS, err);
    vk::UnmapMemory(device(), mem);
}

TEST_F(PositiveMemory, MappingWithMultiInstanceHeapFlag) {
    TEST_DESCRIPTION("Test mapping memory that uses a memory heap with VK_MEMORY_HEAP_MULTI_INSTANCE_BIT");
    AddRequiredExtensions(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    VkPhysicalDeviceMemoryProperties memory_info;
    vk::GetPhysicalDeviceMemoryProperties(Gpu(), &memory_info);

    uint32_t memory_index = vvl::kU32Max;
    for (uint32_t i = 0; i < memory_info.memoryTypeCount; ++i) {
        if ((memory_info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
            if (memory_info.memoryHeaps[memory_info.memoryTypes[i].heapIndex].flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) {
                memory_index = i;
                break;
            }
        }
    }

    if (memory_index == vvl::kU32Max) {
        GTEST_SKIP() << "Did not find host-visible memory from a memory heap with the VK_MEMORY_HEAP_MULTI_INSTANCE_BIT bit";
    }

    VkMemoryAllocateInfo mem_alloc = vku::InitStructHelper();
    mem_alloc.allocationSize = 64;
    mem_alloc.memoryTypeIndex = memory_index;

    vkt::DeviceMemory memory(*m_device, mem_alloc);
    uint32_t *pData;
    vk::MapMemory(device(), memory, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    vk::UnmapMemory(device(), memory);
}

TEST_F(PositiveMemory, BindImageMemoryMultiThreaded) {
    RETURN_IF_SKIP(Init());

    if (!IsPlatformMockICD()) {
        GTEST_SKIP() << "This test can crash drivers with threading issues";
    }

    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);

    // Create an image object, allocate memory, bind memory, and destroy the object
    auto worker_thread = [&]() {
        for (uint32_t i = 0; i < 1000; ++i) {
            vkt::Image image(*m_device, image_ci, vkt::no_mem);

            VkMemoryRequirements mem_reqs;
            vk::GetImageMemoryRequirements(device(), image, &mem_reqs);

            VkMemoryAllocateInfo mem_alloc = vku::InitStructHelper();
            mem_alloc.memoryTypeIndex = 0;
            mem_alloc.allocationSize = mem_reqs.size;
            const bool pass = m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &mem_alloc, 0);
            ASSERT_TRUE(pass);

            vkt::DeviceMemory mem(*m_device, mem_alloc);
            ASSERT_EQ(VK_SUCCESS, vk::BindImageMemory(device(), image, mem, 0));
        }
    };
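    // Run the create/bind/destroy loop from many threads at once to exercise the layer's thread-safety tracking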
    constexpr int worker_count = 32;
    std::vector<std::thread> workers;
    workers.reserve(worker_count);
    for (int i = 0; i < worker_count; ++i) {
        workers.emplace_back(worker_thread);
    }
    for (auto &worker : workers) {
        worker.join();
    }
}

TEST_F(PositiveMemory, DeviceBufferMemoryRequirements) {
    TEST_DESCRIPTION("Test vkGetDeviceBufferMemoryRequirements");
    SetTargetApiVersion(VK_API_VERSION_1_3);
    RETURN_IF_SKIP(Init());

    VkBufferCreateInfo buffer_create_info = vku::InitStructHelper();
    buffer_create_info.size = 1024;
    buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    vkt::Buffer buffer(*m_device, buffer_create_info, vkt::no_mem);
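    // vkGetDeviceBufferMemoryRequirements (core 1.3) computes requirements from the create info alone,
    // without needing a buffer handle; the result is then used to bind the real buffer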
    VkDeviceBufferMemoryRequirements info = vku::InitStructHelper();
    info.pCreateInfo = &buffer_create_info;
    VkMemoryRequirements2 memory_reqs2 = vku::InitStructHelper();
    vk::GetDeviceBufferMemoryRequirements(device(), &info, &memory_reqs2);

    VkMemoryAllocateInfo memory_info = vku::InitStructHelper();
    memory_info.allocationSize = memory_reqs2.memoryRequirements.size;
    const bool pass = m_device->Physical().SetMemoryType(memory_reqs2.memoryRequirements.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);

    vkt::DeviceMemory buffer_memory(*m_device, memory_info);
    VkResult err = vk::BindBufferMemory(device(), buffer, buffer_memory, 0);
    ASSERT_EQ(VK_SUCCESS, err);
}

TEST_F(PositiveMemory, DeviceImageMemoryRequirements) {
    TEST_DESCRIPTION("Test vkGetDeviceImageMemoryRequirements");
    SetTargetApiVersion(VK_API_VERSION_1_3);
    RETURN_IF_SKIP(Init());

    VkImageCreateInfo image_create_info =
        vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
    vkt::Image image(*m_device, image_create_info, vkt::no_mem);
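    // Likewise, vkGetDeviceImageMemoryRequirements queries requirements straight from the image create info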
    VkDeviceImageMemoryRequirements info = vku::InitStructHelper();
    info.pCreateInfo = &image_create_info;
    VkMemoryRequirements2 mem_reqs = vku::InitStructHelper();
    vk::GetDeviceImageMemoryRequirements(device(), &info, &mem_reqs);

    VkMemoryAllocateInfo mem_alloc = vku::InitStructHelper();
    mem_alloc.memoryTypeIndex = 0;
    mem_alloc.allocationSize = mem_reqs.memoryRequirements.size;
    const bool pass = m_device->Physical().SetMemoryType(mem_reqs.memoryRequirements.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);

    vkt::DeviceMemory mem(*m_device, mem_alloc);
    VkResult err = vk::BindImageMemory(device(), image, mem, 0);
    ASSERT_EQ(VK_SUCCESS, err);
}

#ifdef VK_USE_PLATFORM_WIN32_KHR
TEST_F(PositiveMemory, BindMemoryDX11Handle) {
    TEST_DESCRIPTION("Bind memory imported from a DX11 resource. Allocation size should be ignored.");
    AddRequiredExtensions(VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    // The mock ICD allows using fake DX11 handles instead of going through the DX11 API directly.
    if (!IsPlatformMockICD()) {
        GTEST_SKIP() << "This test only runs on the mock ICD";
    }

    VkExternalMemoryImageCreateInfo external_info = vku::InitStructHelper();
    external_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT;
    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
    image_ci.pNext = &external_info;
    vkt::Image image(*m_device, image_ci, vkt::no_mem);

    VkMemoryRequirements mem_reqs{};
    vk::GetImageMemoryRequirements(device(), image, &mem_reqs);

    VkImportMemoryWin32HandleInfoKHR memory_import = vku::InitStructHelper();
    memory_import.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT;
    memory_import.handle = (HANDLE)0x12345678;  // Use an arbitrary non-zero value as the DX11 resource handle

    VkMemoryAllocateInfo alloc_info = vku::InitStructHelper(&memory_import);  // Leave allocation size at zero
    m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &alloc_info, 0);
    vkt::DeviceMemory memory(*m_device, alloc_info);

    // This should not trigger VUs that take into account allocation size (e.g. 01049/01046)
    vk::BindImageMemory(device(), image, memory, 0);
}

TEST_F(PositiveMemory, BindMemoryDX12Handle) {
    TEST_DESCRIPTION("Bind memory imported from a DX12 resource. Allocation size should be ignored.");
    AddRequiredExtensions(VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    // The mock ICD allows using fake DX12 handles instead of going through the DX12 API directly.
    if (!IsPlatformMockICD()) {
        GTEST_SKIP() << "This test only runs on the mock ICD";
    }

    VkExternalMemoryImageCreateInfo external_info = vku::InitStructHelper();
    external_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT;
    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT);
    image_ci.pNext = &external_info;
    vkt::Image image(*m_device, image_ci, vkt::no_mem);

    VkMemoryRequirements mem_reqs{};
    vk::GetImageMemoryRequirements(device(), image, &mem_reqs);

    VkImportMemoryWin32HandleInfoKHR memory_import = vku::InitStructHelper();
    memory_import.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT;
    memory_import.handle = (HANDLE)0x12345678;  // Use an arbitrary non-zero value as the DX12 resource handle

    VkMemoryAllocateInfo alloc_info = vku::InitStructHelper(&memory_import);  // Leave allocation size at zero
    m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &alloc_info, 0);
    vkt::DeviceMemory memory(*m_device, alloc_info);

    // This should not trigger VUs that take into account allocation size (e.g. 01049/01046)
    vk::BindImageMemory(device(), image, memory, 0);
}
#endif // VK_USE_PLATFORM_WIN32_KHR

TEST_F(PositiveMemory, BindMemoryStatusBuffer) {
    TEST_DESCRIPTION("Use VkBindMemoryStatus when binding buffer to memory.");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    AddRequiredFeature(vkt::Feature::maintenance6);
    AddRequiredExtensions(VK_KHR_MAINTENANCE_6_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    if (IsPlatformMockICD()) {
        GTEST_SKIP() << "Test not supported by MockICD, skipping";
    }

    VkBufferCreateInfo buffer_ci = vku::InitStructHelper();
    buffer_ci.size = 32u;
    buffer_ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    vkt::Buffer buffer;
    buffer.InitNoMemory(*m_device, buffer_ci);

    VkMemoryRequirements mem_reqs;
    vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = vku::InitStructHelper();
    alloc_info.allocationSize = mem_reqs.size;
    vkt::DeviceMemory memory(*m_device, alloc_info);
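    // Seed the result with a sentinel so the test can tell whether the implementation actually wrote
    // the per-bind status through VkBindMemoryStatus (maintenance6)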
    VkResult result = VK_RESULT_MAX_ENUM;
    VkBindMemoryStatus bind_memory_status = vku::InitStructHelper();
    bind_memory_status.pResult = &result;

    VkBindBufferMemoryInfo bind_info = vku::InitStructHelper(&bind_memory_status);
    bind_info.buffer = buffer;
    bind_info.memory = memory;
    bind_info.memoryOffset = 0u;
    vk::BindBufferMemory2(device(), 1u, &bind_info);
    ASSERT_NE(result, VK_RESULT_MAX_ENUM);
}

TEST_F(PositiveMemory, BindMemoryStatusImage) {
    TEST_DESCRIPTION("Use VkBindMemoryStatus when binding image to memory.");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    AddRequiredFeature(vkt::Feature::maintenance6);
    AddRequiredExtensions(VK_KHR_MAINTENANCE_6_EXTENSION_NAME);
    RETURN_IF_SKIP(Init());

    if (IsPlatformMockICD()) {
        GTEST_SKIP() << "Test not supported by MockICD, skipping";
    }

    auto image_ci = vkt::Image::ImageCreateInfo2D(32, 32, 1, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
    vkt::Image image(*m_device, image_ci, vkt::no_mem);
    vkt::DeviceMemory memory(*m_device, vkt::DeviceMemory::GetResourceAllocInfo(*m_device, image.MemoryRequirements(), 0));

    VkResult result = VK_RESULT_MAX_ENUM;
    VkBindMemoryStatus bind_memory_status = vku::InitStructHelper();
    bind_memory_status.pResult = &result;

    VkBindImageMemoryInfo bind_info = vku::InitStructHelper(&bind_memory_status);
    bind_info.image = image;
    bind_info.memory = memory;
    bind_info.memoryOffset = 0u;
    vk::BindImageMemory2(device(), 1u, &bind_info);
    ASSERT_NE(result, VK_RESULT_MAX_ENUM);
}

TEST_F(PositiveMemory, MapMemoryCoherentAtomSize) {
    RETURN_IF_SKIP(Init());

    if (IsPlatformMockICD()) {
        GTEST_SKIP() << "Test not supported by MockICD, MapMemory will fail ASAN";
    }

    const VkDeviceSize atom_size = m_device->Physical().limits_.nonCoherentAtomSize;
    if (atom_size == 1) {
        // Some platforms have a nonCoherentAtomSize of 1, which makes the test meaningless
        GTEST_SKIP() << "nonCoherentAtomSize is 1";
    }

    VkBufferCreateInfo buffer_ci = vku::InitStructHelper();
    buffer_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffer_ci.size = 256;
    vkt::Buffer buffer(*m_device, buffer_ci, vkt::no_mem);

    VkMemoryRequirements mem_reqs;
    vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = vku::InitStructHelper();
    alloc_info.memoryTypeIndex = 0;
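    // Allocation size is deliberately not a multiple of nonCoherentAtomSize, so a VK_WHOLE_SIZE flush
    // ends at the end of the allocation rather than on an atom boundary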
    alloc_info.allocationSize = (atom_size * 4) + 1;
    bool pass = m_device->Physical().SetMemoryType(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        GTEST_SKIP() << "Failed to set memory type";
    }
    vkt::DeviceMemory mem(*m_device, alloc_info);

    uint8_t *pData;
    ASSERT_EQ(VK_SUCCESS, vk::MapMemory(device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData));

    // Offset is a multiple of the atom size, but the total memory range is not
    VkMappedMemoryRange mem_range = vku::InitStructHelper();
    mem_range.memory = mem;
    mem_range.offset = atom_size;
    mem_range.size = VK_WHOLE_SIZE;
    vk::FlushMappedMemoryRanges(device(), 1, &mem_range);

    vk::UnmapMemory(device(), mem);
}

TEST_F(PositiveMemory, ZeroInitializeDeviceMemory) {
    SetTargetApiVersion(VK_API_VERSION_1_3);
    AddRequiredExtensions(VK_EXT_ZERO_INITIALIZE_DEVICE_MEMORY_EXTENSION_NAME);
    AddRequiredFeature(vkt::Feature::zeroInitializeDeviceMemory);
    RETURN_IF_SKIP(Init());

    VkImageCreateInfo image_ci =
        vkt::Image::ImageCreateInfo2D(4, 4, 1, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
    image_ci.initialLayout = VK_IMAGE_LAYOUT_ZERO_INITIALIZED_EXT;
    vkt::Image image(*m_device, image_ci, vkt::no_mem);

    auto alloc_info = vkt::DeviceMemory::GetResourceAllocInfo(*m_device, image.MemoryRequirements(), 0);
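    // VK_MEMORY_ALLOCATE_ZERO_INITIALIZE_BIT_EXT requests zeroed memory from the driver, which pairs with
    // the image's VK_IMAGE_LAYOUT_ZERO_INITIALIZED_EXT initial layout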
    VkMemoryAllocateFlagsInfo alloc_flags = vku::InitStructHelper();
    alloc_flags.flags = VK_MEMORY_ALLOCATE_ZERO_INITIALIZE_BIT_EXT;
    alloc_info.pNext = &alloc_flags;
    vkt::DeviceMemory memory(*m_device, alloc_info);
    vk::BindImageMemory(device(), image, memory, 0);
}