| /* |
| * Copyright 2020 Google LLC |
| * SPDX-License-Identifier: MIT |
| */ |
| |
| #include "vkr_device_memory.h" |
| |
| #include <gbm.h> |
| |
| #include "venus-protocol/vn_protocol_renderer_transport.h" |
| |
| #include "vkr_device_memory_gen.h" |
| #include "vkr_physical_device.h" |
| |
| static bool |
| vkr_get_fd_info_from_resource_info(struct vkr_context *ctx, |
| const VkImportMemoryResourceInfoMESA *res_info, |
| VkImportMemoryFdInfoKHR *out) |
| { |
| struct vkr_resource *res = vkr_context_get_resource(ctx, res_info->resourceId); |
| if (!res) { |
| vkr_log("failed to import resource: invalid res_id %u", res_info->resourceId); |
| vkr_context_set_fatal(ctx); |
| return false; |
| } |
| |
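| /* Map the virgl resource fd type to the matching Vulkan external memory handle |
| * type; other fd types cannot be imported as VkDeviceMemory. |
| */ |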
| VkExternalMemoryHandleTypeFlagBits handle_type; |
| switch (res->fd_type) { |
| case VIRGL_RESOURCE_FD_DMABUF: |
| handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; |
| break; |
| case VIRGL_RESOURCE_FD_OPAQUE: |
| handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT; |
| break; |
| default: |
| return false; |
| } |
| |
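| /* Dup the fd since a successful VkImportMemoryFdInfoKHR import transfers ownership |
| * of the fd to the Vulkan driver, while the virgl resource keeps its own fd. |
| */ |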
| int fd = os_dupfd_cloexec(res->u.fd); |
| if (fd < 0) |
| return false; |
| |
| *out = (VkImportMemoryFdInfoKHR){ |
| .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, |
| .pNext = res_info->pNext, |
| .fd = fd, |
| .handleType = handle_type, |
| }; |
| return true; |
| } |
| |
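| /* Allocate a linear gbm bo of at least allocationSize bytes and wrap its dma_buf fd |
| * in a VkImportMemoryFdInfoKHR, so that the resulting VkDeviceMemory can later be |
| * exported as a dma_buf. |
| */ |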
| static VkResult |
| vkr_get_fd_info_from_allocation_info(struct vkr_physical_device *physical_dev, |
| const VkMemoryAllocateInfo *alloc_info, |
| struct gbm_bo **out_gbm_bo, |
| VkImportMemoryFdInfoKHR *out_fd_info) |
| { |
| #ifdef MINIGBM |
| const uint32_t gbm_bo_use_flags = |
| GBM_BO_USE_LINEAR | GBM_BO_USE_SW_READ_RARELY | GBM_BO_USE_SW_WRITE_RARELY; |
| #else |
| const uint32_t gbm_bo_use_flags = GBM_BO_USE_LINEAR; |
| #endif |
| |
| struct gbm_bo *gbm_bo; |
| int fd = -1; |
| |
| assert(physical_dev->gbm_device); |
| |
| /* |
| * Reject allocations larger than UINT32_MAX here for simplicity. Having |
| * VkPhysicalDeviceVulkan11Properties report min(maxMemoryAllocationSize, UINT32_MAX) |
| * instead would also affect unmappable scenarios. |
| */ |
| if (alloc_info->allocationSize > UINT32_MAX) |
| return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| |
| /* 4K alignment is used on all implementations we support. */ |
| gbm_bo = |
| gbm_bo_create(physical_dev->gbm_device, align(alloc_info->allocationSize, 4096), 1, |
| GBM_FORMAT_R8, gbm_bo_use_flags); |
| if (!gbm_bo) |
| return VK_ERROR_OUT_OF_DEVICE_MEMORY; |
| |
| /* gbm_bo_get_fd returns a negative error code on failure */ |
| fd = gbm_bo_get_fd(gbm_bo); |
| if (fd < 0) { |
| gbm_bo_destroy(gbm_bo); |
| return fd == -EMFILE ? VK_ERROR_TOO_MANY_OBJECTS : VK_ERROR_OUT_OF_HOST_MEMORY; |
| } |
| |
| *out_gbm_bo = gbm_bo; |
| *out_fd_info = (VkImportMemoryFdInfoKHR){ |
| .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, |
| .pNext = alloc_info->pNext, |
| .fd = fd, |
| .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, |
| }; |
| return VK_SUCCESS; |
| } |
| |
| static void |
| vkr_dispatch_vkAllocateMemory(struct vn_dispatch_context *dispatch, |
| struct vn_command_vkAllocateMemory *args) |
| { |
| struct vkr_context *ctx = dispatch->data; |
| struct vkr_device *dev = vkr_device_from_handle(args->device); |
| struct vkr_physical_device *physical_dev = dev->physical_device; |
| |
| VkMemoryAllocateInfo *alloc_info = (VkMemoryAllocateInfo *)args->pAllocateInfo; |
| const uint32_t mem_type_index = alloc_info->memoryTypeIndex; |
| if (unlikely(mem_type_index >= physical_dev->memory_properties.memoryTypeCount)) { |
| args->ret = VK_ERROR_UNKNOWN; |
| return; |
| } |
| |
| /* translate VkImportMemoryResourceInfoMESA into VkImportMemoryFdInfoKHR in place */ |
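| /* fd is initialized to -1 so the cleanup path below can tell whether a dup'ed fd |
| * still needs to be closed |
| */ |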
| VkImportMemoryFdInfoKHR local_import_info = { .fd = -1 }; |
| VkImportMemoryResourceInfoMESA *res_info = NULL; |
| VkBaseInStructure *prev_of_res_info = vkr_find_prev_struct( |
| alloc_info, VK_STRUCTURE_TYPE_IMPORT_MEMORY_RESOURCE_INFO_MESA); |
| if (prev_of_res_info) { |
| res_info = (VkImportMemoryResourceInfoMESA *)prev_of_res_info->pNext; |
| if (!vkr_get_fd_info_from_resource_info(ctx, res_info, &local_import_info)) { |
| args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE; |
| return; |
| } |
| |
| prev_of_res_info->pNext = (const struct VkBaseInStructure *)&local_import_info; |
| } |
| |
| /* XXX Force dma_buf/opaque fd export or gbm bo import until a new extension that |
| * supports direct export from host visible memory is available. |
| * |
| * Most VkImage and VkBuffer objects are non-external, while most VkDeviceMemory |
| * allocations are external if made from a host visible memory type. We still |
| * violate the spec by binding external memory to a non-external image or buffer, |
| * which needs spec changes via a new extension. |
| * |
| * Skip forcing external if a valid VkImportMemoryResourceInfoMESA is provided, since |
| * the mapping will be set up directly from the existing virgl resource. |
| */ |
| const uint32_t property_flags = |
| physical_dev->memory_properties.memoryTypes[mem_type_index].propertyFlags; |
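| /* bitmask of virgl_resource_fd_type values this memory can later be exported as */ |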
| uint32_t valid_fd_types = 0; |
| struct gbm_bo *gbm_bo = NULL; |
| VkExportMemoryAllocateInfo local_export_info; |
| VkExportMemoryAllocateInfo *export_info = |
| vkr_find_struct(alloc_info->pNext, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO); |
| if ((property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) && !res_info) { |
| /* An implementation can support dma_buf import alongside opaque fd export/import. |
| * If the client driver is using external memory and requesting dma_buf, but the |
| * host lacks dma_buf fd export support, we must take the gbm bo import path instead |
| * of forcing opaque fd export, e.g. when the client driver uses external memory for |
| * a wsi image. |
| */ |
| const bool no_dma_buf_export = |
| !export_info || |
| !(export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT); |
| if (physical_dev->is_dma_buf_fd_export_supported || |
| (physical_dev->is_opaque_fd_export_supported && no_dma_buf_export)) { |
| const VkExternalMemoryHandleTypeFlagBits handle_type = |
| physical_dev->is_dma_buf_fd_export_supported |
| ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT |
| : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT; |
| if (export_info) { |
| export_info->handleTypes |= handle_type; |
| } else { |
| local_export_info = (VkExportMemoryAllocateInfo){ |
| .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, |
| .pNext = alloc_info->pNext, |
| .handleTypes = handle_type, |
| }; |
| export_info = &local_export_info; |
| alloc_info->pNext = &local_export_info; |
| } |
| } else if (physical_dev->EXT_external_memory_dma_buf) { |
| /* Allocate gbm bo to force dma_buf fd import. */ |
| if (export_info) { |
| /* Strip export info since valid_fd_types can only be dma_buf here. */ |
| VkBaseInStructure *prev_of_export_info = vkr_find_prev_struct( |
| alloc_info, VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO); |
| |
| prev_of_export_info->pNext = export_info->pNext; |
| export_info = NULL; |
| } |
| |
| args->ret = vkr_get_fd_info_from_allocation_info(physical_dev, alloc_info, |
| &gbm_bo, &local_import_info); |
| if (args->ret != VK_SUCCESS) |
| return; |
| |
| alloc_info->pNext = &local_import_info; |
| valid_fd_types = 1 << VIRGL_RESOURCE_FD_DMABUF; |
| } |
| } |
| |
| if (export_info) { |
| if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT) |
| valid_fd_types |= 1 << VIRGL_RESOURCE_FD_OPAQUE; |
| if (export_info->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) |
| valid_fd_types |= 1 << VIRGL_RESOURCE_FD_DMABUF; |
| } |
| |
| struct vkr_device_memory *mem = vkr_device_memory_create_and_add(ctx, args); |
| if (!mem) { |
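| /* the memory object was not created; clean up the fd and gbm bo acquired above */ |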
| if (local_import_info.fd >= 0) |
| close(local_import_info.fd); |
| if (gbm_bo) |
| gbm_bo_destroy(gbm_bo); |
| return; |
| } |
| |
| mem->device = dev; |
| mem->property_flags = property_flags; |
| mem->valid_fd_types = valid_fd_types; |
| mem->gbm_bo = gbm_bo; |
| mem->allocation_size = alloc_info->allocationSize; |
| mem->memory_type_index = mem_type_index; |
| } |
| |
| static void |
| vkr_dispatch_vkFreeMemory(struct vn_dispatch_context *dispatch, |
| struct vn_command_vkFreeMemory *args) |
| { |
| struct vkr_device_memory *mem = vkr_device_memory_from_handle(args->memory); |
| if (!mem) |
| return; |
| |
| vkr_device_memory_release(mem); |
| vkr_device_memory_destroy_and_remove(dispatch->data, args); |
| } |
| |
| static void |
| vkr_dispatch_vkGetDeviceMemoryCommitment( |
| UNUSED struct vn_dispatch_context *dispatch, |
| struct vn_command_vkGetDeviceMemoryCommitment *args) |
| { |
| struct vkr_device *dev = vkr_device_from_handle(args->device); |
| struct vn_device_proc_table *vk = &dev->proc_table; |
| |
| vn_replace_vkGetDeviceMemoryCommitment_args_handle(args); |
| vk->GetDeviceMemoryCommitment(args->device, args->memory, |
| args->pCommittedMemoryInBytes); |
| } |
| |
| static void |
| vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress( |
| UNUSED struct vn_dispatch_context *dispatch, |
| struct vn_command_vkGetDeviceMemoryOpaqueCaptureAddress *args) |
| { |
| struct vkr_device *dev = vkr_device_from_handle(args->device); |
| struct vn_device_proc_table *vk = &dev->proc_table; |
| |
| vn_replace_vkGetDeviceMemoryOpaqueCaptureAddress_args_handle(args); |
| args->ret = vk->GetDeviceMemoryOpaqueCaptureAddress(args->device, args->pInfo); |
| } |
| |
| static void |
| vkr_dispatch_vkGetMemoryResourcePropertiesMESA( |
| struct vn_dispatch_context *dispatch, |
| struct vn_command_vkGetMemoryResourcePropertiesMESA *args) |
| { |
| struct vkr_context *ctx = dispatch->data; |
| struct vkr_device *dev = vkr_device_from_handle(args->device); |
| struct vn_device_proc_table *vk = &dev->proc_table; |
| |
| struct vkr_resource *res = vkr_context_get_resource(ctx, args->resourceId); |
| if (!res) { |
| vkr_log("failed to query resource props: invalid res_id %u", args->resourceId); |
| vkr_context_set_fatal(ctx); |
| return; |
| } |
| |
| if (res->fd_type != VIRGL_RESOURCE_FD_DMABUF) { |
| args->ret = VK_ERROR_INVALID_EXTERNAL_HANDLE; |
| return; |
| } |
| |
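| /* query which of the device's memory types can import the resource's dma_buf */ |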
| static const VkExternalMemoryHandleTypeFlagBits handle_type = |
| VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; |
| VkMemoryFdPropertiesKHR mem_fd_props = { |
| .sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR, |
| .pNext = NULL, |
| .memoryTypeBits = 0, |
| }; |
| vn_replace_vkGetMemoryResourcePropertiesMESA_args_handle(args); |
| args->ret = |
| vk->GetMemoryFdPropertiesKHR(args->device, handle_type, res->u.fd, &mem_fd_props); |
| if (args->ret != VK_SUCCESS) |
| return; |
| |
| args->pMemoryResourceProperties->memoryTypeBits = mem_fd_props.memoryTypeBits; |
| |
| VkMemoryResourceAllocationSizePropertiesMESA *alloc_size_props = |
| vkr_find_struct(args->pMemoryResourceProperties->pNext, |
| VK_STRUCTURE_TYPE_MEMORY_RESOURCE_ALLOCATION_SIZE_PROPERTIES_MESA); |
| if (alloc_size_props) |
| alloc_size_props->allocationSize = res->size; |
| } |
| |
| void |
| vkr_context_init_device_memory_dispatch(struct vkr_context *ctx) |
| { |
| struct vn_dispatch_context *dispatch = &ctx->dispatch; |
| |
| dispatch->dispatch_vkAllocateMemory = vkr_dispatch_vkAllocateMemory; |
| dispatch->dispatch_vkFreeMemory = vkr_dispatch_vkFreeMemory; |
| dispatch->dispatch_vkMapMemory = NULL; |
| dispatch->dispatch_vkUnmapMemory = NULL; |
| dispatch->dispatch_vkFlushMappedMemoryRanges = NULL; |
| dispatch->dispatch_vkInvalidateMappedMemoryRanges = NULL; |
| dispatch->dispatch_vkGetDeviceMemoryCommitment = |
| vkr_dispatch_vkGetDeviceMemoryCommitment; |
| dispatch->dispatch_vkGetDeviceMemoryOpaqueCaptureAddress = |
| vkr_dispatch_vkGetDeviceMemoryOpaqueCaptureAddress; |
| |
| dispatch->dispatch_vkGetMemoryResourcePropertiesMESA = |
| vkr_dispatch_vkGetMemoryResourcePropertiesMESA; |
| } |
| |
| void |
| vkr_device_memory_release(struct vkr_device_memory *mem) |
| { |
| if (mem->gbm_bo) |
| gbm_bo_destroy(mem->gbm_bo); |
| } |
| |
| bool |
| vkr_device_memory_export_blob(struct vkr_device_memory *mem, |
| uint64_t blob_size, |
| uint32_t blob_flags, |
| struct virgl_context_blob *out_blob) |
| { |
| /* A device memory can only be exported once; we don't want two resources pointing |
| * to the same storage. |
| */ |
| if (mem->exported) { |
| vkr_log("mem has been exported"); |
| return false; |
| } |
| |
| uint32_t map_info = VIRGL_RENDERER_MAP_CACHE_NONE; |
| if (blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_MAPPABLE) { |
| const bool visible = mem->property_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; |
| const bool coherent = mem->property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; |
| const bool cached = mem->property_flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT; |
| if (!visible) { |
| vkr_log("mem cannot support mappable blob"); |
| return false; |
| } |
| |
| /* XXX guessed */ |
| map_info = (coherent && cached) ? VIRGL_RENDERER_MAP_CACHE_CACHED |
| : VIRGL_RENDERER_MAP_CACHE_WC; |
| } |
| |
| const bool can_export_dma_buf = mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_DMABUF); |
| const bool can_export_opaque = mem->valid_fd_types & (1 << VIRGL_RESOURCE_FD_OPAQUE); |
| enum virgl_resource_fd_type fd_type; |
| VkExternalMemoryHandleTypeFlagBits handle_type; |
| struct virgl_resource_vulkan_info vulkan_info; |
| if (blob_flags & VIRGL_RENDERER_BLOB_FLAG_USE_CROSS_DEVICE) { |
| if (!can_export_dma_buf) { |
| vkr_log("mem cannot export to dma_buf for cross device blob sharing"); |
| return false; |
| } |
| fd_type = VIRGL_RESOURCE_FD_DMABUF; |
| handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; |
| } else if (can_export_dma_buf) { |
| /* prefer dmabuf for easier mapping? */ |
| fd_type = VIRGL_RESOURCE_FD_DMABUF; |
| handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT; |
| } else if (can_export_opaque) { |
| /* prefer opaque for performance? */ |
| fd_type = VIRGL_RESOURCE_FD_OPAQUE; |
| handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT; |
| |
| STATIC_ASSERT(sizeof(vulkan_info.device_uuid) == VK_UUID_SIZE); |
| STATIC_ASSERT(sizeof(vulkan_info.driver_uuid) == VK_UUID_SIZE); |
| |
| const VkPhysicalDeviceIDProperties *id_props = |
| &mem->device->physical_device->id_properties; |
| memcpy(vulkan_info.device_uuid, id_props->deviceUUID, VK_UUID_SIZE); |
| memcpy(vulkan_info.driver_uuid, id_props->driverUUID, VK_UUID_SIZE); |
| |
| vulkan_info.allocation_size = mem->allocation_size; |
| vulkan_info.memory_type_index = mem->memory_type_index; |
| } else { |
| vkr_log("mem is not exportable"); |
| return false; |
| } |
| |
| int fd; |
| if (mem->gbm_bo) { |
| assert(handle_type == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT); |
| assert(can_export_dma_buf && !can_export_opaque); |
| |
| /* gbm_bo_get_fd returns a negative error code on failure */ |
| fd = gbm_bo_get_fd(mem->gbm_bo); |
| if (fd < 0) { |
| vkr_log("mem gbm_bo_get_fd failed (ret %d)", fd); |
| return false; |
| } |
| } else { |
| struct vn_device_proc_table *vk = &mem->device->proc_table; |
| const VkMemoryGetFdInfoKHR fd_info = { |
| .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, |
| .memory = mem->base.handle.device_memory, |
| .handleType = handle_type, |
| }; |
| VkResult ret = vk->GetMemoryFdKHR(mem->device->base.handle.device, &fd_info, &fd); |
| if (ret != VK_SUCCESS) { |
| vkr_log("mem fd export failed (vk ret %d)", ret); |
| return false; |
| } |
| } |
| |
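| /* lseek(SEEK_END) reports the size of the dma_buf; make sure the exported fd is |
| * large enough to back the requested blob |
| */ |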
| if (fd_type == VIRGL_RESOURCE_FD_DMABUF) { |
| const off_t dma_buf_size = lseek(fd, 0, SEEK_END); |
| if (dma_buf_size < 0 || (uint64_t)dma_buf_size < blob_size) { |
| vkr_log("mem dma_buf_size %lld < blob_size %" PRIu64, (long long)dma_buf_size, |
| blob_size); |
| close(fd); |
| return false; |
| } |
| } |
| |
| mem->exported = true; |
| |
| *out_blob = (struct virgl_context_blob){ |
| .type = fd_type, |
| .u.fd = fd, |
| .map_info = map_info, |
| .vulkan_info = vulkan_info, |
| }; |
| |
| return true; |
| } |