| // |
| // Copyright 2016 The ANGLE Project Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| // |
| // ContextVk.cpp: |
| // Implements the class methods for ContextVk. |
| // |
| |
| #include "libANGLE/renderer/vulkan/ContextVk.h" |
| |
| #include "common/bitset_utils.h" |
| #include "common/debug.h" |
| #include "common/utilities.h" |
| #include "libANGLE/Context.h" |
| #include "libANGLE/Display.h" |
| #include "libANGLE/Program.h" |
| #include "libANGLE/Semaphore.h" |
| #include "libANGLE/Surface.h" |
| #include "libANGLE/angletypes.h" |
| #include "libANGLE/renderer/renderer_utils.h" |
| #include "libANGLE/renderer/vulkan/BufferVk.h" |
| #include "libANGLE/renderer/vulkan/CompilerVk.h" |
| #include "libANGLE/renderer/vulkan/DisplayVk.h" |
| #include "libANGLE/renderer/vulkan/FenceNVVk.h" |
| #include "libANGLE/renderer/vulkan/FramebufferVk.h" |
| #include "libANGLE/renderer/vulkan/MemoryObjectVk.h" |
| #include "libANGLE/renderer/vulkan/OverlayVk.h" |
| #include "libANGLE/renderer/vulkan/ProgramPipelineVk.h" |
| #include "libANGLE/renderer/vulkan/ProgramVk.h" |
| #include "libANGLE/renderer/vulkan/QueryVk.h" |
| #include "libANGLE/renderer/vulkan/RenderbufferVk.h" |
| #include "libANGLE/renderer/vulkan/RendererVk.h" |
| #include "libANGLE/renderer/vulkan/SamplerVk.h" |
| #include "libANGLE/renderer/vulkan/SemaphoreVk.h" |
| #include "libANGLE/renderer/vulkan/ShaderVk.h" |
| #include "libANGLE/renderer/vulkan/SurfaceVk.h" |
| #include "libANGLE/renderer/vulkan/SyncVk.h" |
| #include "libANGLE/renderer/vulkan/TextureVk.h" |
| #include "libANGLE/renderer/vulkan/TransformFeedbackVk.h" |
| #include "libANGLE/renderer/vulkan/VertexArrayVk.h" |
| |
| #include "libANGLE/trace.h" |
| |
| #include <iostream> |
| |
| namespace rx |
| { |
| |
| namespace |
| { |
| // For DescriptorSetUpdates |
| constexpr size_t kDescriptorBufferInfosInitialSize = 8; |
| constexpr size_t kDescriptorImageInfosInitialSize = 4; |
| constexpr size_t kDescriptorWriteInfosInitialSize = |
| kDescriptorBufferInfosInitialSize + kDescriptorImageInfosInitialSize; |
| constexpr size_t kDescriptorBufferViewsInitialSize = 0; |
| |
| // If the total size of copyBufferToImage commands in the outside command buffer reaches the |
| // threshold below, the latter is flushed. |
| constexpr VkDeviceSize kMaxBufferToImageCopySize = 64 * 1024 * 1024; |
| |
| // For shader uniforms such as gl_DepthRange and the viewport size. |
| struct GraphicsDriverUniforms |
| { |
| std::array<float, 4> viewport; |
| |
| // 32 bits for 32 clip planes |
| uint32_t enabledClipPlanes; |
| |
| uint32_t advancedBlendEquation; |
| int32_t xfbVerticesPerInstance; |
| |
| // Used to replace gl_NumSamples, which has no direct SPIR-V equivalent. |
| int32_t numSamples; |
| |
| std::array<int32_t, 4> xfbBufferOffsets; |
| |
| // .xy contain packed 8-bit values for atomic counter buffer offsets. These offsets are |
| // within Vulkan's minStorageBufferOffsetAlignment limit and are used to support unaligned |
| // offsets allowed in GL. |
| // |
| // .zw are unused. |
| std::array<uint32_t, 4> acbBufferOffsets; |
| |
| // We'll use x, y, z for near / far / diff respectively. |
| std::array<float, 4> depthRange; |
| }; |
| static_assert(sizeof(GraphicsDriverUniforms) % (sizeof(uint32_t) * 4) == 0, |
| "GraphicsDriverUniforms should be 16-byte aligned"); |
| |
| // TODO: http://issuetracker.google.com/173636783 Once the bug is fixed, we should remove this. |
| struct GraphicsDriverUniformsExtended |
| { |
| GraphicsDriverUniforms common; |
| |
| // Used to flip gl_FragCoord (both .xy for Android pre-rotation; only .y for desktop) |
| std::array<float, 2> halfRenderArea; |
| std::array<float, 2> flipXY; |
| std::array<float, 2> negFlipXY; |
| uint32_t dither; |
| uint32_t padding; |
| |
| // Used to pre-rotate gl_FragCoord for swapchain images on Android (a mat2, which is padded to |
| // the size of two vec4's). |
| std::array<float, 8> fragRotation; |
| }; |
| |
| struct ComputeDriverUniforms |
| { |
| // Atomic counter buffer offsets with the same layout as in GraphicsDriverUniforms. |
| std::array<uint32_t, 4> acbBufferOffsets; |
| }; |
| |
| GLenum DefaultGLErrorCode(VkResult result) |
| { |
| switch (result) |
| { |
| case VK_ERROR_OUT_OF_HOST_MEMORY: |
| case VK_ERROR_OUT_OF_DEVICE_MEMORY: |
| case VK_ERROR_TOO_MANY_OBJECTS: |
| return GL_OUT_OF_MEMORY; |
| default: |
| return GL_INVALID_OPERATION; |
| } |
| } |
| |
| constexpr gl::ShaderMap<vk::ImageLayout> kShaderReadOnlyImageLayouts = { |
| {gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderReadOnly}, |
| {gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersReadOnly}, |
| {gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersReadOnly}, |
| {gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersReadOnly}, |
| {gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderReadOnly}, |
| {gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderReadOnly}}; |
| |
| constexpr gl::ShaderMap<vk::ImageLayout> kShaderWriteImageLayouts = { |
| {gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderWrite}, |
| {gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersWrite}, |
| {gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersWrite}, |
| {gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersWrite}, |
| {gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderWrite}, |
| {gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderWrite}}; |
| |
| constexpr VkBufferUsageFlags kVertexBufferUsage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; |
| constexpr size_t kDynamicVertexDataSize = 16 * 1024; |
| constexpr size_t kDriverUniformsAllocatorPageSize = 4 * 1024; |
| |
| bool CanMultiDrawIndirectUseCmd(ContextVk *contextVk, |
| VertexArrayVk *vertexArray, |
| gl::PrimitiveMode mode, |
| GLsizei drawcount, |
| GLsizei stride) |
| { |
| // Use the generic implementation if multiDrawIndirect is disabled, if line loop is being used |
| // for multiDraw, if drawcount is greater than maxDrawIndirectCount, or if there are streaming |
| // vertex attributes. |
| ASSERT(drawcount > 1); |
| const bool supportsMultiDrawIndirect = |
| contextVk->getFeatures().supportsMultiDrawIndirect.enabled; |
| const bool isMultiDrawLineLoop = (mode == gl::PrimitiveMode::LineLoop); |
| const bool isDrawCountBeyondLimit = |
| (static_cast<uint32_t>(drawcount) > |
| contextVk->getRenderer()->getPhysicalDeviceProperties().limits.maxDrawIndirectCount); |
| const bool isMultiDrawWithStreamingAttribs = vertexArray->getStreamingVertexAttribsMask().any(); |
| |
| const bool canMultiDrawIndirectUseCmd = supportsMultiDrawIndirect && !isMultiDrawLineLoop && |
| !isDrawCountBeyondLimit && |
| !isMultiDrawWithStreamingAttribs; |
| return canMultiDrawIndirectUseCmd; |
| } |
| |
| uint32_t GetCoverageSampleCount(const gl::State &glState, FramebufferVk *drawFramebuffer) |
| { |
| if (!glState.isSampleCoverageEnabled()) |
| { |
| return 0; |
| } |
| |
| // Get a fraction of the samples based on the coverage parameters. |
| // There are multiple ways to obtain an integer value from a float: |
| // truncation, ceil and round. |
| // |
| // round() provides a more even distribution of values but doesn't seem to play well |
| // with all vendors (AMD). A way to work around this is to increase the comparison threshold |
| // of dEQP tests. Though this takes care of dEQP tests, other apps would still have issues. |
| // |
| // Truncation provides an uneven distribution near the edges of the interval but seems to |
| // play well with all vendors. |
| // |
| // We are going with truncation for expediency. |
| return static_cast<uint32_t>(glState.getSampleCoverageValue() * drawFramebuffer->getSamples()); |
| } |
| |
| void ApplySampleCoverage(const gl::State &glState, uint32_t coverageSampleCount, uint32_t *maskOut) |
| { |
| if (!glState.isSampleCoverageEnabled()) |
| { |
| return; |
| } |
| |
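| // Build a mask with the low |coverageSampleCount| bits set. |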
| uint32_t coverageMask = angle::BitMask<uint32_t>(coverageSampleCount); |
| |
| if (glState.getSampleCoverageInvert()) |
| { |
| coverageMask = ~coverageMask; |
| } |
| |
| *maskOut &= coverageMask; |
| } |
| |
| bool IsRenderPassStartedAndUsesImage(const vk::RenderPassCommandBufferHelper &renderPassCommands, |
| const vk::ImageHelper &image) |
| { |
| return renderPassCommands.started() && renderPassCommands.usesImage(image); |
| } |
| |
| bool IsRenderPassStartedAndTransitionsImageLayout( |
| const vk::RenderPassCommandBufferHelper &renderPassCommands, |
| vk::ImageHelper &image) |
| { |
| return renderPassCommands.started() && renderPassCommands.isImageWithLayoutTransition(image); |
| } |
| |
| // When an Android surface is rotated differently than the device's native orientation, ANGLE must |
| // rotate gl_Position in the last pre-rasterization shader and gl_FragCoord in the fragment shader. |
| // Rotation of gl_Position is done in SPIR-V. The following are the rotation matrices for the |
| // fragment shader. |
| // |
| // Note: these are mat2's that are appropriately padded (4 floats per row). |
| using PreRotationMatrixValues = std::array<float, 8>; |
| constexpr angle::PackedEnumMap<rx::SurfaceRotation, PreRotationMatrixValues> kFragRotationMatrices = |
| {{{SurfaceRotation::Identity, {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::Rotated90Degrees, {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::Rotated180Degrees, {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::Rotated270Degrees, {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::FlippedIdentity, {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::FlippedRotated90Degrees, |
| {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::FlippedRotated180Degrees, |
| {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}}, |
| {SurfaceRotation::FlippedRotated270Degrees, |
| {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}}}}; |
| |
| bool IsRotatedAspectRatio(SurfaceRotation rotation) |
| { |
| return ((rotation == SurfaceRotation::Rotated90Degrees) || |
| (rotation == SurfaceRotation::Rotated270Degrees) || |
| (rotation == SurfaceRotation::FlippedRotated90Degrees) || |
| (rotation == SurfaceRotation::FlippedRotated270Degrees)); |
| } |
| |
| SurfaceRotation DetermineSurfaceRotation(gl::Framebuffer *framebuffer, |
| WindowSurfaceVk *windowSurface) |
| { |
| if (windowSurface && framebuffer->isDefault()) |
| { |
| switch (windowSurface->getPreTransform()) |
| { |
| case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR: |
| // Do not rotate gl_Position (surface matches the device's orientation): |
| return SurfaceRotation::Identity; |
| case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR: |
| // Rotate gl_Position 90 degrees: |
| return SurfaceRotation::Rotated90Degrees; |
| case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR: |
| // Rotate gl_Position 180 degrees: |
| return SurfaceRotation::Rotated180Degrees; |
| case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR: |
| // Rotate gl_Position 270 degrees: |
| return SurfaceRotation::Rotated270Degrees; |
| default: |
| UNREACHABLE(); |
| return SurfaceRotation::Identity; |
| } |
| } |
| else |
| { |
| // Do not rotate gl_Position (offscreen framebuffer): |
| return SurfaceRotation::Identity; |
| } |
| } |
| |
| // Should not generate a copy with modern C++ (return-value optimization applies). |
| EventName GetTraceEventName(const char *title, uint32_t counter) |
| { |
| EventName buf; |
| snprintf(buf.data(), kMaxGpuEventNameLen - 1, "%s %u", title, counter); |
| return buf; |
| } |
| |
| vk::ResourceAccess GetColorAccess(const gl::State &state, |
| const gl::FramebufferState &framebufferState, |
| const gl::DrawBufferMask &emulatedAlphaMask, |
| bool hasFramebufferFetch, |
| size_t colorIndexGL) |
| { |
| // No access if draw buffer is disabled altogether |
| // Without framebuffer fetch: |
| // No access if color output is masked, or rasterizer discard is enabled |
| // With framebuffer fetch: |
| // Read access if color output is masked, or rasterizer discard is enabled |
| |
| if (!framebufferState.getEnabledDrawBuffers().test(colorIndexGL)) |
| { |
| return vk::ResourceAccess::Unused; |
| } |
| |
| const gl::BlendStateExt &blendStateExt = state.getBlendStateExt(); |
| uint8_t colorMask = gl::BlendStateExt::ColorMaskStorage::GetValueIndexed( |
| colorIndexGL, blendStateExt.getColorMaskBits()); |
| if (emulatedAlphaMask[colorIndexGL]) |
| { |
| colorMask &= ~VK_COLOR_COMPONENT_A_BIT; |
| } |
| const bool isOutputMasked = colorMask == 0 || state.isRasterizerDiscardEnabled(); |
| |
| if (isOutputMasked) |
| { |
| return hasFramebufferFetch ? vk::ResourceAccess::ReadOnly : vk::ResourceAccess::Unused; |
| } |
| |
| return vk::ResourceAccess::Write; |
| } |
| |
| vk::ResourceAccess GetDepthAccess(const gl::DepthStencilState &dsState, |
| UpdateDepthFeedbackLoopReason reason) |
| { |
| // Skip if depth/stencil not actually accessed. |
| if (reason == UpdateDepthFeedbackLoopReason::None) |
| { |
| return vk::ResourceAccess::Unused; |
| } |
| |
| // Note that clear commands don't respect the depth test enable, only the mask. |
| // Note: other state could be considered here too in the future, such as rasterizer discard. |
| if (!dsState.depthTest && reason != UpdateDepthFeedbackLoopReason::Clear) |
| { |
| return vk::ResourceAccess::Unused; |
| } |
| return dsState.isDepthMaskedOut() ? vk::ResourceAccess::ReadOnly : vk::ResourceAccess::Write; |
| } |
| |
| vk::ResourceAccess GetStencilAccess(const gl::DepthStencilState &dsState, |
| UpdateDepthFeedbackLoopReason reason) |
| { |
| // Skip if depth/stencil not actually accessed. |
| if (reason == UpdateDepthFeedbackLoopReason::None) |
| { |
| return vk::ResourceAccess::Unused; |
| } |
| |
| // Note that clear commands don't respect the stencil test enable, only the mask. |
| // Note: other state could be considered here too in the future, such as rasterizer discard. |
| if (!dsState.stencilTest && reason != UpdateDepthFeedbackLoopReason::Clear) |
| { |
| return vk::ResourceAccess::Unused; |
| } |
| |
| return dsState.isStencilNoOp() && dsState.isStencilBackNoOp() ? vk::ResourceAccess::ReadOnly |
| : vk::ResourceAccess::Write; |
| } |
| |
| egl::ContextPriority GetContextPriority(const gl::State &state) |
| { |
| return egl::FromEGLenum<egl::ContextPriority>(state.getContextPriority()); |
| } |
| |
| vk::ImageLayout GetImageReadLayout(TextureVk *textureVk, |
| const gl::ProgramExecutable &executable, |
| size_t textureUnit, |
| PipelineType pipelineType) |
| { |
| vk::ImageHelper &image = textureVk->getImage(); |
| |
| // If this texture has been bound as an image and the current program executable accesses |
| // images, we consider this image's layout to be writeable. |
| if (textureVk->hasBeenBoundAsImage() && executable.hasImages()) |
| { |
| return pipelineType == PipelineType::Compute ? vk::ImageLayout::ComputeShaderWrite |
| : vk::ImageLayout::AllGraphicsShadersWrite; |
| } |
| |
| gl::ShaderBitSet remainingShaderBits = |
| executable.getSamplerShaderBitsForTextureUnitIndex(textureUnit); |
| ASSERT(remainingShaderBits.any()); |
| gl::ShaderType firstShader = remainingShaderBits.first(); |
| gl::ShaderType lastShader = remainingShaderBits.last(); |
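| // Drop the first and last stages from the set; combined with the firstShader != lastShader |
| // check below, this tells whether more than one stage samples this texture. |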
| remainingShaderBits.reset(firstShader); |
| remainingShaderBits.reset(lastShader); |
| |
| if (image.hasRenderPassUsageFlag(vk::RenderPassUsage::RenderTargetAttachment)) |
| { |
| // Right now we set this flag only when RenderTargetAttachment is set since we do |
| // not track all textures in the renderpass. |
| image.setRenderPassUsageFlag(vk::RenderPassUsage::TextureSampler); |
| |
| if (image.isDepthOrStencil()) |
| { |
| if (image.hasRenderPassUsageFlag(vk::RenderPassUsage::ReadOnlyAttachment)) |
| { |
| if (firstShader == gl::ShaderType::Fragment) |
| { |
| ASSERT(remainingShaderBits.none() && lastShader == firstShader); |
| return vk::ImageLayout::DSAttachmentReadAndFragmentShaderRead; |
| } |
| return vk::ImageLayout::DSAttachmentReadAndAllShadersRead; |
| } |
| |
| return firstShader == gl::ShaderType::Fragment |
| ? vk::ImageLayout::DSAttachmentWriteAndFragmentShaderRead |
| : vk::ImageLayout::DSAttachmentWriteAndAllShadersRead; |
| } |
| |
| return firstShader == gl::ShaderType::Fragment |
| ? vk::ImageLayout::ColorAttachmentAndFragmentShaderRead |
| : vk::ImageLayout::ColorAttachmentAndAllShadersRead; |
| } |
| |
| if (image.isDepthOrStencil()) |
| { |
| // We always use a depth-stencil read-only layout for any depth Textures to simplify |
| // our implementation's handling of depth-stencil read-only mode. We don't have to |
| // split a RenderPass to transition a depth texture from shader-read to read-only. |
| // This improves performance in Manhattan. Future optimizations are likely possible |
| // here including using specialized barriers without breaking the RenderPass. |
| if (firstShader == gl::ShaderType::Fragment) |
| { |
| ASSERT(remainingShaderBits.none() && lastShader == firstShader); |
| return vk::ImageLayout::DSAttachmentReadAndFragmentShaderRead; |
| } |
| return vk::ImageLayout::DSAttachmentReadAndAllShadersRead; |
| } |
| |
| // We barrier against either: |
| // - Vertex only |
| // - Fragment only |
| // - Pre-fragment only (vertex, geometry and tessellation together) |
| if (remainingShaderBits.any() || firstShader != lastShader) |
| { |
| return lastShader == gl::ShaderType::Fragment ? vk::ImageLayout::AllGraphicsShadersReadOnly |
| : vk::ImageLayout::PreFragmentShadersReadOnly; |
| } |
| |
| return kShaderReadOnlyImageLayouts[firstShader]; |
| } |
| |
| vk::ImageLayout GetImageWriteLayoutAndSubresource(const gl::ImageUnit &imageUnit, |
| vk::ImageHelper &image, |
| gl::ShaderBitSet shaderStages, |
| gl::LevelIndex *levelOut, |
| uint32_t *layerStartOut, |
| uint32_t *layerCountOut) |
| { |
| *levelOut = gl::LevelIndex(static_cast<uint32_t>(imageUnit.level)); |
| |
| *layerStartOut = 0; |
| *layerCountOut = image.getLayerCount(); |
| if (imageUnit.layered == GL_FALSE) |
| { |
| // A non-layered binding selects a single layer of the image. |
| *layerStartOut = imageUnit.layer; |
| *layerCountOut = 1; |
| } |
| |
| gl::ShaderType firstShader = shaderStages.first(); |
| gl::ShaderType lastShader = shaderStages.last(); |
| shaderStages.reset(firstShader); |
| shaderStages.reset(lastShader); |
| // We barrier against either: |
| // - Vertex only |
| // - Fragment only |
| // - Pre-fragment only (vertex, geometry and tessellation together) |
| if (shaderStages.any() || firstShader != lastShader) |
| { |
| return lastShader == gl::ShaderType::Fragment ? vk::ImageLayout::AllGraphicsShadersWrite |
| : vk::ImageLayout::PreFragmentShadersWrite; |
| } |
| |
| return kShaderWriteImageLayouts[firstShader]; |
| } |
| |
| void OnTextureBufferRead(ContextVk *contextVk, |
| BufferVk *bufferVk, |
| gl::ShaderBitSet stages, |
| vk::CommandBufferHelperCommon *commandBufferHelper) |
| { |
| vk::BufferHelper &buffer = bufferVk->getBuffer(); |
| |
| ASSERT(stages.any()); |
| |
| // TODO: accept multiple stages in bufferRead. http://anglebug.com/3573 |
| for (gl::ShaderType stage : stages) |
| { |
| // Note: if another range of the same buffer is simultaneously used for storage, |
| // such as for transform feedback output, or SSBO, unnecessary barriers can be |
| // generated. |
| commandBufferHelper->bufferRead(contextVk, VK_ACCESS_SHADER_READ_BIT, |
| vk::GetPipelineStage(stage), &buffer); |
| } |
| } |
| |
| void OnImageBufferWrite(ContextVk *contextVk, |
| BufferVk *bufferVk, |
| gl::ShaderBitSet stages, |
| vk::CommandBufferHelperCommon *commandBufferHelper) |
| { |
| vk::BufferHelper &buffer = bufferVk->getBuffer(); |
| |
| // TODO: accept multiple stages in bufferWrite. http://anglebug.com/3573 |
| for (gl::ShaderType stage : stages) |
| { |
| commandBufferHelper->bufferWrite( |
| contextVk, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, |
| vk::GetPipelineStage(stage), vk::AliasingMode::Disallowed, &buffer); |
| } |
| } |
| |
| constexpr angle::PackedEnumMap<RenderPassClosureReason, const char *> kRenderPassClosureReason = {{ |
| {RenderPassClosureReason::AlreadySpecifiedElsewhere, nullptr}, |
| {RenderPassClosureReason::ContextDestruction, "Render pass closed due to context destruction"}, |
| {RenderPassClosureReason::ContextChange, "Render pass closed due to context change"}, |
| {RenderPassClosureReason::GLFlush, "Render pass closed due to glFlush()"}, |
| {RenderPassClosureReason::GLFinish, "Render pass closed due to glFinish()"}, |
| {RenderPassClosureReason::EGLSwapBuffers, "Render pass closed due to eglSwapBuffers()"}, |
| {RenderPassClosureReason::EGLWaitClient, "Render pass closed due to eglWaitClient()"}, |
| {RenderPassClosureReason::FramebufferBindingChange, |
| "Render pass closed due to framebuffer binding change"}, |
| {RenderPassClosureReason::FramebufferChange, "Render pass closed due to framebuffer change"}, |
| {RenderPassClosureReason::NewRenderPass, |
| "Render pass closed due to starting a new render pass"}, |
| {RenderPassClosureReason::BufferUseThenXfbWrite, |
| "Render pass closed due to buffer use as transform feedback output after prior use in render " |
| "pass"}, |
| {RenderPassClosureReason::XfbWriteThenVertexIndexBuffer, |
| "Render pass closed due to transform feedback buffer use as vertex/index input"}, |
| {RenderPassClosureReason::XfbWriteThenIndirectDrawBuffer, |
| "Render pass closed due to indirect draw buffer previously used as transform feedback output " |
| "in render pass"}, |
| {RenderPassClosureReason::XfbResumeAfterDrawBasedClear, |
| "Render pass closed due to transform feedback resume after clear through draw"}, |
| {RenderPassClosureReason::DepthStencilUseInFeedbackLoop, |
| "Render pass closed due to depth/stencil attachment use under feedback loop"}, |
| {RenderPassClosureReason::DepthStencilWriteAfterFeedbackLoop, |
| "Render pass closed due to depth/stencil attachment write after feedback loop"}, |
| {RenderPassClosureReason::PipelineBindWhileXfbActive, |
| "Render pass closed due to graphics pipeline change while transform feedback is active"}, |
| {RenderPassClosureReason::BufferWriteThenMap, |
| "Render pass closed due to mapping buffer being written to by said render pass"}, |
| {RenderPassClosureReason::BufferUseThenOutOfRPRead, |
| "Render pass closed due to non-render-pass read of buffer that was written to in render pass"}, |
| {RenderPassClosureReason::BufferUseThenOutOfRPWrite, |
| "Render pass closed due to non-render-pass write of buffer that was used in render pass"}, |
| {RenderPassClosureReason::ImageUseThenOutOfRPRead, |
| "Render pass closed due to non-render-pass read of image that was used in render pass"}, |
| {RenderPassClosureReason::ImageUseThenOutOfRPWrite, |
| "Render pass closed due to non-render-pass write of image that was used in render pass"}, |
| {RenderPassClosureReason::XfbWriteThenComputeRead, |
| "Render pass closed due to compute read of buffer previously used as transform feedback " |
| "output in render pass"}, |
| {RenderPassClosureReason::XfbWriteThenIndirectDispatchBuffer, |
| "Render pass closed due to indirect dispatch buffer previously used as transform feedback " |
| "output in render pass"}, |
| {RenderPassClosureReason::ImageAttachmentThenComputeRead, |
| "Render pass closed due to compute read of image previously used as framebuffer attachment in " |
| "render pass"}, |
| {RenderPassClosureReason::GetQueryResult, "Render pass closed due to getting query result"}, |
| {RenderPassClosureReason::BeginNonRenderPassQuery, |
| "Render pass closed due to non-render-pass query begin"}, |
| {RenderPassClosureReason::EndNonRenderPassQuery, |
| "Render pass closed due to non-render-pass query end"}, |
| {RenderPassClosureReason::TimestampQuery, "Render pass closed due to timestamp query"}, |
| {RenderPassClosureReason::GLReadPixels, "Render pass closed due to glReadPixels()"}, |
| {RenderPassClosureReason::BufferUseThenReleaseToExternal, |
| "Render pass closed due to buffer (used by render pass) release to external"}, |
| {RenderPassClosureReason::ImageUseThenReleaseToExternal, |
| "Render pass closed due to image (used by render pass) release to external"}, |
| {RenderPassClosureReason::BufferInUseWhenSynchronizedMap, |
| "Render pass closed due to mapping buffer in use by GPU without GL_MAP_UNSYNCHRONIZED_BIT"}, |
| {RenderPassClosureReason::ImageOrphan, "Render pass closed due to EGL image being orphaned"}, |
| {RenderPassClosureReason::GLMemoryBarrierThenStorageResource, |
| "Render pass closed due to glMemoryBarrier before storage output in render pass"}, |
| {RenderPassClosureReason::StorageResourceUseThenGLMemoryBarrier, |
| "Render pass closed due to glMemoryBarrier after storage output in render pass"}, |
| {RenderPassClosureReason::ExternalSemaphoreSignal, |
| "Render pass closed due to external semaphore signal"}, |
| {RenderPassClosureReason::SyncObjectInit, "Render pass closed due to sync object insertion"}, |
| {RenderPassClosureReason::SyncObjectWithFdInit, |
| "Render pass closed due to sync object with fd insertion"}, |
| {RenderPassClosureReason::SyncObjectClientWait, |
| "Render pass closed due to sync object client wait"}, |
| {RenderPassClosureReason::SyncObjectServerWait, |
| "Render pass closed due to sync object server wait"}, |
| {RenderPassClosureReason::XfbPause, "Render pass closed due to transform feedback pause"}, |
| {RenderPassClosureReason::FramebufferFetchEmulation, |
| "Render pass closed due to framebuffer fetch emulation"}, |
| {RenderPassClosureReason::ColorBufferInvalidate, |
| "Render pass closed due to glInvalidateFramebuffer() on a color buffer"}, |
| {RenderPassClosureReason::GenerateMipmapOnCPU, |
| "Render pass closed due to fallback to CPU when generating mipmaps"}, |
| {RenderPassClosureReason::CopyTextureOnCPU, |
| "Render pass closed due to fallback to CPU when copying texture"}, |
| {RenderPassClosureReason::TextureReformatToRenderable, |
| "Render pass closed due to reformatting texture to a renderable fallback"}, |
| {RenderPassClosureReason::DeviceLocalBufferMap, |
| "Render pass closed due to mapping device local buffer"}, |
| {RenderPassClosureReason::PrepareForBlit, "Render pass closed prior to draw-based blit"}, |
| {RenderPassClosureReason::PrepareForImageCopy, |
| "Render pass closed prior to draw-based image copy"}, |
| {RenderPassClosureReason::TemporaryForImageClear, |
| "Temporary render pass used for image clear closed"}, |
| {RenderPassClosureReason::TemporaryForImageCopy, |
| "Temporary render pass used for image copy closed"}, |
| {RenderPassClosureReason::TemporaryForOverlayDraw, |
| "Temporary render pass used for overlay draw closed"}, |
| }}; |
| |
| VkDependencyFlags GetLocalDependencyFlags(ContextVk *contextVk) |
| { |
| VkDependencyFlags dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; |
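| // If multiview is active, the dependency should also be view-local. |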
| if (contextVk->getCurrentViewCount() > 0) |
| { |
| dependencyFlags |= VK_DEPENDENCY_VIEW_LOCAL_BIT; |
| } |
| return dependencyFlags; |
| } |
| |
| void DumpPipelineCacheGraph(const std::ostringstream &graph) |
| { |
| std::ostream &out = std::cout; |
| |
| out << "digraph {\n" |
| << " node [shape=point]\n"; |
| out << graph.str(); |
| out << "}\n"; |
| } |
| |
| } // anonymous namespace |
| |
| // Not necessary once upgraded to C++17. |
| constexpr ContextVk::DirtyBits ContextVk::kIndexAndVertexDirtyBits; |
| constexpr ContextVk::DirtyBits ContextVk::kPipelineDescAndBindingDirtyBits; |
| constexpr ContextVk::DirtyBits ContextVk::kTexturesAndDescSetDirtyBits; |
| constexpr ContextVk::DirtyBits ContextVk::kResourcesAndDescSetDirtyBits; |
| constexpr ContextVk::DirtyBits ContextVk::kXfbBuffersAndDescSetDirtyBits; |
| constexpr ContextVk::DirtyBits ContextVk::kDriverUniformsAndBindingDirtyBits; |
| |
| void ContextVk::flushDescriptorSetUpdates() |
| { |
| mPerfCounters.writeDescriptorSets += |
| mUpdateDescriptorSetsBuilder.flushDescriptorSetUpdates(getDevice()); |
| } |
| |
| ANGLE_INLINE void ContextVk::onRenderPassFinished(RenderPassClosureReason reason) |
| { |
| pauseRenderPassQueriesIfActive(); |
| |
| if (mRenderPassCommandBuffer != nullptr) |
| { |
| // If a reason is specified, add it to the command buffer right before ending the render |
| // pass, so it will show up in GPU debuggers. |
| const char *reasonText = kRenderPassClosureReason[reason]; |
| if (reasonText) |
| { |
| insertEventMarkerImpl(GL_DEBUG_SOURCE_API, reasonText); |
| } |
| } |
| |
| mRenderPassCommandBuffer = nullptr; |
| mGraphicsDirtyBits.set(DIRTY_BIT_RENDER_PASS); |
| } |
| |
| ContextVk::DriverUniformsDescriptorSet::DriverUniformsDescriptorSet() |
| : descriptorSet(VK_NULL_HANDLE), currentBuffer(nullptr) |
| {} |
| |
| ContextVk::DriverUniformsDescriptorSet::~DriverUniformsDescriptorSet() = default; |
| |
| void ContextVk::DriverUniformsDescriptorSet::init(RendererVk *rendererVk) |
| { |
| size_t minAlignment = static_cast<size_t>( |
| rendererVk->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment); |
| dynamicBuffer.init(rendererVk, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, minAlignment, |
| kDriverUniformsAllocatorPageSize, true); |
| } |
| |
| void ContextVk::DriverUniformsDescriptorSet::destroy(RendererVk *renderer) |
| { |
| descriptorSetLayout.reset(); |
| descriptorPoolBinding.reset(); |
| dynamicBuffer.destroy(renderer); |
| } |
| |
| // ContextVk implementation. |
| ContextVk::ContextVk(const gl::State &state, gl::ErrorSet *errorSet, RendererVk *renderer) |
| : ContextImpl(state, errorSet), |
| vk::Context(renderer), |
| mGraphicsDirtyBitHandlers{}, |
| mComputeDirtyBitHandlers{}, |
| mRenderPassCommandBuffer(nullptr), |
| mCurrentGraphicsPipeline(nullptr), |
| mCurrentComputePipeline(nullptr), |
| mCurrentDrawMode(gl::PrimitiveMode::InvalidEnum), |
| mCurrentWindowSurface(nullptr), |
| mCurrentRotationDrawFramebuffer(SurfaceRotation::Identity), |
| mCurrentRotationReadFramebuffer(SurfaceRotation::Identity), |
| mActiveRenderPassQueries{}, |
| mLastIndexBufferOffset(nullptr), |
| mCurrentIndexBufferOffset(0), |
| mCurrentDrawElementsType(gl::DrawElementsType::InvalidEnum), |
| mXfbBaseVertex(0), |
| mXfbVertexCountPerInstance(0), |
| mClearColorValue{}, |
| mClearDepthStencilValue{}, |
| mClearColorMasks(0), |
| mFlipYForCurrentSurface(false), |
| mFlipViewportForDrawFramebuffer(false), |
| mFlipViewportForReadFramebuffer(false), |
| mIsAnyHostVisibleBufferWritten(false), |
| mEmulateSeamfulCubeMapSampling(false), |
| mOutsideRenderPassCommands(nullptr), |
| mRenderPassCommands(nullptr), |
| mQueryEventType(GraphicsEventCmdBuf::NotInQueryCmd), |
| mGpuEventsEnabled(false), |
| mPrimaryBufferEventCounter(0), |
| mHasDeferredFlush(false), |
| mHasAnyCommandsPendingSubmission(false), |
| mTotalBufferToImageCopySize(0), |
| mGpuClockSync{std::numeric_limits<double>::max(), std::numeric_limits<double>::max()}, |
| mGpuEventTimestampOrigin(0), |
| mContextPriority(renderer->getDriverPriority(GetContextPriority(state))), |
| mShareGroupVk(vk::GetImpl(state.getShareGroup())) |
| { |
| ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::ContextVk"); |
| memset(&mClearColorValue, 0, sizeof(mClearColorValue)); |
| memset(&mClearDepthStencilValue, 0, sizeof(mClearDepthStencilValue)); |
| memset(&mViewport, 0, sizeof(mViewport)); |
| memset(&mScissor, 0, sizeof(mScissor)); |
| |
| // Ensure viewport is within Vulkan requirements |
| vk::ClampViewport(&mViewport); |
| |
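| // All dirty bits except the index buffer's apply to non-indexed draws; indexed draws use |
| // the full set. |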
| mNonIndexedDirtyBitsMask.set(); |
| mNonIndexedDirtyBitsMask.reset(DIRTY_BIT_INDEX_BUFFER); |
| |
| mIndexedDirtyBitsMask.set(); |
| |
| // Once a command buffer is ended, all bindings (through |vkCmdBind*| calls) are lost per Vulkan |
| // spec. Once a new command buffer is allocated, we must make sure every previously bound |
| // resource is bound again. |
| // |
| // Note that currently these dirty bits are set every time a new render pass command buffer is |
| // begun. However, using ANGLE's SecondaryCommandBuffer, the Vulkan command buffer (which is |
| // the primary command buffer) is not ended, so technically we don't need to rebind these. |
| mNewGraphicsCommandBufferDirtyBits = DirtyBits{ |
| DIRTY_BIT_RENDER_PASS, |
| DIRTY_BIT_COLOR_ACCESS, |
| DIRTY_BIT_DEPTH_STENCIL_ACCESS, |
| DIRTY_BIT_PIPELINE_BINDING, |
| DIRTY_BIT_TEXTURES, |
| DIRTY_BIT_VERTEX_BUFFERS, |
| DIRTY_BIT_INDEX_BUFFER, |
| DIRTY_BIT_SHADER_RESOURCES, |
| DIRTY_BIT_DESCRIPTOR_SETS, |
| DIRTY_BIT_DRIVER_UNIFORMS_BINDING, |
| }; |
| if (getFeatures().supportsTransformFeedbackExtension.enabled) |
| { |
| mNewGraphicsCommandBufferDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS); |
| } |
| |
| mNewComputeCommandBufferDirtyBits = |
| DirtyBits{DIRTY_BIT_PIPELINE_BINDING, DIRTY_BIT_TEXTURES, DIRTY_BIT_SHADER_RESOURCES, |
| DIRTY_BIT_DESCRIPTOR_SETS, DIRTY_BIT_DRIVER_UNIFORMS_BINDING}; |
| |
| mDynamicStateDirtyBits = DirtyBits{ |
| DIRTY_BIT_DYNAMIC_VIEWPORT, DIRTY_BIT_DYNAMIC_SCISSOR, |
| DIRTY_BIT_DYNAMIC_LINE_WIDTH, DIRTY_BIT_DYNAMIC_DEPTH_BIAS, |
| DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS, DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK, |
| DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK, DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE, |
| }; |
| if (getFeatures().supportsExtendedDynamicState.enabled) |
| { |
| mDynamicStateDirtyBits |= DirtyBits{ |
| DIRTY_BIT_DYNAMIC_CULL_MODE, |
| DIRTY_BIT_DYNAMIC_FRONT_FACE, |
| DIRTY_BIT_VERTEX_BUFFERS, |
| DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE, |
| DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE, |
| DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP, |
| DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE, |
| DIRTY_BIT_DYNAMIC_STENCIL_OP, |
| }; |
| } |
| if (getFeatures().supportsExtendedDynamicState2.enabled) |
| { |
| mDynamicStateDirtyBits |= DirtyBits{ |
| DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE, |
| DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE, |
| DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE, |
| }; |
| } |
| if (getFeatures().supportsFragmentShadingRate.enabled) |
| { |
| mDynamicStateDirtyBits.set(DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE); |
| } |
| |
| mNewGraphicsCommandBufferDirtyBits |= mDynamicStateDirtyBits; |
| |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_MEMORY_BARRIER] = |
| &ContextVk::handleDirtyGraphicsMemoryBarrier; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DEFAULT_ATTRIBS] = |
| &ContextVk::handleDirtyGraphicsDefaultAttribs; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_PIPELINE_DESC] = |
| &ContextVk::handleDirtyGraphicsPipelineDesc; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE] = |
| &ContextVk::handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_RENDER_PASS] = &ContextVk::handleDirtyGraphicsRenderPass; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_EVENT_LOG] = &ContextVk::handleDirtyGraphicsEventLog; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_COLOR_ACCESS] = &ContextVk::handleDirtyGraphicsColorAccess; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DEPTH_STENCIL_ACCESS] = |
| &ContextVk::handleDirtyGraphicsDepthStencilAccess; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_PIPELINE_BINDING] = |
| &ContextVk::handleDirtyGraphicsPipelineBinding; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_TEXTURES] = &ContextVk::handleDirtyGraphicsTextures; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_VERTEX_BUFFERS] = |
| &ContextVk::handleDirtyGraphicsVertexBuffers; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_INDEX_BUFFER] = &ContextVk::handleDirtyGraphicsIndexBuffer; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_UNIFORMS] = &ContextVk::handleDirtyGraphicsUniforms; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS] = |
| &ContextVk::handleDirtyGraphicsDriverUniforms; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS_BINDING] = |
| &ContextVk::handleDirtyGraphicsDriverUniformsBinding; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_SHADER_RESOURCES] = |
| &ContextVk::handleDirtyGraphicsShaderResources; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER] = |
| &ContextVk::handleDirtyGraphicsFramebufferFetchBarrier; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_BLEND_BARRIER] = |
| &ContextVk::handleDirtyGraphicsBlendBarrier; |
| if (getFeatures().supportsTransformFeedbackExtension.enabled) |
| { |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS] = |
| &ContextVk::handleDirtyGraphicsTransformFeedbackBuffersExtension; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME] = |
| &ContextVk::handleDirtyGraphicsTransformFeedbackResume; |
| } |
| else if (getFeatures().emulateTransformFeedback.enabled) |
| { |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS] = |
| &ContextVk::handleDirtyGraphicsTransformFeedbackBuffersEmulation; |
| } |
| |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DESCRIPTOR_SETS] = |
| &ContextVk::handleDirtyGraphicsDescriptorSets; |
| |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_VIEWPORT] = |
| &ContextVk::handleDirtyGraphicsDynamicViewport; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_SCISSOR] = |
| &ContextVk::handleDirtyGraphicsDynamicScissor; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_LINE_WIDTH] = |
| &ContextVk::handleDirtyGraphicsDynamicLineWidth; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_DEPTH_BIAS] = |
| &ContextVk::handleDirtyGraphicsDynamicDepthBias; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS] = |
| &ContextVk::handleDirtyGraphicsDynamicBlendConstants; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK] = |
| &ContextVk::handleDirtyGraphicsDynamicStencilCompareMask; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK] = |
| &ContextVk::handleDirtyGraphicsDynamicStencilWriteMask; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE] = |
| &ContextVk::handleDirtyGraphicsDynamicStencilReference; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_CULL_MODE] = |
| &ContextVk::handleDirtyGraphicsDynamicCullMode; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_FRONT_FACE] = |
| &ContextVk::handleDirtyGraphicsDynamicFrontFace; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicDepthTestEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicDepthWriteEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP] = |
| &ContextVk::handleDirtyGraphicsDynamicDepthCompareOp; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicStencilTestEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_STENCIL_OP] = |
| &ContextVk::handleDirtyGraphicsDynamicStencilOp; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicRasterizerDiscardEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicDepthBiasEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE] = |
| &ContextVk::handleDirtyGraphicsDynamicPrimitiveRestartEnable; |
| mGraphicsDirtyBitHandlers[DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE] = |
| &ContextVk::handleDirtyGraphicsDynamicFragmentShadingRate; |
| |
| mComputeDirtyBitHandlers[DIRTY_BIT_MEMORY_BARRIER] = |
| &ContextVk::handleDirtyComputeMemoryBarrier; |
| mComputeDirtyBitHandlers[DIRTY_BIT_EVENT_LOG] = &ContextVk::handleDirtyComputeEventLog; |
| mComputeDirtyBitHandlers[DIRTY_BIT_PIPELINE_DESC] = &ContextVk::handleDirtyComputePipelineDesc; |
| mComputeDirtyBitHandlers[DIRTY_BIT_PIPELINE_BINDING] = |
| &ContextVk::handleDirtyComputePipelineBinding; |
| mComputeDirtyBitHandlers[DIRTY_BIT_TEXTURES] = &ContextVk::handleDirtyComputeTextures; |
| mComputeDirtyBitHandlers[DIRTY_BIT_UNIFORMS] = &ContextVk::handleDirtyComputeUniforms; |
| mComputeDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS] = |
| &ContextVk::handleDirtyComputeDriverUniforms; |
| mComputeDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS_BINDING] = |
| &ContextVk::handleDirtyComputeDriverUniformsBinding; |
| mComputeDirtyBitHandlers[DIRTY_BIT_SHADER_RESOURCES] = |
| &ContextVk::handleDirtyComputeShaderResources; |
| mComputeDirtyBitHandlers[DIRTY_BIT_DESCRIPTOR_SETS] = |
| &ContextVk::handleDirtyComputeDescriptorSets; |
| |
| mGraphicsDirtyBits = mNewGraphicsCommandBufferDirtyBits; |
| mComputeDirtyBits = mNewComputeCommandBufferDirtyBits; |
| |
| mActiveImages.fill(nullptr); |
| |
| // The following dirty bits don't affect the program pipeline: |
| // |
| // - READ_FRAMEBUFFER_BINDING only affects operations that read from said framebuffer, |
| // - CLEAR_* only affect following clear calls, |
| // - PACK/UNPACK_STATE only affect texture data upload/download, |
| // - *_BINDING only affect descriptor sets. |
| // |
| // Additionally, state that is set dynamically doesn't invalidate the program pipeline. |
| // |
| mPipelineDirtyBitsMask.set(); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_READ_FRAMEBUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_COLOR); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_DEPTH); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_STENCIL); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNPACK_STATE); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNPACK_BUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_PACK_STATE); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_PACK_BUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_RENDERBUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DRAW_INDIRECT_BUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DISPATCH_INDIRECT_BUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SAMPLER_BINDINGS); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_TEXTURE_BINDINGS); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_IMAGE_BINDINGS); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_TRANSFORM_FEEDBACK_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNIFORM_BUFFER_BINDINGS); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SHADER_STORAGE_BUFFER_BINDING); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING); |
| |
| // Dynamic state in core Vulkan 1.0: |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_VIEWPORT); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SCISSOR_TEST_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SCISSOR); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_LINE_WIDTH); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_POLYGON_OFFSET); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_BLEND_COLOR); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_WRITEMASK_FRONT); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_WRITEMASK_BACK); |
| |
| // Dynamic state in VK_EXT_extended_dynamic_state: |
| if (getFeatures().supportsExtendedDynamicState.enabled) |
| { |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CULL_FACE_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CULL_FACE); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_FRONT_FACE); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DEPTH_TEST_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DEPTH_MASK); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DEPTH_FUNC); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_TEST_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_FUNCS_FRONT); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_FUNCS_BACK); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_OPS_FRONT); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_STENCIL_OPS_BACK); |
| } |
| |
| if (getFeatures().supportsExtendedDynamicState2.enabled) |
| { |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_RASTERIZER_DISCARD_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_POLYGON_OFFSET_FILL_ENABLED); |
| mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_PRIMITIVE_RESTART_ENABLED); |
| } |
| |
| angle::PerfMonitorCounterGroup vulkanGroup; |
| vulkanGroup.name = "vulkan"; |
| |
| #define ANGLE_ADD_PERF_MONITOR_COUNTER_GROUP(COUNTER) \ |
| { \ |
| angle::PerfMonitorCounter counter; \ |
| counter.name = #COUNTER; \ |
| counter.value = 0; \ |
| vulkanGroup.counters.push_back(counter); \ |
| } |
| |
| ANGLE_VK_PERF_COUNTERS_X(ANGLE_ADD_PERF_MONITOR_COUNTER_GROUP) |
| |
| #undef ANGLE_ADD_PERF_MONITOR_COUNTER_GROUP |
| |
| mPerfMonitorCounters.push_back(vulkanGroup); |
| } |
| |
| ContextVk::~ContextVk() |
| { |
| if (!mPipelineCacheGraph.str().empty()) |
| { |
| DumpPipelineCacheGraph(mPipelineCacheGraph); |
| } |
| } |
| |
| void ContextVk::onDestroy(const gl::Context *context) |
| { |
| // Remove context from the share group |
| mShareGroupVk->removeContext(this); |
| |
| // This will not destroy any resources. It will release them to be collected after finish. |
| mIncompleteTextures.onDestroy(context); |
| |
| // Flush and complete current outstanding work before destruction. |
| (void)finishImpl(RenderPassClosureReason::ContextDestruction); |
| |
| VkDevice device = getDevice(); |
| |
| for (DriverUniformsDescriptorSet &driverUniforms : mDriverUniforms) |
| { |
| driverUniforms.destroy(mRenderer); |
| } |
| |
| for (vk::DynamicDescriptorPool &dynamicDescriptorPool : mDriverUniformsDescriptorPools) |
| { |
| dynamicDescriptorPool.destroy(mRenderer, VulkanCacheType::DriverUniformsDescriptors); |
| } |
| |
| mDefaultUniformStorage.release(mRenderer); |
| mEmptyBuffer.release(mRenderer); |
| |
| for (vk::DynamicBuffer &defaultBuffer : mStreamedVertexBuffers) |
| { |
| defaultBuffer.destroy(mRenderer); |
| } |
| |
| for (vk::DynamicQueryPool &queryPool : mQueryPools) |
| { |
| queryPool.destroy(device); |
| } |
| |
| // Recycle the current command buffers. |
| mRenderer->recycleOutsideRenderPassCommandBufferHelper(device, &mOutsideRenderPassCommands); |
| mRenderer->recycleRenderPassCommandBufferHelper(device, &mRenderPassCommands); |
| |
| mUtils.destroy(mRenderer); |
| |
| mRenderPassCache.destroy(mRenderer); |
| mShaderLibrary.destroy(device); |
| mGpuEventQueryPool.destroy(device); |
| mCommandPools.outsideRenderPassPool.destroy(device); |
| mCommandPools.renderPassPool.destroy(device); |
| |
| ASSERT(mCurrentGarbage.empty()); |
| } |
| |
| VertexArrayVk *ContextVk::getVertexArray() const |
| { |
| return vk::GetImpl(mState.getVertexArray()); |
| } |
| |
| FramebufferVk *ContextVk::getDrawFramebuffer() const |
| { |
| return vk::GetImpl(mState.getDrawFramebuffer()); |
| } |
| |
| ProgramVk *ContextVk::getProgram() const |
| { |
| return vk::SafeGetImpl(mState.getProgram()); |
| } |
| |
| ProgramPipelineVk *ContextVk::getProgramPipeline() const |
| { |
| return vk::SafeGetImpl(mState.getProgramPipeline()); |
| } |
| |
| angle::Result ContextVk::getIncompleteTexture(const gl::Context *context, |
| gl::TextureType type, |
| gl::SamplerFormat format, |
| gl::Texture **textureOut) |
| { |
| return mIncompleteTextures.getIncompleteTexture(context, type, format, this, textureOut); |
| } |
| |
| angle::Result ContextVk::initialize() |
| { |
| ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::initialize"); |
| |
| ANGLE_TRY(mQueryPools[gl::QueryType::AnySamples].init(this, VK_QUERY_TYPE_OCCLUSION, |
| vk::kDefaultOcclusionQueryPoolSize)); |
| ANGLE_TRY(mQueryPools[gl::QueryType::AnySamplesConservative].init( |
| this, VK_QUERY_TYPE_OCCLUSION, vk::kDefaultOcclusionQueryPoolSize)); |
| |
| // Only initialize the timestamp query pools if timestamp queries are supported on this queue. |
| if (mRenderer->getQueueFamilyProperties().timestampValidBits > 0) |
| { |
| ANGLE_TRY(mQueryPools[gl::QueryType::Timestamp].init(this, VK_QUERY_TYPE_TIMESTAMP, |
| vk::kDefaultTimestampQueryPoolSize)); |
| ANGLE_TRY(mQueryPools[gl::QueryType::TimeElapsed].init(this, VK_QUERY_TYPE_TIMESTAMP, |
| vk::kDefaultTimestampQueryPoolSize)); |
| } |
| |
| if (getFeatures().supportsTransformFeedbackExtension.enabled) |
| { |
| ANGLE_TRY(mQueryPools[gl::QueryType::TransformFeedbackPrimitivesWritten].init( |
| this, VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT, |
| vk::kDefaultTransformFeedbackQueryPoolSize)); |
| } |
| |
| // The primitives generated query is provided through the Vulkan pipeline statistics query if |
| // supported. TODO: If VK_EXT_primitives_generated_query is supported, use that instead. |
| // http://anglebug.com/5430 |
| if (getFeatures().supportsPipelineStatisticsQuery.enabled) |
| { |
| ANGLE_TRY(mQueryPools[gl::QueryType::PrimitivesGenerated].init( |
| this, VK_QUERY_TYPE_PIPELINE_STATISTICS, vk::kDefaultPrimitivesGeneratedQueryPoolSize)); |
| } |
| |
| // Init GLES to Vulkan index type map. |
| initIndexTypeMap(); |
| |
| // Init driver uniforms and get the descriptor set layouts. |
| for (PipelineType pipeline : angle::AllEnums<PipelineType>()) |
| { |
| mDriverUniforms[pipeline].init(mRenderer); |
| |
| vk::DescriptorSetLayoutDesc desc = getDriverUniformsDescriptorSetDesc(); |
| ANGLE_TRY(getDescriptorSetLayoutCache().getDescriptorSetLayout( |
| this, desc, &mDriverUniforms[pipeline].descriptorSetLayout)); |
| |
| vk::DescriptorSetLayoutBindingVector bindingVector; |
| std::vector<VkSampler> immutableSamplers; |
| desc.unpackBindings(&bindingVector, &immutableSamplers); |
| std::vector<VkDescriptorPoolSize> descriptorPoolSizes; |
| |
| for (const VkDescriptorSetLayoutBinding &binding : bindingVector) |
| { |
| if (binding.descriptorCount > 0) |
| { |
| VkDescriptorPoolSize poolSize = {}; |
| |
| poolSize.type = binding.descriptorType; |
| poolSize.descriptorCount = binding.descriptorCount; |
| descriptorPoolSizes.emplace_back(poolSize); |
| } |
| } |
| if (!descriptorPoolSizes.empty()) |
| { |
| ANGLE_TRY(mDriverUniformsDescriptorPools[pipeline].init( |
| this, descriptorPoolSizes.data(), descriptorPoolSizes.size(), |
| mDriverUniforms[pipeline].descriptorSetLayout.get().getHandle())); |
| } |
| } |
| |
| mGraphicsPipelineDesc.reset(new vk::GraphicsPipelineDesc()); |
| mGraphicsPipelineDesc->initDefaults(this); |
| |
| // Initialize current value/default attribute buffers. |
| for (vk::DynamicBuffer &buffer : mStreamedVertexBuffers) |
| { |
| buffer.init(mRenderer, kVertexBufferUsage, 1, kDynamicVertexDataSize, true); |
| } |
| |
| #if ANGLE_ENABLE_VULKAN_GPU_TRACE_EVENTS |
| angle::PlatformMethods *platform = ANGLEPlatformCurrent(); |
| ASSERT(platform); |
| |
| // GPU tracing workaround for anglebug.com/2927. The renderer should not emit gpu events |
| // during platform discovery. |
| const unsigned char *gpuEventsEnabled = |
| platform->getTraceCategoryEnabledFlag(platform, "gpu.angle.gpu"); |
| mGpuEventsEnabled = gpuEventsEnabled && *gpuEventsEnabled; |
| #endif |
| |
| mEmulateSeamfulCubeMapSampling = shouldEmulateSeamfulCubeMapSampling(); |
| |
| // Assign initial command buffers from queue |
| ANGLE_TRY(vk::OutsideRenderPassCommandBuffer::InitializeCommandPool( |
| this, &mCommandPools.outsideRenderPassPool, mRenderer->getDeviceQueueIndex(), |
| hasProtectedContent())); |
| ANGLE_TRY(vk::RenderPassCommandBuffer::InitializeCommandPool( |
| this, &mCommandPools.renderPassPool, mRenderer->getDeviceQueueIndex(), |
| hasProtectedContent())); |
| ANGLE_TRY(mRenderer->getOutsideRenderPassCommandBufferHelper( |
| this, &mCommandPools.outsideRenderPassPool, &mOutsideRenderPassCommands)); |
| ANGLE_TRY(mRenderer->getRenderPassCommandBufferHelper(this, &mCommandPools.renderPassPool, |
| &mRenderPassCommands)); |
| |
| if (mGpuEventsEnabled) |
| { |
| // GPU events should only be available if timestamp queries are available. |
| ASSERT(mRenderer->getQueueFamilyProperties().timestampValidBits > 0); |
| // Calculate the difference between CPU and GPU clocks for GPU event reporting. |
| ANGLE_TRY(mGpuEventQueryPool.init(this, VK_QUERY_TYPE_TIMESTAMP, |
| vk::kDefaultTimestampQueryPoolSize)); |
| ANGLE_TRY(synchronizeCpuGpuTime()); |
| |
| EventName eventName = GetTraceEventName("Primary", mPrimaryBufferEventCounter); |
| ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(), |
| TRACE_EVENT_PHASE_BEGIN, eventName)); |
| } |
| |
| size_t minAlignment = static_cast<size_t>( |
| mRenderer->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment); |
| mDefaultUniformStorage.init(mRenderer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, minAlignment, |
| mRenderer->getDefaultUniformBufferSize(), true); |
| |
| // Initialize an "empty" buffer for use with default uniform blocks where there are no uniforms, |
| // or atomic counter buffer array indices that are unused. |
| constexpr VkBufferUsageFlags kEmptyBufferUsage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | |
| VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | |
| VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; |
| VkBufferCreateInfo emptyBufferInfo = {}; |
| emptyBufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; |
| emptyBufferInfo.flags = 0; |
| emptyBufferInfo.size = 16; |
| emptyBufferInfo.usage = kEmptyBufferUsage; |
| emptyBufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; |
| emptyBufferInfo.queueFamilyIndexCount = 0; |
| emptyBufferInfo.pQueueFamilyIndices = nullptr; |
| constexpr VkMemoryPropertyFlags kMemoryType = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
| ANGLE_TRY(mEmptyBuffer.init(this, emptyBufferInfo, kMemoryType)); |
| |
| // Add context into the share group |
| mShareGroupVk->addContext(this); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::flush(const gl::Context *context) |
| { |
| // Skip the flush if there's nothing recorded. |
| if (!mHasAnyCommandsPendingSubmission && !hasStartedRenderPass() && |
| mOutsideRenderPassCommands->empty()) |
| { |
| return angle::Result::Continue; |
| } |
| |
| const bool isSingleBuffer = |
| (mCurrentWindowSurface != nullptr) && mCurrentWindowSurface->isSharedPresentMode(); |
| |
| // Don't defer flushes in single-buffer mode. In this mode, the application is not required to |
| // call eglSwapBuffers(), and glFlush() is expected to ensure that work is submitted. |
| if (mRenderer->getFeatures().deferFlushUntilEndRenderPass.enabled && hasStartedRenderPass() && |
| !isSingleBuffer) |
| { |
| mHasDeferredFlush = true; |
| return angle::Result::Continue; |
| } |
| |
| if (mRenderer->getFeatures().swapbuffersOnFlushOrFinishWithSingleBuffer.enabled && |
| isSingleBuffer) |
| { |
| return mCurrentWindowSurface->onSharedPresentContextFlush(context); |
| } |
| |
| return flushImpl(nullptr, RenderPassClosureReason::GLFlush); |
| } |
| |
| angle::Result ContextVk::finish(const gl::Context *context) |
| { |
| if (mRenderer->getFeatures().swapbuffersOnFlushOrFinishWithSingleBuffer.enabled && |
| (mCurrentWindowSurface != nullptr) && mCurrentWindowSurface->isSharedPresentMode()) |
| { |
| ANGLE_TRY(mCurrentWindowSurface->onSharedPresentContextFlush(context)); |
| } |
| else |
| { |
| ANGLE_TRY(finishImpl(RenderPassClosureReason::GLFinish)); |
| } |
| |
| syncObjectPerfCounters(mRenderer->getCommandQueuePerfCounters()); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::setupDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| GLint firstVertexOrInvalid, |
| GLsizei vertexOrIndexCount, |
| GLsizei instanceCount, |
| gl::DrawElementsType indexTypeOrInvalid, |
| const void *indices, |
| DirtyBits dirtyBitMask) |
| { |
| // Set any dirty bits that depend on draw call parameters or other objects. |
| if (mode != mCurrentDrawMode) |
| { |
| invalidateCurrentGraphicsPipeline(); |
| mCurrentDrawMode = mode; |
| mGraphicsPipelineDesc->updateTopology(&mGraphicsPipelineTransition, mCurrentDrawMode); |
| } |
| |
| // Must be called before the command buffer is started; it may call finish internally. |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| if (vertexArrayVk->getStreamingVertexAttribsMask().any()) |
| { |
| // All client attribs and any emulated buffered attribs will be updated. |
| ANGLE_TRY(vertexArrayVk->updateStreamedAttribs(context, firstVertexOrInvalid, |
| vertexOrIndexCount, instanceCount, |
| indexTypeOrInvalid, indices)); |
| |
| mGraphicsDirtyBits.set(DIRTY_BIT_VERTEX_BUFFERS); |
| } |
| |
| ProgramExecutableVk *programExecutableVk = getExecutable(); |
| if (programExecutableVk->hasDirtyUniforms()) |
| { |
| mGraphicsDirtyBits.set(DIRTY_BIT_UNIFORMS); |
| } |
| |
| // Update transform feedback offsets on every draw call when emulating transform feedback. This |
| // relies on the fact that no geometry/tessellation, indirect or indexed calls are supported in |
| // ES3.1 (and emulation is not done for ES3.2). |
| if (getFeatures().emulateTransformFeedback.enabled && |
| mState.isTransformFeedbackActiveUnpaused()) |
| { |
| ASSERT(firstVertexOrInvalid != -1); |
| mXfbBaseVertex = firstVertexOrInvalid; |
| mXfbVertexCountPerInstance = vertexOrIndexCount; |
| invalidateGraphicsDriverUniforms(); |
| } |
| |
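| // Process only the dirty bits relevant to this draw call type. |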
| DirtyBits dirtyBits = mGraphicsDirtyBits & dirtyBitMask; |
| |
| if (dirtyBits.none()) |
| { |
| ASSERT(mRenderPassCommandBuffer); |
| return angle::Result::Continue; |
| } |
| |
| // Flush any relevant dirty bits. |
| for (DirtyBits::Iterator dirtyBitIter = dirtyBits.begin(); dirtyBitIter != dirtyBits.end(); |
| ++dirtyBitIter) |
| { |
| ASSERT(mGraphicsDirtyBitHandlers[*dirtyBitIter]); |
| ANGLE_TRY((this->*mGraphicsDirtyBitHandlers[*dirtyBitIter])(&dirtyBitIter, dirtyBitMask)); |
| } |
| |
| mGraphicsDirtyBits &= ~dirtyBitMask; |
| |
// The render pass must always be available at this point.
| ASSERT(mRenderPassCommandBuffer); |
| |
| return angle::Result::Continue; |
| } |
| |
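// setupIndexedDraw prepares the index buffer binding before deferring to setupDraw. Client-side
// indices are converted/streamed into a device buffer, and uint8 indices are converted (on the
// CPU if the source buffer is host-visible and idle, otherwise on the GPU) when the hardware
// can't consume them directly.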
| angle::Result ContextVk::setupIndexedDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| GLsizei indexCount, |
| GLsizei instanceCount, |
| gl::DrawElementsType indexType, |
| const void *indices) |
| { |
| ASSERT(mode != gl::PrimitiveMode::LineLoop); |
| |
| if (indexType != mCurrentDrawElementsType) |
| { |
| mCurrentDrawElementsType = indexType; |
| ANGLE_TRY(onIndexBufferChange(nullptr)); |
| } |
| |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| const gl::Buffer *elementArrayBuffer = vertexArrayVk->getState().getElementArrayBuffer(); |
| if (!elementArrayBuffer) |
| { |
| BufferBindingDirty bindingDirty; |
| ANGLE_TRY(vertexArrayVk->convertIndexBufferCPU(this, indexType, indexCount, indices, |
| &bindingDirty)); |
| |
// Only set the dirty bit when the bound buffer has actually changed.
| if (bindingDirty == BufferBindingDirty::Yes) |
| { |
| mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER); |
| mCurrentIndexBufferOffset = 0; |
| } |
| } |
| else |
| { |
| mCurrentIndexBufferOffset = reinterpret_cast<VkDeviceSize>(indices); |
| |
| if (indices != mLastIndexBufferOffset) |
| { |
| mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER); |
| mLastIndexBufferOffset = indices; |
| } |
| |
// Draws with LineLoop mode or a GL_UNSIGNED_BYTE index type may allocate their own element
// buffer and modify mCurrentElementArrayBuffer. When we switch out of such a draw mode, we
// must reset mCurrentElementArrayBuffer back to the vertex array's element buffer. Since in
// either case the DIRTY_BIT_INDEX_BUFFER dirty bit is set, we use that bit to re-sync
// mCurrentElementArrayBuffer.
| if (mGraphicsDirtyBits[DIRTY_BIT_INDEX_BUFFER]) |
| { |
| vertexArrayVk->updateCurrentElementArrayBuffer(); |
| } |
| |
| if (shouldConvertUint8VkIndexType(indexType) && mGraphicsDirtyBits[DIRTY_BIT_INDEX_BUFFER]) |
| { |
| ANGLE_VK_PERF_WARNING(this, GL_DEBUG_SEVERITY_LOW, |
| "Potential inefficiency emulating uint8 vertex attributes due to " |
| "lack of hardware support"); |
| |
| BufferVk *bufferVk = vk::GetImpl(elementArrayBuffer); |
| vk::BufferHelper &bufferHelper = bufferVk->getBuffer(); |
| |
| if (bufferHelper.isHostVisible() && |
| !bufferHelper.isCurrentlyInUse(getLastCompletedQueueSerial())) |
| { |
| uint8_t *src = nullptr; |
| ANGLE_TRY( |
| bufferVk->mapImpl(this, GL_MAP_READ_BIT, reinterpret_cast<void **>(&src))); |
| // Note: bufferOffset is not added here because mapImpl already adds it. |
| src += reinterpret_cast<uintptr_t>(indices); |
| const size_t byteCount = static_cast<size_t>(elementArrayBuffer->getSize()) - |
| reinterpret_cast<uintptr_t>(indices); |
| BufferBindingDirty bindingDirty; |
| ANGLE_TRY(vertexArrayVk->convertIndexBufferCPU(this, indexType, byteCount, src, |
| &bindingDirty)); |
| ANGLE_TRY(bufferVk->unmapImpl(this)); |
| } |
| else |
| { |
| ANGLE_TRY(vertexArrayVk->convertIndexBufferGPU(this, bufferVk, indices)); |
| } |
| |
| mCurrentIndexBufferOffset = 0; |
| } |
| } |
| |
| return setupDraw(context, mode, 0, indexCount, instanceCount, indexType, indices, |
| mIndexedDirtyBitsMask); |
| } |
| |
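// setupIndirectDraw adds the indirect parameter buffer read to the render pass on top of the
// common setupDraw processing. If the indirect buffer was just written by transform feedback,
// the render pass is broken first so that write is made visible to the indirect read.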
| angle::Result ContextVk::setupIndirectDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| DirtyBits dirtyBitMask, |
| vk::BufferHelper *indirectBuffer) |
| { |
| GLint firstVertex = -1; |
| GLsizei vertexCount = 0; |
| GLsizei instanceCount = 1; |
| |
| // Break the render pass if the indirect buffer was previously used as the output from transform |
| // feedback. |
| if (mCurrentTransformFeedbackBuffers.contains(indirectBuffer)) |
| { |
| ANGLE_TRY( |
| flushCommandsAndEndRenderPass(RenderPassClosureReason::XfbWriteThenIndirectDrawBuffer)); |
| } |
| |
| ANGLE_TRY(setupDraw(context, mode, firstVertex, vertexCount, instanceCount, |
| gl::DrawElementsType::InvalidEnum, nullptr, dirtyBitMask)); |
| |
| // Process indirect buffer after render pass has started. |
| mRenderPassCommands->bufferRead(this, VK_ACCESS_INDIRECT_COMMAND_READ_BIT, |
| vk::PipelineStage::DrawIndirect, indirectBuffer); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::setupIndexedIndirectDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| gl::DrawElementsType indexType, |
| vk::BufferHelper *indirectBuffer) |
| { |
| ASSERT(mode != gl::PrimitiveMode::LineLoop); |
| |
| if (indexType != mCurrentDrawElementsType) |
| { |
| mCurrentDrawElementsType = indexType; |
| ANGLE_TRY(onIndexBufferChange(nullptr)); |
| } |
| |
| return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, indirectBuffer); |
| } |
| |
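// Line loop draws are emulated. Conceptually, glDrawElements(GL_LINE_LOOP, ...) becomes an
// indexed line strip draw whose generated index buffer repeats the first index at the end to
// close the loop; e.g. indices {0, 1, 2, 3} become {0, 1, 2, 3, 0}. For indirect draws, a
// patched indirect parameter buffer is generated as well.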
| angle::Result ContextVk::setupLineLoopIndexedIndirectDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| gl::DrawElementsType indexType, |
| vk::BufferHelper *srcIndirectBuf, |
| VkDeviceSize indirectBufferOffset, |
| vk::BufferHelper **indirectBufferOut) |
| { |
| ASSERT(mode == gl::PrimitiveMode::LineLoop); |
| |
| vk::BufferHelper *dstIndirectBuf = nullptr; |
| |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| ANGLE_TRY(vertexArrayVk->handleLineLoopIndexIndirect(this, indexType, srcIndirectBuf, |
| indirectBufferOffset, &dstIndirectBuf)); |
| |
| *indirectBufferOut = dstIndirectBuf; |
| |
| if (indexType != mCurrentDrawElementsType) |
| { |
| mCurrentDrawElementsType = indexType; |
| ANGLE_TRY(onIndexBufferChange(nullptr)); |
| } |
| |
| return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, dstIndirectBuf); |
| } |
| |
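// Non-indexed indirect line loop draws are handled the same way, except the generated index
// buffer is built from the draw parameters and always uses unsigned int indices (hence the
// index type is forced to UnsignedInt below).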
| angle::Result ContextVk::setupLineLoopIndirectDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| vk::BufferHelper *indirectBuffer, |
| VkDeviceSize indirectBufferOffset, |
| vk::BufferHelper **indirectBufferOut) |
| { |
| ASSERT(mode == gl::PrimitiveMode::LineLoop); |
| |
| vk::BufferHelper *indirectBufferHelperOut = nullptr; |
| |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| ANGLE_TRY(vertexArrayVk->handleLineLoopIndirectDraw( |
| context, indirectBuffer, indirectBufferOffset, &indirectBufferHelperOut)); |
| |
| *indirectBufferOut = indirectBufferHelperOut; |
| |
| if (gl::DrawElementsType::UnsignedInt != mCurrentDrawElementsType) |
| { |
| mCurrentDrawElementsType = gl::DrawElementsType::UnsignedInt; |
| ANGLE_TRY(onIndexBufferChange(nullptr)); |
| } |
| |
| return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, indirectBufferHelperOut); |
| } |
| |
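// Direct (non-indirect) line loop draws: handleLineLoop streams or converts the indices into a
// loop-closing index buffer and returns the new index count in numIndicesOut.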
| angle::Result ContextVk::setupLineLoopDraw(const gl::Context *context, |
| gl::PrimitiveMode mode, |
| GLint firstVertex, |
| GLsizei vertexOrIndexCount, |
| gl::DrawElementsType indexTypeOrInvalid, |
| const void *indices, |
| uint32_t *numIndicesOut) |
| { |
| mCurrentIndexBufferOffset = 0; |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| ANGLE_TRY(vertexArrayVk->handleLineLoop(this, firstVertex, vertexOrIndexCount, |
| indexTypeOrInvalid, indices, numIndicesOut)); |
| ANGLE_TRY(onIndexBufferChange(nullptr)); |
| mCurrentDrawElementsType = indexTypeOrInvalid != gl::DrawElementsType::InvalidEnum |
| ? indexTypeOrInvalid |
| : gl::DrawElementsType::UnsignedInt; |
| return setupDraw(context, mode, firstVertex, vertexOrIndexCount, 1, indexTypeOrInvalid, indices, |
| mIndexedDirtyBitsMask); |
| } |
| |
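// setupDispatch is the compute counterpart of setupDraw. Compute work is recorded in the
// outside render pass command buffer, and all compute dirty bits are processed before the
// vkCmdDispatch* call.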
| angle::Result ContextVk::setupDispatch(const gl::Context *context) |
| { |
| // Note: numerous tests miss a glMemoryBarrier call between the initial texture data upload and |
| // the dispatch call. Flush the outside render pass command buffer as a workaround. |
| // TODO: Remove this and fix tests. http://anglebug.com/5070 |
| ANGLE_TRY(flushOutsideRenderPassCommands()); |
| |
| ProgramExecutableVk *programExecutableVk = getExecutable(); |
| if (programExecutableVk->hasDirtyUniforms()) |
| { |
| mComputeDirtyBits.set(DIRTY_BIT_UNIFORMS); |
| mComputeDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS); |
| } |
| |
| DirtyBits dirtyBits = mComputeDirtyBits; |
| |
| // Flush any relevant dirty bits. |
| for (size_t dirtyBit : dirtyBits) |
| { |
| ASSERT(mComputeDirtyBitHandlers[dirtyBit]); |
| ANGLE_TRY((this->*mComputeDirtyBitHandlers[dirtyBit])()); |
| } |
| |
| mComputeDirtyBits.reset(); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsMemoryBarrier(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return handleDirtyMemoryBarrierImpl(dirtyBitsIterator, dirtyBitMask); |
| } |
| |
| angle::Result ContextVk::handleDirtyComputeMemoryBarrier() |
| { |
| return handleDirtyMemoryBarrierImpl(nullptr, {}); |
| } |
| |
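// Returns whether the currently started render pass uses any of the executable's storage
// resources (storage images, storage buffers, or atomic counter buffers). This is used to
// decide whether a glMemoryBarrier needs to break the render pass.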
| bool ContextVk::renderPassUsesStorageResources() const |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| // Storage images: |
| for (size_t imageUnitIndex : executable->getActiveImagesMask()) |
| { |
| const gl::Texture *texture = mState.getImageUnit(imageUnitIndex).texture.get(); |
| if (texture == nullptr) |
| { |
| continue; |
| } |
| |
| TextureVk *textureVk = vk::GetImpl(texture); |
| |
| if (texture->getType() == gl::TextureType::Buffer) |
| { |
| vk::BufferHelper &buffer = vk::GetImpl(textureVk->getBuffer().get())->getBuffer(); |
| if (mRenderPassCommands->usesBuffer(buffer)) |
| { |
| return true; |
| } |
| } |
| else |
| { |
| vk::ImageHelper &image = textureVk->getImage(); |
// Images only need to close the render pass if they need a layout transition. The outside
// render pass command buffer doesn't need closing, as the layout transition barriers are
// recorded in sequence with the rest of its commands.
| if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, image)) |
| { |
| return true; |
| } |
| } |
| } |
| |
| // Storage buffers: |
| const std::vector<gl::InterfaceBlock> &blocks = executable->getShaderStorageBlocks(); |
| for (uint32_t bufferIndex = 0; bufferIndex < blocks.size(); ++bufferIndex) |
| { |
| uint32_t binding = blocks[bufferIndex].binding; |
| const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding = |
| mState.getIndexedShaderStorageBuffer(binding); |
| |
| if (bufferBinding.get() == nullptr) |
| { |
| continue; |
| } |
| |
| vk::BufferHelper &buffer = vk::GetImpl(bufferBinding.get())->getBuffer(); |
| if (mRenderPassCommands->usesBuffer(buffer)) |
| { |
| return true; |
| } |
| } |
| |
| // Atomic counters: |
| const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers = |
| executable->getAtomicCounterBuffers(); |
| for (uint32_t bufferIndex = 0; bufferIndex < atomicCounterBuffers.size(); ++bufferIndex) |
| { |
| uint32_t binding = atomicCounterBuffers[bufferIndex].binding; |
| const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding = |
| mState.getIndexedAtomicCounterBuffer(binding); |
| |
| if (bufferBinding.get() == nullptr) |
| { |
| continue; |
| } |
| |
| vk::BufferHelper &buffer = vk::GetImpl(bufferBinding.get())->getBuffer(); |
| if (mRenderPassCommands->usesBuffer(buffer)) |
| { |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
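// Shared implementation of the graphics and compute memory barrier dirty bits.
// dirtyBitsIterator is non-null only on the graphics path, where closing the render pass must
// set later dirty bits through the iterator.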
| angle::Result ContextVk::handleDirtyMemoryBarrierImpl(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| const bool hasImages = executable->hasImages(); |
| const bool hasStorageBuffers = executable->hasStorageBuffers(); |
| const bool hasAtomicCounters = executable->hasAtomicCounterBuffers(); |
| |
| if (!hasImages && !hasStorageBuffers && !hasAtomicCounters) |
| { |
| return angle::Result::Continue; |
| } |
| |
| // Break the render pass if necessary. This is only needed for write-after-read situations, and |
| // is done by checking whether current storage buffers and images are used in the render pass. |
| if (renderPassUsesStorageResources()) |
| { |
| // Either set later bits (if called during handling of graphics dirty bits), or set the |
| // dirty bits directly (if called during handling of compute dirty bits). |
| if (dirtyBitsIterator) |
| { |
| return flushDirtyGraphicsRenderPass( |
| dirtyBitsIterator, dirtyBitMask, |
| RenderPassClosureReason::GLMemoryBarrierThenStorageResource); |
| } |
| else |
| { |
| return flushCommandsAndEndRenderPass( |
| RenderPassClosureReason::GLMemoryBarrierThenStorageResource); |
| } |
| } |
| |
// Flushing outside render pass commands is cheap. If a memory barrier has been issued in its
// lifetime, just flush it instead of wasting time trying to figure out if it's necessary.
| if (mOutsideRenderPassCommands->hasGLMemoryBarrierIssued()) |
| { |
| ANGLE_TRY(flushOutsideRenderPassCommands()); |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsEventLog(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return handleDirtyEventLogImpl(mRenderPassCommandBuffer); |
| } |
| |
| angle::Result ContextVk::handleDirtyComputeEventLog() |
| { |
| return handleDirtyEventLogImpl(&mOutsideRenderPassCommands->getCommandBuffer()); |
| } |
| |
| template <typename CommandBufferT> |
| angle::Result ContextVk::handleDirtyEventLogImpl(CommandBufferT *commandBuffer) |
| { |
// This method is called when a draw or dispatch command is being processed. Its purpose is
// to call the vkCmd*DebugUtilsLabelEXT functions in order to communicate to debuggers
// (e.g. AGI) the OpenGL ES commands that the application uses.
| |
// Exit early if no OpenGL ES commands have been logged, if there is no command buffer (for a
// no-op draw), or if calling the vkCmd*DebugUtilsLabelEXT functions is not enabled.
| if (mEventLog.empty() || commandBuffer == nullptr || !mRenderer->angleDebuggerMode()) |
| { |
| return angle::Result::Continue; |
| } |
| |
| // Insert OpenGL ES commands into debug label. We create a 3-level cascade here for |
| // OpenGL-ES-first debugging in AGI. Here's the general outline of commands: |
| // -glDrawCommand |
| // --vkCmdBeginDebugUtilsLabelEXT() #1 for "glDrawCommand" |
| // --OpenGL ES Commands |
| // ---vkCmdBeginDebugUtilsLabelEXT() #2 for "OpenGL ES Commands" |
| // ---Individual OpenGL ES Commands leading up to glDrawCommand |
| // ----vkCmdBeginDebugUtilsLabelEXT() #3 for each individual OpenGL ES Command |
| // ----vkCmdEndDebugUtilsLabelEXT() #3 for each individual OpenGL ES Command |
| // ----...More Individual OGL Commands... |
| // ----Final Individual OGL command will be the same glDrawCommand shown in #1 above |
| // ---vkCmdEndDebugUtilsLabelEXT() #2 for "OpenGL ES Commands" |
| // --VK SetupDraw & Draw-related commands will be embedded here under glDraw #1 |
| // --vkCmdEndDebugUtilsLabelEXT() #1 is called after each vkDraw* or vkDispatch* call |
| |
// AGI expects no parameters at the top level of the hierarchy.
| std::string topLevelCommand = mEventLog.back(); |
| size_t startOfParameters = topLevelCommand.find("("); |
| if (startOfParameters != std::string::npos) |
| { |
| topLevelCommand = topLevelCommand.substr(0, startOfParameters); |
| } |
| VkDebugUtilsLabelEXT label = {VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, |
| nullptr, |
| topLevelCommand.c_str(), |
| {0.0f, 0.0f, 0.0f, 0.0f}}; |
| // This is #1 from comment above |
| commandBuffer->beginDebugUtilsLabelEXT(label); |
| std::string oglCmds = "OpenGL ES Commands"; |
| label.pLabelName = oglCmds.c_str(); |
| // This is #2 from comment above |
| commandBuffer->beginDebugUtilsLabelEXT(label); |
for (const std::string &eventLogEntry : mEventLog)
{
label.pLabelName = eventLogEntry.c_str();
// NOTE: We have to use a begin/end pair here because AGI does not promote the
// pLabelName from an insertDebugUtilsLabelEXT() call to the Commands panel.
// Internal bug b/169243237 is tracking this; once the insert* call shows the
// pLabelName the same way the begin* call does, we can switch these to insert* calls.
| // This is #3 from comment above. |
| commandBuffer->beginDebugUtilsLabelEXT(label); |
| commandBuffer->endDebugUtilsLabelEXT(); |
| } |
| commandBuffer->endDebugUtilsLabelEXT(); |
| // The final end* call for #1 above is made in the ContextVk::draw* or |
| // ContextVk::dispatch* function calls. |
| |
| mEventLog.clear(); |
| return angle::Result::Continue; |
| } |
| |
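// Uploads the current generic attribute values (glVertexAttrib*) for attributes that are read
// from default values rather than an enabled vertex array.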
| angle::Result ContextVk::handleDirtyGraphicsDefaultAttribs(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| ASSERT(mDirtyDefaultAttribsMask.any()); |
| |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| for (size_t attribIndex : mDirtyDefaultAttribsMask) |
| { |
| ANGLE_TRY(vertexArrayVk->updateDefaultAttrib(this, attribIndex)); |
| } |
| |
| mDirtyDefaultAttribsMask.reset(); |
| return angle::Result::Continue; |
| } |
| |
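// Resolves the graphics pipeline for the draw. Pipeline desc changes are first looked up in
// the current pipeline's cached transitions; on a miss, the pipeline cache is queried (which
// may patch shaders and compile a new pipeline). If the resulting VkPipeline differs from the
// previously bound one, a rebind is scheduled through DIRTY_BIT_PIPELINE_BINDING.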
| angle::Result ContextVk::handleDirtyGraphicsPipelineDesc(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const VkPipeline previousPipeline = mCurrentGraphicsPipeline |
| ? mCurrentGraphicsPipeline->getPipeline().getHandle() |
| : VK_NULL_HANDLE; |
| |
| ASSERT(mState.getProgramExecutable() != nullptr); |
| const gl::ProgramExecutable &glExecutable = *mState.getProgramExecutable(); |
| ProgramExecutableVk *executableVk = getExecutable(); |
| ASSERT(executableVk); |
| |
| if (!mCurrentGraphicsPipeline) |
| { |
| const vk::GraphicsPipelineDesc *descPtr; |
| |
// The desc's specialization constants depend on the program's specConstUsageBits. We need
// to update them if the program has changed.
| SpecConstUsageBits usageBits = getCurrentProgramSpecConstUsageBits(); |
| updateGraphicsPipelineDescWithSpecConstUsageBits(usageBits); |
| |
| // Draw call shader patching, shader compilation, and pipeline cache query. |
| ANGLE_TRY(executableVk->getGraphicsPipeline(this, mCurrentDrawMode, *mGraphicsPipelineDesc, |
| glExecutable, &descPtr, |
| &mCurrentGraphicsPipeline)); |
| mGraphicsPipelineTransition.reset(); |
| } |
| else if (mGraphicsPipelineTransition.any()) |
| { |
| ASSERT(mCurrentGraphicsPipeline->valid()); |
| if (!mCurrentGraphicsPipeline->findTransition( |
| mGraphicsPipelineTransition, *mGraphicsPipelineDesc, &mCurrentGraphicsPipeline)) |
| { |
| vk::PipelineHelper *oldPipeline = mCurrentGraphicsPipeline; |
| const vk::GraphicsPipelineDesc *descPtr; |
| |
| ANGLE_TRY(executableVk->getGraphicsPipeline(this, mCurrentDrawMode, |
| *mGraphicsPipelineDesc, glExecutable, |
| &descPtr, &mCurrentGraphicsPipeline)); |
| |
| oldPipeline->addTransition(mGraphicsPipelineTransition, descPtr, |
| mCurrentGraphicsPipeline); |
| } |
| |
| mGraphicsPipelineTransition.reset(); |
| } |
| // Update the queue serial for the pipeline object. |
| ASSERT(mCurrentGraphicsPipeline && mCurrentGraphicsPipeline->valid()); |
| |
| mCurrentGraphicsPipeline->retain(&mRenderPassCommands->getResourceUseList()); |
| |
| const VkPipeline newPipeline = mCurrentGraphicsPipeline->getPipeline().getHandle(); |
| |
| // If there's no change in pipeline, avoid rebinding it later. If the rebind is due to a new |
| // command buffer or UtilsVk, it will happen anyway with DIRTY_BIT_PIPELINE_BINDING. |
| if (newPipeline == previousPipeline) |
| { |
| return angle::Result::Continue; |
| } |
| |
| // VK_EXT_transform_feedback disallows binding pipelines while transform feedback is active. |
| // If a new pipeline needs to be bound, the render pass should necessarily be broken (which |
| // implicitly pauses transform feedback), as resuming requires a barrier on the transform |
| // feedback counter buffer. |
| if (mRenderPassCommands->started() && mRenderPassCommands->isTransformFeedbackActiveUnpaused()) |
| { |
| ANGLE_TRY(flushDirtyGraphicsRenderPass( |
| dirtyBitsIterator, dirtyBitMask, RenderPassClosureReason::PipelineBindWhileXfbActive)); |
| |
| dirtyBitsIterator->setLaterBit(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME); |
| } |
| |
// The pipeline needs to be rebound because it has changed.
| dirtyBitsIterator->setLaterBit(DIRTY_BIT_PIPELINE_BINDING); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::updateRenderPassDepthFeedbackLoopMode( |
| UpdateDepthFeedbackLoopReason depthReason, |
| UpdateDepthFeedbackLoopReason stencilReason) |
| { |
| return updateRenderPassDepthFeedbackLoopModeImpl(nullptr, {}, depthReason, stencilReason); |
| } |
| |
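// If the draw is about to write depth or stencil while the attachment is in the read-only
// feedback-loop layout, the render pass must be ended so the attachment can move to a
// writable layout.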
| angle::Result ContextVk::updateRenderPassDepthFeedbackLoopModeImpl( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask, |
| UpdateDepthFeedbackLoopReason depthReason, |
| UpdateDepthFeedbackLoopReason stencilReason) |
| { |
| FramebufferVk *drawFramebufferVk = getDrawFramebuffer(); |
| if (!hasStartedRenderPass() || drawFramebufferVk->getDepthStencilRenderTarget() == nullptr) |
| { |
| return angle::Result::Continue; |
| } |
| |
| const gl::DepthStencilState &dsState = mState.getDepthStencilState(); |
| vk::ResourceAccess depthAccess = GetDepthAccess(dsState, depthReason); |
| vk::ResourceAccess stencilAccess = GetStencilAccess(dsState, stencilReason); |
| |
| if ((depthAccess == vk::ResourceAccess::Write || stencilAccess == vk::ResourceAccess::Write) && |
| drawFramebufferVk->isReadOnlyDepthFeedbackLoopMode()) |
| { |
// If we are switching out of read-only mode while in a feedback loop, we must end the
// render pass here. Otherwise, transitioning the attachment to a writable layout would
// produce a writable feedback loop, which is illegal in Vulkan and triggers validation
// errors about the depth texture using the writable layout.
| if (dirtyBitsIterator) |
| { |
| ANGLE_TRY(flushDirtyGraphicsRenderPass( |
| dirtyBitsIterator, dirtyBitMask, |
| RenderPassClosureReason::DepthStencilWriteAfterFeedbackLoop)); |
| } |
| else |
| { |
| ANGLE_TRY(flushCommandsAndEndRenderPass( |
| RenderPassClosureReason::DepthStencilWriteAfterFeedbackLoop)); |
| } |
| // Clear read-only depth feedback mode. |
| drawFramebufferVk->setReadOnlyDepthFeedbackLoopMode(false); |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return updateRenderPassDepthFeedbackLoopModeImpl(dirtyBitsIterator, dirtyBitMask, |
| UpdateDepthFeedbackLoopReason::Draw, |
| UpdateDepthFeedbackLoopReason::Draw); |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| // If the render pass needs to be recreated, close it using the special mid-dirty-bit-handling |
| // function, so later dirty bits can be set. |
| if (mRenderPassCommands->started()) |
| { |
| ANGLE_TRY(flushDirtyGraphicsRenderPass(dirtyBitsIterator, |
| dirtyBitMask & ~DirtyBits{DIRTY_BIT_RENDER_PASS}, |
| RenderPassClosureReason::AlreadySpecifiedElsewhere)); |
| } |
| |
| FramebufferVk *drawFramebufferVk = getDrawFramebuffer(); |
| gl::Rectangle scissoredRenderArea = drawFramebufferVk->getRotatedScissoredRenderArea(this); |
| bool renderPassDescChanged = false; |
| |
| ANGLE_TRY(startRenderPass(scissoredRenderArea, nullptr, &renderPassDescChanged)); |
| |
// The render pass desc can change when starting the render pass, for example when the
// multisampled-render-to-texture decision depends on the loadOps. In that case, the graphics
// pipeline must be recreated.
| if (renderPassDescChanged) |
| { |
| ANGLE_TRY(handleDirtyGraphicsPipelineDesc(dirtyBitsIterator, dirtyBitMask)); |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsColorAccess(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| FramebufferVk *drawFramebufferVk = getDrawFramebuffer(); |
| const gl::FramebufferState &framebufferState = drawFramebufferVk->getState(); |
| |
| // Update color attachment accesses |
| vk::PackedAttachmentIndex colorIndexVk(0); |
| for (size_t colorIndexGL : framebufferState.getColorAttachmentsMask()) |
| { |
| if (framebufferState.getEnabledDrawBuffers().test(colorIndexGL)) |
| { |
| vk::ResourceAccess colorAccess = GetColorAccess( |
| mState, framebufferState, drawFramebufferVk->getEmulatedAlphaAttachmentMask(), |
| drawFramebufferVk->hasFramebufferFetch(), colorIndexGL); |
| mRenderPassCommands->onColorAccess(colorIndexVk, colorAccess); |
| } |
| ++colorIndexVk; |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDepthStencilAccess( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| FramebufferVk *drawFramebufferVk = getDrawFramebuffer(); |
| if (drawFramebufferVk->getDepthStencilRenderTarget() == nullptr) |
| { |
| return angle::Result::Continue; |
| } |
| |
| // Update depth/stencil attachment accesses |
| const gl::DepthStencilState &dsState = mState.getDepthStencilState(); |
| vk::ResourceAccess depthAccess = GetDepthAccess(dsState, UpdateDepthFeedbackLoopReason::Draw); |
| vk::ResourceAccess stencilAccess = |
| GetStencilAccess(dsState, UpdateDepthFeedbackLoopReason::Draw); |
| mRenderPassCommands->onDepthAccess(depthAccess); |
| mRenderPassCommands->onStencilAccess(stencilAccess); |
| |
| drawFramebufferVk->updateRenderPassReadOnlyDepthMode(this, mRenderPassCommands); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsPipelineBinding(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| ASSERT(mCurrentGraphicsPipeline); |
| |
| mRenderPassCommandBuffer->bindGraphicsPipeline(mCurrentGraphicsPipeline->getPipeline()); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyComputePipelineDesc() |
| { |
| if (!mCurrentComputePipeline) |
| { |
| ProgramExecutableVk *executableVk = getExecutable(); |
| ASSERT(executableVk); |
| ANGLE_TRY(executableVk->getComputePipeline(this, &mCurrentComputePipeline)); |
| } |
| |
| ASSERT(mComputeDirtyBits.test(DIRTY_BIT_PIPELINE_BINDING)); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyComputePipelineBinding() |
| { |
| ASSERT(mCurrentComputePipeline); |
| |
| mOutsideRenderPassCommands->getCommandBuffer().bindComputePipeline( |
| mCurrentComputePipeline->getPipeline()); |
| mCurrentComputePipeline->retain(&mOutsideRenderPassCommands->getResourceUseList()); |
| |
| return angle::Result::Continue; |
| } |
| |
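// Prepares all active textures for sampling: texture buffers are recorded as buffer reads,
// other textures are transitioned to the appropriate shader-read layout, and the textures
// descriptor set is updated.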
| template <typename CommandBufferHelperT> |
| ANGLE_INLINE angle::Result ContextVk::handleDirtyTexturesImpl( |
| CommandBufferHelperT *commandBufferHelper, |
| PipelineType pipelineType) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| const gl::ActiveTextureMask &activeTextures = executable->getActiveSamplersMask(); |
| |
| for (size_t textureUnit : activeTextures) |
| { |
| TextureVk *textureVk = mActiveTextures[textureUnit]; |
| |
| // If it's a texture buffer, get the attached buffer. |
| if (textureVk->getBuffer().get() != nullptr) |
| { |
| BufferVk *bufferVk = vk::GetImpl(textureVk->getBuffer().get()); |
| const gl::ShaderBitSet stages = |
| executable->getSamplerShaderBitsForTextureUnitIndex(textureUnit); |
| |
| OnTextureBufferRead(this, bufferVk, stages, commandBufferHelper); |
| |
| textureVk->retainBufferViews(&commandBufferHelper->getResourceUseList()); |
| continue; |
| } |
| |
| // The image should be flushed and ready to use at this point. There may still be |
| // lingering staged updates in its staging buffer for unused texture mip levels or |
| // layers. Therefore we can't verify it has no staged updates right here. |
| vk::ImageHelper &image = textureVk->getImage(); |
| |
| const vk::ImageLayout imageLayout = |
| GetImageReadLayout(textureVk, *executable, textureUnit, pipelineType); |
| |
| // Ensure the image is in the desired layout |
| commandBufferHelper->imageRead(this, image.getAspectFlags(), imageLayout, &image); |
| } |
| |
| if (executable->hasTextures()) |
| { |
| UpdatePreCacheActiveTextures(executable->getActiveSamplersMask(), mActiveTextures, |
| mState.getSamplers(), &mActiveTexturesDesc); |
| |
| ProgramExecutableVk *executableVk = getExecutable(); |
| vk::ResourceUseList &resourceUseList = commandBufferHelper->getResourceUseList(); |
| |
| ANGLE_TRY(executableVk->updateTexturesDescriptorSet( |
| this, *executable, mActiveTextures, mState.getSamplers(), |
| mEmulateSeamfulCubeMapSampling, pipelineType, &mUpdateDescriptorSetsBuilder, |
| &resourceUseList, mActiveTexturesDesc)); |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsTextures(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return handleDirtyTexturesImpl(mRenderPassCommands, PipelineType::Graphics); |
| } |
| |
| angle::Result ContextVk::handleDirtyComputeTextures() |
| { |
| return handleDirtyTexturesImpl(mOutsideRenderPassCommands, PipelineType::Compute); |
| } |
| |
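// Binds the current vertex buffers and records their read access in the render pass. When
// VK_EXT_extended_dynamic_state is supported, vertex strides are set dynamically here rather
// than baked into the pipeline.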
| angle::Result ContextVk::handleDirtyGraphicsVertexBuffers(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
uint32_t maxAttrib = executable->getMaxActiveAttribLocation();
| const gl::AttribArray<VkBuffer> &bufferHandles = vertexArrayVk->getCurrentArrayBufferHandles(); |
| const gl::AttribArray<VkDeviceSize> &bufferOffsets = |
| vertexArrayVk->getCurrentArrayBufferOffsets(); |
| |
| if (getFeatures().supportsExtendedDynamicState.enabled) |
| { |
| const gl::AttribArray<GLuint> &bufferStrides = |
| vertexArrayVk->getCurrentArrayBufferStrides(); |
| const gl::AttribArray<angle::FormatID> &bufferFormats = |
| vertexArrayVk->getCurrentArrayBufferFormats(); |
| gl::AttribArray<VkDeviceSize> strides = {}; |
| |
// Set the stride to 0 when the format of the program's declared attribute mismatches the
// format specified through glVertexAttribPointer. See the comment in vk_cache_utils.cpp
// (initializePipeline) for more details.
| const gl::AttributesMask &activeAttribLocations = |
| executable->getNonBuiltinAttribLocationsMask(); |
| const gl::ComponentTypeMask &programAttribsTypeMask = executable->getAttributesTypeMask(); |
| |
| for (size_t attribIndex : activeAttribLocations) |
| { |
| const angle::Format &intendedFormat = |
| mRenderer->getFormat(bufferFormats[attribIndex]).getIntendedFormat(); |
| |
| const gl::ComponentType attribType = GetVertexAttributeComponentType( |
| intendedFormat.isPureInt(), intendedFormat.vertexAttribType); |
| const gl::ComponentType programAttribType = |
| gl::GetComponentTypeMask(programAttribsTypeMask, attribIndex); |
| |
| const bool mismatchingType = |
| attribType != programAttribType && (programAttribType == gl::ComponentType::Float || |
| attribType == gl::ComponentType::Float); |
| strides[attribIndex] = mismatchingType ? 0 : bufferStrides[attribIndex]; |
| } |
| |
| // TODO: Use the sizes parameters here to fix the robustness issue worked around in |
| // crbug.com/1310038 |
| mRenderPassCommandBuffer->bindVertexBuffers2(0, maxAttrib, bufferHandles.data(), |
| bufferOffsets.data(), nullptr, strides.data()); |
| } |
| else |
| { |
| mRenderPassCommandBuffer->bindVertexBuffers(0, maxAttrib, bufferHandles.data(), |
| bufferOffsets.data()); |
| } |
| |
| const gl::AttribArray<vk::BufferHelper *> &arrayBufferResources = |
| vertexArrayVk->getCurrentArrayBuffers(); |
| |
| // Mark all active vertex buffers as accessed. |
| const gl::AttributesMask attribsMask = executable->getActiveAttribLocationsMask(); |
| for (size_t attribIndex : attribsMask) |
| { |
| vk::BufferHelper *arrayBuffer = arrayBufferResources[attribIndex]; |
| if (arrayBuffer) |
| { |
| mRenderPassCommands->bufferRead(this, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, |
| vk::PipelineStage::VertexInput, arrayBuffer); |
| } |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
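// Binds the current element array buffer, folding the draw call's index offset
// (mCurrentIndexBufferOffset) into the Vulkan binding offset, and records the index read in
// the render pass.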
| angle::Result ContextVk::handleDirtyGraphicsIndexBuffer(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| VertexArrayVk *vertexArrayVk = getVertexArray(); |
| vk::BufferHelper *elementArrayBuffer = vertexArrayVk->getCurrentElementArrayBuffer(); |
| ASSERT(elementArrayBuffer != nullptr); |
| |
| VkDeviceSize bufferOffset; |
| const vk::Buffer &buffer = elementArrayBuffer->getBufferForVertexArray( |
| this, elementArrayBuffer->getSize(), &bufferOffset); |
| |
| mRenderPassCommandBuffer->bindIndexBuffer(buffer, bufferOffset + mCurrentIndexBufferOffset, |
| getVkIndexType(mCurrentDrawElementsType)); |
| |
| mRenderPassCommands->bufferRead(this, VK_ACCESS_INDEX_READ_BIT, vk::PipelineStage::VertexInput, |
| elementArrayBuffer); |
| |
| return angle::Result::Continue; |
| } |
| |
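// Framebuffer fetch is implemented with input attachments, so a self-dependency barrier is
// needed between writing a color attachment and reading it as an input attachment in the
// fragment shader.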
| angle::Result ContextVk::handleDirtyGraphicsFramebufferFetchBarrier( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| VkMemoryBarrier memoryBarrier = {}; |
| memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; |
| memoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; |
| memoryBarrier.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; |
| |
| mRenderPassCommandBuffer->pipelineBarrier( |
| VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, |
| GetLocalDependencyFlags(this), 1, &memoryBarrier, 0, nullptr, 0, nullptr); |
| |
| return angle::Result::Continue; |
| } |
| |
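// Handles glBlendBarrier for non-coherent advanced blend (VK_EXT_blend_operation_advanced):
// makes prior color attachment writes available to subsequent non-coherent blend reads.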
| angle::Result ContextVk::handleDirtyGraphicsBlendBarrier(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| VkMemoryBarrier memoryBarrier = {}; |
| memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; |
| memoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; |
| memoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT; |
| |
| mRenderPassCommandBuffer->pipelineBarrier(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, |
| VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, |
| GetLocalDependencyFlags(this), 1, &memoryBarrier, 0, |
| nullptr, 0, nullptr); |
| |
| return angle::Result::Continue; |
| } |
| |
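// Records accesses and barriers for the executable's shader resources (images, uniform,
// storage and atomic counter buffers) and updates the shader-resources descriptor set.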
| template <typename CommandBufferHelperT> |
| angle::Result ContextVk::handleDirtyShaderResourcesImpl(CommandBufferHelperT *commandBufferHelper, |
| PipelineType pipelineType) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| const bool hasImages = executable->hasImages(); |
| const bool hasStorageBuffers = |
| executable->hasStorageBuffers() || executable->hasAtomicCounterBuffers(); |
| const bool hasUniformBuffers = executable->hasUniformBuffers(); |
| |
| if (!hasUniformBuffers && !hasStorageBuffers && !hasImages && |
| !executable->usesFramebufferFetch()) |
| { |
| return angle::Result::Continue; |
| } |
| |
| if (hasImages) |
| { |
| ANGLE_TRY(updateActiveImages(commandBufferHelper)); |
| } |
| |
| handleDirtyShaderBufferResourcesImpl(commandBufferHelper); |
| |
| ANGLE_TRY(updateShaderResourcesDescriptorDesc(pipelineType)); |
| |
| ProgramExecutableVk *executableVk = getExecutable(); |
| vk::ResourceUseList &resourceUseList = commandBufferHelper->getResourceUseList(); |
| |
| ANGLE_TRY(executableVk->updateShaderResourcesDescriptorSet( |
| this, &mUpdateDescriptorSetsBuilder, &resourceUseList, mShaderBuffersDescriptorDesc)); |
| |
| // Record usage of storage buffers and images in the command buffer to aid handling of |
| // glMemoryBarrier. |
| if (hasImages || hasStorageBuffers) |
| { |
| commandBufferHelper->setHasShaderStorageOutput(); |
| } |
| |
| return angle::Result::Continue; |
| } |
| |
| void ContextVk::handleDirtyShaderBufferResourcesImpl( |
| vk::CommandBufferHelperCommon *commandBufferHelper) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| // Process buffer barriers. |
| for (const gl::ShaderType shaderType : executable->getLinkedShaderStages()) |
| { |
| const std::vector<gl::InterfaceBlock> &ubos = executable->getUniformBlocks(); |
| |
| for (const gl::InterfaceBlock &ubo : ubos) |
| { |
| const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding = |
| mState.getIndexedUniformBuffer(ubo.binding); |
| |
| if (!ubo.isActive(shaderType)) |
| { |
| continue; |
| } |
| |
| if (bufferBinding.get() == nullptr) |
| { |
| continue; |
| } |
| |
| BufferVk *bufferVk = vk::GetImpl(bufferBinding.get()); |
| vk::BufferHelper &bufferHelper = bufferVk->getBuffer(); |
| |
| commandBufferHelper->bufferRead(this, VK_ACCESS_UNIFORM_READ_BIT, |
| vk::GetPipelineStage(shaderType), &bufferHelper); |
| } |
| |
| const std::vector<gl::InterfaceBlock> &ssbos = executable->getShaderStorageBlocks(); |
| for (const gl::InterfaceBlock &ssbo : ssbos) |
| { |
| const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding = |
| mState.getIndexedShaderStorageBuffer(ssbo.binding); |
| |
| if (!ssbo.isActive(shaderType)) |
| { |
| continue; |
| } |
| |
| if (bufferBinding.get() == nullptr) |
| { |
| continue; |
| } |
| |
| BufferVk *bufferVk = vk::GetImpl(bufferBinding.get()); |
| vk::BufferHelper &bufferHelper = bufferVk->getBuffer(); |
| |
// SHADER_READ_BIT is set conservatively, since the shader may read as well as write the buffer.
| VkAccessFlags accessFlags = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT; |
| commandBufferHelper->bufferWrite(this, accessFlags, vk::GetPipelineStage(shaderType), |
| vk::AliasingMode::Allowed, &bufferHelper); |
| } |
| |
| const std::vector<gl::AtomicCounterBuffer> &acbs = executable->getAtomicCounterBuffers(); |
| for (const gl::AtomicCounterBuffer &atomicCounterBuffer : acbs) |
| { |
| uint32_t binding = atomicCounterBuffer.binding; |
| const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding = |
| mState.getIndexedAtomicCounterBuffer(binding); |
| |
| if (bufferBinding.get() == nullptr) |
| { |
| continue; |
| } |
| |
| BufferVk *bufferVk = vk::GetImpl(bufferBinding.get()); |
| vk::BufferHelper &bufferHelper = bufferVk->getBuffer(); |
| |
// SHADER_READ_BIT is set conservatively, since the shader may read as well as write the buffer.
| commandBufferHelper->bufferWrite( |
| this, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, |
| vk::GetPipelineStage(shaderType), vk::AliasingMode::Allowed, &bufferHelper); |
| } |
| } |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsShaderResources(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return handleDirtyShaderResourcesImpl(mRenderPassCommands, PipelineType::Graphics); |
| } |
| |
| angle::Result ContextVk::handleDirtyComputeShaderResources() |
| { |
| return handleDirtyShaderResourcesImpl(mOutsideRenderPassCommands, PipelineType::Compute); |
| } |
| |
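// With emulated transform feedback, captured varyings are written from the vertex shader into
// ordinary storage buffers, so those buffers are tracked as shader writes and the
// uniforms-and-xfb descriptor set is refreshed.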
| angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackBuffersEmulation( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| if (!executable->hasTransformFeedbackOutput()) |
| { |
| return angle::Result::Continue; |
| } |
| |
| TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(mState.getCurrentTransformFeedback()); |
| |
| if (mState.isTransformFeedbackActiveUnpaused()) |
| { |
| size_t bufferCount = executable->getTransformFeedbackBufferCount(); |
| const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &bufferHelpers = |
| transformFeedbackVk->getBufferHelpers(); |
| |
| for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex) |
| { |
| vk::BufferHelper *bufferHelper = bufferHelpers[bufferIndex]; |
| ASSERT(bufferHelper); |
| mRenderPassCommands->bufferWrite(this, VK_ACCESS_SHADER_WRITE_BIT, |
| vk::PipelineStage::VertexShader, |
| vk::AliasingMode::Disallowed, bufferHelper); |
| } |
| } |
| |
| ProgramExecutableVk *executableVk = getExecutable(); |
| vk::BufferHelper *currentUniformBuffer = mDefaultUniformStorage.getCurrentBuffer(); |
| vk::ResourceUseList &resourceUseList = mRenderPassCommands->getResourceUseList(); |
| |
| vk::DescriptorSetDescBuilder uniformsAndXfbDesc; |
| uniformsAndXfbDesc.updateUniformsAndXfb( |
| this, *executable, *executableVk, currentUniformBuffer, mEmptyBuffer, |
| mState.isTransformFeedbackActiveUnpaused(), transformFeedbackVk); |
| |
| return executableVk->updateUniformsAndXfbDescriptorSet(this, &mUpdateDescriptorSetsBuilder, |
| &resourceUseList, currentUniformBuffer, |
| uniformsAndXfbDesc); |
| } |
| |
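// With VK_EXT_transform_feedback, the transform feedback and counter buffers are barriered
// and bound directly, and beginTransformFeedback is recorded through the render pass commands
// so it pairs correctly with the render pass begin/end.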
| angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackBuffersExtension( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::ProgramExecutable *executable = mState.getProgramExecutable(); |
| ASSERT(executable); |
| |
| if (!executable->hasTransformFeedbackOutput() || !mState.isTransformFeedbackActive()) |
| { |
| return angle::Result::Continue; |
| } |
| |
| TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(mState.getCurrentTransformFeedback()); |
| size_t bufferCount = executable->getTransformFeedbackBufferCount(); |
| |
| const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers = |
| transformFeedbackVk->getBufferHelpers(); |
| gl::TransformFeedbackBuffersArray<vk::BufferHelper> &counterBuffers = |
| transformFeedbackVk->getCounterBufferHelpers(); |
| |
| // Issue necessary barriers for the transform feedback buffers. |
| for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex) |
| { |
| vk::BufferHelper *bufferHelper = buffers[bufferIndex]; |
| ASSERT(bufferHelper); |
| mRenderPassCommands->bufferWrite(this, VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, |
| vk::PipelineStage::TransformFeedback, |
| vk::AliasingMode::Disallowed, bufferHelper); |
| } |
| |
| // Issue necessary barriers for the transform feedback counter buffer. Note that the barrier is |
| // issued only on the first buffer (which uses a global memory barrier), as all the counter |
| // buffers of the transform feedback object are used together. The rest of the buffers are |
| // simply retained so they don't get deleted too early. |
| ASSERT(counterBuffers[0].valid()); |
| mRenderPassCommands->bufferWrite(this, |
| VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT | |
| VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT, |
| vk::PipelineStage::TransformFeedback, |
| vk::AliasingMode::Disallowed, &counterBuffers[0]); |
| for (size_t bufferIndex = 1; bufferIndex < bufferCount; ++bufferIndex) |
| { |
| counterBuffers[bufferIndex].retainReadWrite(&mRenderPassCommands->getResourceUseList()); |
| } |
| |
| const gl::TransformFeedbackBuffersArray<VkBuffer> &bufferHandles = |
| transformFeedbackVk->getBufferHandles(); |
| const gl::TransformFeedbackBuffersArray<VkDeviceSize> &bufferOffsets = |
| transformFeedbackVk->getBufferOffsets(); |
| const gl::TransformFeedbackBuffersArray<VkDeviceSize> &bufferSizes = |
| transformFeedbackVk->getBufferSizes(); |
| |
| mRenderPassCommandBuffer->bindTransformFeedbackBuffers( |
| 0, static_cast<uint32_t>(bufferCount), bufferHandles.data(), bufferOffsets.data(), |
| bufferSizes.data()); |
| |
| if (!mState.isTransformFeedbackActiveUnpaused()) |
| { |
| return angle::Result::Continue; |
| } |
| |
// There should be as many counter buffers as there are transform feedback buffers.
| const gl::TransformFeedbackBuffersArray<VkBuffer> &counterBufferHandles = |
| transformFeedbackVk->getCounterBufferHandles(); |
| const gl::TransformFeedbackBuffersArray<VkDeviceSize> &counterBufferOffsets = |
| transformFeedbackVk->getCounterBufferOffsets(); |
| |
| bool rebindBuffers = transformFeedbackVk->getAndResetBufferRebindState(); |
| |
| mRenderPassCommands->beginTransformFeedback(bufferCount, counterBufferHandles.data(), |
| counterBufferOffsets.data(), rebindBuffers); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackResume( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| if (mRenderPassCommands->isTransformFeedbackStarted()) |
| { |
| mRenderPassCommands->resumeTransformFeedback(); |
| } |
| |
| ANGLE_TRY(resumeXfbRenderPassQueriesIfActive()); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDescriptorSets(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| return handleDirtyDescriptorSetsImpl(mRenderPassCommands, PipelineType::Graphics); |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsUniforms(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| dirtyBitsIterator->setLaterBit(DIRTY_BIT_DESCRIPTOR_SETS); |
| return handleDirtyUniformsImpl(&mRenderPassCommands->getResourceUseList()); |
| } |
| |
| angle::Result ContextVk::handleDirtyComputeUniforms() |
| { |
| return handleDirtyUniformsImpl(&mOutsideRenderPassCommands->getResourceUseList()); |
| } |
| |
| angle::Result ContextVk::handleDirtyUniformsImpl(vk::ResourceUseList *resourceUseList) |
| { |
| ProgramExecutableVk *programExecutableVk = getExecutable(); |
| TransformFeedbackVk *transformFeedbackVk = |
| vk::SafeGetImpl(mState.getCurrentTransformFeedback()); |
| ANGLE_TRY(programExecutableVk->updateUniforms( |
| this, &mUpdateDescriptorSetsBuilder, resourceUseList, &mEmptyBuffer, |
| *mState.getProgramExecutable(), &mDefaultUniformStorage, |
| mState.isTransformFeedbackActiveUnpaused(), transformFeedbackVk)); |
| |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicViewport(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| mRenderPassCommandBuffer->setViewport(0, 1, &mViewport); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicScissor(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| handleDirtyGraphicsDynamicScissorImpl(mState.isQueryActive(gl::QueryType::PrimitivesGenerated)); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicLineWidth(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
// Clamp the line width to the min/max allowed values. It's not invalid GL to provide
// out-of-range line widths, but it _is_ invalid in Vulkan.
| const float lineWidth = gl::clamp(mState.getLineWidth(), mState.getCaps().minAliasedLineWidth, |
| mState.getCaps().maxAliasedLineWidth); |
| mRenderPassCommandBuffer->setLineWidth(lineWidth); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicDepthBias(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::RasterizerState &rasterState = mState.getRasterizerState(); |
| // Note: depth bias clamp is only exposed in EXT_polygon_offset_clamp. |
| mRenderPassCommandBuffer->setDepthBias(rasterState.polygonOffsetUnits, 0, |
| rasterState.polygonOffsetFactor); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicBlendConstants( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::ColorF &color = mState.getBlendColor(); |
| mRenderPassCommandBuffer->setBlendConstants(color.data()); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicStencilCompareMask( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::DepthStencilState &depthStencilState = mState.getDepthStencilState(); |
| mRenderPassCommandBuffer->setStencilCompareMask(depthStencilState.stencilMask, |
| depthStencilState.stencilBackMask); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicStencilWriteMask( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::DepthStencilState &depthStencilState = mState.getDepthStencilState(); |
| const gl::Framebuffer *drawFramebuffer = mState.getDrawFramebuffer(); |
| uint32_t frontWritemask = 0; |
| uint32_t backWritemask = 0; |
// Don't write to the stencil buffer when the framebuffer doesn't have one.
| if (drawFramebuffer->hasStencil()) |
| { |
| frontWritemask = depthStencilState.stencilWritemask; |
| backWritemask = depthStencilState.stencilBackWritemask; |
| } |
| |
| mRenderPassCommandBuffer->setStencilWriteMask(frontWritemask, backWritemask); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicStencilReference( |
| DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| mRenderPassCommandBuffer->setStencilReference(mState.getStencilRef(), |
| mState.getStencilBackRef()); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicCullMode(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::RasterizerState &rasterState = mState.getRasterizerState(); |
| mRenderPassCommandBuffer->setCullMode(gl_vk::GetCullMode(rasterState)); |
| return angle::Result::Continue; |
| } |
| |
| angle::Result ContextVk::handleDirtyGraphicsDynamicFrontFace(DirtyBits::Iterator *dirtyBitsIterator, |
| DirtyBits dirtyBitMask) |
| { |
| const gl::RasterizerState &rasterState = mState.getRasterizerState(); |
| mRenderPassCommandBuffer->setFrontFace( |
| gl_vk::GetFrontFace(rasterState.frontFace, isYFlipEnabledForDrawFBO())); |