/*
* Copyright (c) 2015-2019 The Khronos Group Inc.
* Copyright (c) 2015-2019 Valve Corporation
* Copyright (c) 2015-2019 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Chia-I Wu <olv@lunarg.com>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Ian Elliott <ian@LunarG.com>
* Author: Ian Elliott <ianelliott@google.com>
* Author: Jon Ashburn <jon@lunarg.com>
* Author: Gwan-gyeong Mun <elongbug@gmail.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Bill Hollings <bill.hollings@brenwill.com>
*/
#define _GNU_SOURCE
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <signal.h>
#include <errno.h>
#if defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_XCB_KHR)
#include <X11/Xutil.h>
#elif defined(VK_USE_PLATFORM_WAYLAND_KHR)
#include <linux/input.h>
#include "xdg-shell-client-header.h"
#include "xdg-decoration-client-header.h"
#endif
#ifdef _WIN32
#ifdef _MSC_VER
#pragma comment(linker, "/subsystem:windows")
#endif // MSVC
#define APP_NAME_STR_LEN 80
#endif // _WIN32
#include <vulkan/vulkan.h>
#define VOLK_IMPLEMENTATION
#include "volk.h"
#include "linmath.h"
#include "object_type_string_helper.h"
#include "gettime.h"
#include "inttypes.h"
#define MILLION 1000000L     // 1 millisecond in nanoseconds
#define BILLION 1000000000L  // 1 second in nanoseconds
#define DEMO_TEXTURE_COUNT 1
#define APP_SHORT_NAME "vkcube"
#define APP_LONG_NAME "Vulkan Cube"
// Allow a maximum of two outstanding presentation operations.
#define FRAME_LAG 2
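// Each in-flight frame owns its own fence and set of semaphores (see the
// fences[FRAME_LAG] and *_semaphores[FRAME_LAG] arrays in struct demo), so
// the CPU can record frame N+1 while the GPU still works on frame N.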
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#if defined(NDEBUG) && defined(__GNUC__)
#define U_ASSERT_ONLY __attribute__((unused))
#else
#define U_ASSERT_ONLY
#endif
#if defined(__GNUC__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
#ifdef _WIN32
bool in_callback = false;
#define ERR_EXIT(err_msg, err_class) \
do { \
if (!demo->suppress_popups) MessageBox(NULL, err_msg, err_class, MB_OK); \
exit(1); \
} while (0)
void DbgMsg(const char *fmt, ...) {
va_list va;
va_start(va, fmt);
vprintf(fmt, va);
va_end(va);
fflush(stdout);
}
#elif defined __ANDROID__
#include <android/log.h>
#define ERR_EXIT(err_msg, err_class) \
do { \
((void)__android_log_print(ANDROID_LOG_INFO, "Vulkan Cube", "%s", err_msg)); \
exit(1); \
} while (0)
#ifdef VARARGS_WORKS_ON_ANDROID
void DbgMsg(const char *fmt, ...) {
va_list va;
va_start(va, fmt);
__android_log_vprint(ANDROID_LOG_INFO, "Vulkan Cube", fmt, va);
va_end(va);
}
#else // VARARGS_WORKS_ON_ANDROID
#define DbgMsg(fmt, ...) \
do { \
((void)__android_log_print(ANDROID_LOG_INFO, "Vulkan Cube", fmt, ##__VA_ARGS__)); \
} while (0)
#endif // VARARGS_WORKS_ON_ANDROID
#else
#define ERR_EXIT(err_msg, err_class) \
do { \
printf("%s\n", err_msg); \
fflush(stdout); \
exit(1); \
} while (0)
void DbgMsg(const char *fmt, ...) {
va_list va;
va_start(va, fmt);
vprintf(fmt, va);
va_end(va);
fflush(stdout);
}
#endif
/*
* structure to track all objects related to a texture.
*/
struct texture_object {
VkSampler sampler;
VkImage image;
VkBuffer buffer;
VkImageLayout imageLayout;
VkMemoryAllocateInfo mem_alloc;
VkDeviceMemory mem;
VkImageView view;
int32_t tex_width, tex_height;
};
static char *tex_files[] = {"lunarg.ppm"};
static int validation_error = 0;
struct vktexcube_vs_uniform {
// Must start with MVP
float mvp[4][4];
float position[12 * 3][4];
float attr[12 * 3][4];
};
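// The pipeline binds no vertex buffers; the vertex shader indexes the
// position/attr arrays above with gl_VertexIndex (matching the 12 * 3
// vertex count passed to vkCmdDraw in demo_draw_build_cmd).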
//--------------------------------------------------------------------------------------
// Mesh and VertexFormat Data
//--------------------------------------------------------------------------------------
// clang-format off
static const float g_vertex_buffer_data[] = {
-1.0f,-1.0f,-1.0f, // -X side
-1.0f,-1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, 1.0f,-1.0f,
-1.0f,-1.0f,-1.0f,
-1.0f,-1.0f,-1.0f, // -Z side
1.0f, 1.0f,-1.0f,
1.0f,-1.0f,-1.0f,
-1.0f,-1.0f,-1.0f,
-1.0f, 1.0f,-1.0f,
1.0f, 1.0f,-1.0f,
-1.0f,-1.0f,-1.0f, // -Y side
1.0f,-1.0f,-1.0f,
1.0f,-1.0f, 1.0f,
-1.0f,-1.0f,-1.0f,
1.0f,-1.0f, 1.0f,
-1.0f,-1.0f, 1.0f,
-1.0f, 1.0f,-1.0f, // +Y side
-1.0f, 1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f,-1.0f,
1.0f, 1.0f, 1.0f,
1.0f, 1.0f,-1.0f,
1.0f, 1.0f,-1.0f, // +X side
1.0f, 1.0f, 1.0f,
1.0f,-1.0f, 1.0f,
1.0f,-1.0f, 1.0f,
1.0f,-1.0f,-1.0f,
1.0f, 1.0f,-1.0f,
-1.0f, 1.0f, 1.0f, // +Z side
-1.0f,-1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f,-1.0f, 1.0f,
1.0f,-1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
};
static const float g_uv_buffer_data[] = {
0.0f, 1.0f, // -X side
1.0f, 1.0f,
1.0f, 0.0f,
1.0f, 0.0f,
0.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f, // -Z side
0.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f,
1.0f, 0.0f,
0.0f, 0.0f,
1.0f, 0.0f, // -Y side
1.0f, 1.0f,
0.0f, 1.0f,
1.0f, 0.0f,
0.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f, // +Y side
0.0f, 0.0f,
0.0f, 1.0f,
1.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f,
1.0f, 0.0f, // +X side
0.0f, 0.0f,
0.0f, 1.0f,
0.0f, 1.0f,
1.0f, 1.0f,
1.0f, 0.0f,
0.0f, 0.0f, // +Z side
0.0f, 1.0f,
1.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f,
1.0f, 0.0f,
};
// clang-format on
void dumpMatrix(const char *note, mat4x4 MVP) {
int i;
printf("%s: \n", note);
for (i = 0; i < 4; i++) {
printf("%f, %f, %f, %f\n", MVP[i][0], MVP[i][1], MVP[i][2], MVP[i][3]);
}
printf("\n");
fflush(stdout);
}
void dumpVec4(const char *note, vec4 vector) {
printf("%s: \n", note);
printf("%f, %f, %f, %f\n", vector[0], vector[1], vector[2], vector[3]);
printf("\n");
fflush(stdout);
}
char const *to_string(VkPhysicalDeviceType const type) {
switch (type) {
case VK_PHYSICAL_DEVICE_TYPE_OTHER:
return "Other";
case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
return "IntegratedGpu";
case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
return "DiscreteGpu";
case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
return "VirtualGpu";
case VK_PHYSICAL_DEVICE_TYPE_CPU:
return "Cpu";
default:
return "Unknown";
}
}
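// Resources that must exist once per swapchain image: the draw and
// ownership-transfer command buffers, a uniform buffer with its mapped
// memory, a framebuffer, and a descriptor set.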
typedef struct {
VkImage image;
VkCommandBuffer cmd;
VkCommandBuffer graphics_to_present_cmd;
VkImageView view;
VkBuffer uniform_buffer;
VkDeviceMemory uniform_memory;
void *uniform_memory_ptr;
VkFramebuffer framebuffer;
VkDescriptorSet descriptor_set;
} SwapchainImageResources;
struct demo {
#if defined(VK_USE_PLATFORM_WIN32_KHR)
#define APP_NAME_STR_LEN 80
HINSTANCE connection; // hInstance - Windows Instance
char name[APP_NAME_STR_LEN]; // Name to put on the window/icon
HWND window; // hWnd - window handle
POINT minsize; // minimum window size
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
Display *display;
Window xlib_window;
Atom xlib_wm_delete_window;
#elif defined(VK_USE_PLATFORM_XCB_KHR)
Display *display;
xcb_connection_t *connection;
xcb_screen_t *screen;
xcb_window_t xcb_window;
xcb_intern_atom_reply_t *atom_wm_delete_window;
#elif defined(VK_USE_PLATFORM_WAYLAND_KHR)
struct wl_display *display;
struct wl_registry *registry;
struct wl_compositor *compositor;
struct wl_surface *window;
struct xdg_wm_base *xdg_wm_base;
struct zxdg_decoration_manager_v1 *xdg_decoration_mgr;
struct zxdg_toplevel_decoration_v1 *toplevel_decoration;
struct xdg_surface *xdg_surface;
int xdg_surface_has_been_configured;
struct xdg_toplevel *xdg_toplevel;
struct wl_seat *seat;
struct wl_pointer *pointer;
struct wl_keyboard *keyboard;
#elif defined(VK_USE_PLATFORM_DIRECTFB_EXT)
IDirectFB *dfb;
IDirectFBSurface *window;
IDirectFBEventBuffer *event_buffer;
#elif defined(VK_USE_PLATFORM_ANDROID_KHR)
struct ANativeWindow *window;
#elif defined(VK_USE_PLATFORM_METAL_EXT)
void *caMetalLayer;
#elif defined(VK_USE_PLATFORM_SCREEN_QNX)
screen_context_t screen_context;
screen_window_t screen_window;
screen_event_t screen_event;
#endif
VkSurfaceKHR surface;
bool prepared;
bool use_staging_buffer;
bool separate_present_queue;
bool is_minimized;
bool invalid_gpu_selection;
int32_t gpu_number;
bool VK_KHR_incremental_present_enabled;
bool VK_GOOGLE_display_timing_enabled;
bool syncd_with_actual_presents;
uint64_t refresh_duration;
uint64_t refresh_duration_multiplier;
uint64_t target_IPD; // image present duration (inverse of frame rate)
uint64_t prev_desired_present_time;
uint32_t next_present_id;
uint32_t last_early_id; // 0 if no early images
uint32_t last_late_id; // 0 if no late images
VkInstance inst;
VkPhysicalDevice gpu;
VkDevice device;
VkQueue graphics_queue;
VkQueue present_queue;
uint32_t graphics_queue_family_index;
uint32_t present_queue_family_index;
VkSemaphore image_acquired_semaphores[FRAME_LAG];
VkSemaphore draw_complete_semaphores[FRAME_LAG];
VkSemaphore image_ownership_semaphores[FRAME_LAG];
VkPhysicalDeviceProperties gpu_props;
VkQueueFamilyProperties *queue_props;
VkPhysicalDeviceMemoryProperties memory_properties;
uint32_t enabled_extension_count;
uint32_t enabled_layer_count;
char *extension_names[64];
char *enabled_layers[64];
int width, height;
VkFormat format;
VkColorSpaceKHR color_space;
uint32_t swapchainImageCount;
VkSwapchainKHR swapchain;
SwapchainImageResources *swapchain_image_resources;
VkPresentModeKHR presentMode;
VkFence fences[FRAME_LAG];
int frame_index;
VkCommandPool cmd_pool;
VkCommandPool present_cmd_pool;
struct {
VkFormat format;
VkImage image;
VkMemoryAllocateInfo mem_alloc;
VkDeviceMemory mem;
VkImageView view;
} depth;
struct texture_object textures[DEMO_TEXTURE_COUNT];
struct texture_object staging_texture;
VkCommandBuffer cmd; // Buffer for initialization commands
VkPipelineLayout pipeline_layout;
VkDescriptorSetLayout desc_layout;
VkPipelineCache pipelineCache;
VkRenderPass render_pass;
VkPipeline pipeline;
mat4x4 projection_matrix;
mat4x4 view_matrix;
mat4x4 model_matrix;
float spin_angle;
float spin_increment;
bool pause;
VkShaderModule vert_shader_module;
VkShaderModule frag_shader_module;
VkDescriptorPool desc_pool;
bool quit;
int32_t curFrame;
int32_t frameCount;
bool validate;
bool validate_checks_disabled;
bool use_break;
bool suppress_popups;
bool force_errors;
VkDebugUtilsMessengerEXT dbg_messenger;
uint32_t current_buffer;
uint32_t queue_family_count;
};
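// VK_EXT_debug_utils messenger callback: builds one message out of the
// severity/type prefix, the message id, and any attached objects and
// command-buffer labels, then routes it to a MessageBox (Windows), logcat
// (Android), or stdout. Returning false tells the layer not to abort the
// triggering call.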
VKAPI_ATTR VkBool32 VKAPI_CALL debug_messenger_callback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData,
void *pUserData) {
char prefix[64] = "";
char *message = (char *)malloc(strlen(pCallbackData->pMessage) + 5000);
assert(message);
struct demo *demo = (struct demo *)pUserData;
if (demo->use_break) {
#ifndef _WIN32
raise(SIGTRAP);
#else
DebugBreak();
#endif
}
if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
strcat(prefix, "VERBOSE : ");
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
strcat(prefix, "INFO : ");
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
strcat(prefix, "WARNING : ");
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
strcat(prefix, "ERROR : ");
}
if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT) {
strcat(prefix, "GENERAL");
} else {
if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
strcat(prefix, "VALIDATION");
validation_error = 1;
}
if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) {
if (messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) {
strcat(prefix, "|");
}
strcat(prefix, "PERFORMANCE");
}
}
sprintf(message, "%s - Message Id Number: %d | Message Id Name: %s\n\t%s\n", prefix, pCallbackData->messageIdNumber,
pCallbackData->pMessageIdName == NULL ? "" : pCallbackData->pMessageIdName, pCallbackData->pMessage);
if (pCallbackData->objectCount > 0) {
char tmp_message[500];
sprintf(tmp_message, "\n\tObjects - %d\n", pCallbackData->objectCount);
strcat(message, tmp_message);
for (uint32_t object = 0; object < pCallbackData->objectCount; ++object) {
sprintf(tmp_message, "\t\tObject[%d] - %s", object, string_VkObjectType(pCallbackData->pObjects[object].objectType));
strcat(message, tmp_message);
VkObjectType t = pCallbackData->pObjects[object].objectType;
if (t == VK_OBJECT_TYPE_INSTANCE || t == VK_OBJECT_TYPE_PHYSICAL_DEVICE || t == VK_OBJECT_TYPE_DEVICE ||
t == VK_OBJECT_TYPE_COMMAND_BUFFER || t == VK_OBJECT_TYPE_QUEUE) {
sprintf(tmp_message, ", Handle %p", (void *)(uintptr_t)(pCallbackData->pObjects[object].objectHandle));
strcat(message, tmp_message);
} else {
sprintf(tmp_message, ", Handle Ox%" PRIx64, (pCallbackData->pObjects[object].objectHandle));
strcat(message, tmp_message);
}
if (NULL != pCallbackData->pObjects[object].pObjectName && strlen(pCallbackData->pObjects[object].pObjectName) > 0) {
sprintf(tmp_message, ", Name \"%s\"", pCallbackData->pObjects[object].pObjectName);
strcat(message, tmp_message);
}
sprintf(tmp_message, "\n");
strcat(message, tmp_message);
}
}
if (pCallbackData->cmdBufLabelCount > 0) {
char tmp_message[500];
sprintf(tmp_message, "\n\tCommand Buffer Labels - %d\n", pCallbackData->cmdBufLabelCount);
strcat(message, tmp_message);
for (uint32_t cmd_buf_label = 0; cmd_buf_label < pCallbackData->cmdBufLabelCount; ++cmd_buf_label) {
sprintf(tmp_message, "\t\tLabel[%d] - %s { %f, %f, %f, %f}\n", cmd_buf_label,
pCallbackData->pCmdBufLabels[cmd_buf_label].pLabelName, pCallbackData->pCmdBufLabels[cmd_buf_label].color[0],
pCallbackData->pCmdBufLabels[cmd_buf_label].color[1], pCallbackData->pCmdBufLabels[cmd_buf_label].color[2],
pCallbackData->pCmdBufLabels[cmd_buf_label].color[3]);
strcat(message, tmp_message);
}
}
#ifdef _WIN32
in_callback = true;
if (!demo->suppress_popups) MessageBox(NULL, message, "Alert", MB_OK);
in_callback = false;
#elif defined(ANDROID)
if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
__android_log_print(ANDROID_LOG_INFO, APP_SHORT_NAME, "%s", message);
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
__android_log_print(ANDROID_LOG_WARN, APP_SHORT_NAME, "%s", message);
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
__android_log_print(ANDROID_LOG_ERROR, APP_SHORT_NAME, "%s", message);
} else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
__android_log_print(ANDROID_LOG_VERBOSE, APP_SHORT_NAME, "%s", message);
} else {
__android_log_print(ANDROID_LOG_INFO, APP_SHORT_NAME, "%s", message);
}
#else
printf("%s\n", message);
fflush(stdout);
#endif
free(message);
// Don't bail out, but keep going.
return false;
}
bool ActualTimeLate(uint64_t desired, uint64_t actual, uint64_t rdur) {
// The desired time was the earliest time that the present should have
// occurred. In almost every case, the actual time should be later than the
// desired time. We should only consider the actual time "late" if it is
// after "desired + rdur".
if (actual <= desired) {
// The actual time was before or equal to the desired time. This will
// probably never happen, but in case it does, return false since the
// present was obviously NOT late.
return false;
}
uint64_t deadline = desired + rdur;
if (actual > deadline) {
return true;
} else {
return false;
}
}
bool CanPresentEarlier(uint64_t earliest, uint64_t actual, uint64_t margin, uint64_t rdur) {
if (earliest < actual) {
// Consider whether this present could have occurred earlier. Make sure
// that earliest time was at least 2msec earlier than actual time, and
// that the margin was at least 2msec:
uint64_t diff = actual - earliest;
if ((diff >= (2 * MILLION)) && (margin >= (2 * MILLION))) {
// This present could have occurred earlier because both: 1) the
// earliest time was at least 2 msec before actual time, and 2) the
// margin was at least 2msec.
return true;
}
}
return false;
}
// Forward declarations:
static void demo_resize(struct demo *demo);
static void demo_create_surface(struct demo *demo);
#if defined(__GNUC__) || defined(__clang__)
#define DECORATE_PRINTF(_fmt_argnum, _first_param_num) __attribute__((format(printf, _fmt_argnum, _first_param_num)))
#else
#define DECORATE_PRINTF(_fmt_num, _first_param_num)
#endif
DECORATE_PRINTF(4, 5)
static void demo_name_object(struct demo *demo, VkObjectType object_type, uint64_t vulkan_handle, const char *format, ...) {
if (!demo->validate) {
return;
}
VkResult U_ASSERT_ONLY err;
char name[1024];
va_list argptr;
va_start(argptr, format);
vsnprintf(name, sizeof(name), format, argptr);
va_end(argptr);
name[sizeof(name) - 1] = '\0';
VkDebugUtilsObjectNameInfoEXT obj_name = {
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
.pNext = NULL,
.objectType = object_type,
.objectHandle = vulkan_handle,
.pObjectName = name,
};
err = vkSetDebugUtilsObjectNameEXT(demo->device, &obj_name);
assert(!err);
}
DECORATE_PRINTF(4, 5)
static void demo_push_cb_label(struct demo *demo, VkCommandBuffer cb, const float *color, const char *format, ...) {
if (!demo->validate) {
return;
}
char name[1024];
va_list argptr;
va_start(argptr, format);
vsnprintf(name, sizeof(name), format, argptr);
va_end(argptr);
name[sizeof(name) - 1] = '\0';
VkDebugUtilsLabelEXT label = {
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
.pNext = NULL,
.pLabelName = name,
};
if (color) {
memcpy(label.color, color, sizeof(label.color));
}
vkCmdBeginDebugUtilsLabelEXT(cb, &label);
}
static void demo_pop_cb_label(struct demo *demo, VkCommandBuffer cb) {
if (!demo->validate) {
return;
}
vkCmdEndDebugUtilsLabelEXT(cb);
}
static bool memory_type_from_properties(struct demo *demo, uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex) {
// Search memtypes to find first index with those properties
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++) {
if ((typeBits & 1) == 1) {
// Type is available, does it match user properties?
if ((demo->memory_properties.memoryTypes[i].propertyFlags & requirements_mask) == requirements_mask) {
*typeIndex = i;
return true;
}
}
typeBits >>= 1;
}
// No memory types matched, return failure
return false;
}
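/* Typical use, as in demo_prepare_depth below:
 *   vkGetImageMemoryRequirements(demo->device, image, &mem_reqs);
 *   memory_type_from_properties(demo, mem_reqs.memoryTypeBits,
 *                               VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
 *                               &mem_alloc.memoryTypeIndex);
 * Bit i of memoryTypeBits being set means memory type i is allowed. */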
static void demo_flush_init_cmd(struct demo *demo) {
VkResult U_ASSERT_ONLY err;
// This function could get called twice if the texture uses a staging buffer.
// In that case the second call should be ignored.
if (demo->cmd == VK_NULL_HANDLE) return;
err = vkEndCommandBuffer(demo->cmd);
assert(!err);
VkFence fence;
VkFenceCreateInfo fence_ci = {.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, .pNext = NULL, .flags = 0};
if (demo->force_errors) {
// Remove sType to intentionally force validation layer errors.
fence_ci.sType = 0;
}
err = vkCreateFence(demo->device, &fence_ci, NULL, &fence);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_FENCE, (uint64_t)fence, "InitFence");
const VkCommandBuffer cmd_bufs[] = {demo->cmd};
VkSubmitInfo submit_info = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
.pNext = NULL,
.waitSemaphoreCount = 0,
.pWaitSemaphores = NULL,
.pWaitDstStageMask = NULL,
.commandBufferCount = 1,
.pCommandBuffers = cmd_bufs,
.signalSemaphoreCount = 0,
.pSignalSemaphores = NULL};
err = vkQueueSubmit(demo->graphics_queue, 1, &submit_info, fence);
assert(!err);
err = vkWaitForFences(demo->device, 1, &fence, VK_TRUE, UINT64_MAX);
assert(!err);
vkFreeCommandBuffers(demo->device, demo->cmd_pool, 1, cmd_bufs);
vkDestroyFence(demo->device, fence, NULL);
demo->cmd = VK_NULL_HANDLE;
}
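// Record an image layout transition into the init command buffer demo->cmd.
// The required dstAccessMask is derived from the new layout below; the
// caller supplies srcAccessMask and the source/destination pipeline stages.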
static void demo_set_image_layout(struct demo *demo, VkImage image, VkImageAspectFlags aspectMask, VkImageLayout old_image_layout,
VkImageLayout new_image_layout, VkAccessFlagBits srcAccessMask, VkPipelineStageFlags src_stages,
VkPipelineStageFlags dest_stages) {
assert(demo->cmd);
VkImageMemoryBarrier image_memory_barrier = {.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = NULL,
.srcAccessMask = srcAccessMask,
.dstAccessMask = 0,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.oldLayout = old_image_layout,
.newLayout = new_image_layout,
.image = image,
.subresourceRange = {aspectMask, 0, 1, 0, 1}};
switch (new_image_layout) {
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
/* Make sure anything that was copying from this image has completed */
image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
image_memory_barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
image_memory_barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
break;
default:
image_memory_barrier.dstAccessMask = 0;
break;
}
VkImageMemoryBarrier *pmemory_barrier = &image_memory_barrier;
vkCmdPipelineBarrier(demo->cmd, src_stages, dest_stages, 0, 0, NULL, 0, NULL, 1, pmemory_barrier);
}
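// Record the per-swapchain-image draw command buffer: begin the render pass,
// bind the pipeline and this image's descriptor set, set a square viewport
// centered in the window, draw the 36 cube vertices, and, when a separate
// present queue is in use, release image ownership to that queue family.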
static void demo_draw_build_cmd(struct demo *demo, VkCommandBuffer cmd_buf) {
const VkCommandBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
.pInheritanceInfo = NULL,
};
const VkClearValue clear_values[2] = {
[0] = {.color.float32 = {0.2f, 0.2f, 0.2f, 0.2f}},
[1] = {.depthStencil = {1.0f, 0}},
};
const VkRenderPassBeginInfo rp_begin = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.pNext = NULL,
.renderPass = demo->render_pass,
.framebuffer = demo->swapchain_image_resources[demo->current_buffer].framebuffer,
.renderArea.offset.x = 0,
.renderArea.offset.y = 0,
.renderArea.extent.width = demo->width,
.renderArea.extent.height = demo->height,
.clearValueCount = 2,
.pClearValues = clear_values,
};
VkResult U_ASSERT_ONLY err;
err = vkBeginCommandBuffer(cmd_buf, &cmd_buf_info);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_COMMAND_BUFFER, (uint64_t)cmd_buf, "CubeDrawCommandBuf");
const float begin_color[4] = {0.4f, 0.3f, 0.2f, 0.1f};
demo_push_cb_label(demo, cmd_buf, begin_color, "DrawBegin");
vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
const float renderpass_color[4] = {8.4f, 7.3f, 6.2f, 7.1f};
demo_push_cb_label(demo, cmd_buf, renderpass_color, "InsideRenderPass");
vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, demo->pipeline);
vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, demo->pipeline_layout, 0, 1,
&demo->swapchain_image_resources[demo->current_buffer].descriptor_set, 0, NULL);
VkViewport viewport;
memset(&viewport, 0, sizeof(viewport));
float viewport_dimension;
if (demo->width < demo->height) {
viewport_dimension = (float)demo->width;
viewport.y = (demo->height - demo->width) / 2.0f;
} else {
viewport_dimension = (float)demo->height;
viewport.x = (demo->width - demo->height) / 2.0f;
}
viewport.height = viewport_dimension;
viewport.width = viewport_dimension;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
vkCmdSetViewport(cmd_buf, 0, 1, &viewport);
VkRect2D scissor;
memset(&scissor, 0, sizeof(scissor));
scissor.extent.width = demo->width;
scissor.extent.height = demo->height;
scissor.offset.x = 0;
scissor.offset.y = 0;
vkCmdSetScissor(cmd_buf, 0, 1, &scissor);
const float draw_color[4] = {-0.4f, -0.3f, -0.2f, -0.1f};
demo_push_cb_label(demo, cmd_buf, draw_color, "ActualDraw");
vkCmdDraw(cmd_buf, 12 * 3, 1, 0, 0);
demo_pop_cb_label(demo, cmd_buf);
// Note that ending the renderpass changes the image's layout from
// COLOR_ATTACHMENT_OPTIMAL to PRESENT_SRC_KHR
vkCmdEndRenderPass(cmd_buf);
demo_pop_cb_label(demo, cmd_buf);
if (demo->separate_present_queue) {
// We have to transfer ownership from the graphics queue family to the
// present queue family to be able to present. Note that we don't have
// to transfer from present queue family back to graphics queue family at
// the start of the next frame because we don't care about the image's
// contents at that point.
VkImageMemoryBarrier image_ownership_barrier = {.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = NULL,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.srcQueueFamilyIndex = demo->graphics_queue_family_index,
.dstQueueFamilyIndex = demo->present_queue_family_index,
.image = demo->swapchain_image_resources[demo->current_buffer].image,
.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};
vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0,
NULL, 1, &image_ownership_barrier);
}
demo_pop_cb_label(demo, cmd_buf);
err = vkEndCommandBuffer(cmd_buf);
assert(!err);
}
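// Record the command buffer that the present queue executes to acquire
// ownership of swapchain image i from the graphics queue family (the
// matching release is recorded in demo_draw_build_cmd above).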
void demo_build_image_ownership_cmd(struct demo *demo, int i) {
VkResult U_ASSERT_ONLY err;
const VkCommandBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
.pInheritanceInfo = NULL,
};
err = vkBeginCommandBuffer(demo->swapchain_image_resources[i].graphics_to_present_cmd, &cmd_buf_info);
assert(!err);
VkImageMemoryBarrier image_ownership_barrier = {.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = NULL,
.srcAccessMask = 0,
.dstAccessMask = 0,
.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
.srcQueueFamilyIndex = demo->graphics_queue_family_index,
.dstQueueFamilyIndex = demo->present_queue_family_index,
.image = demo->swapchain_image_resources[i].image,
.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};
vkCmdPipelineBarrier(demo->swapchain_image_resources[i].graphics_to_present_cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &image_ownership_barrier);
err = vkEndCommandBuffer(demo->swapchain_image_resources[i].graphics_to_present_cmd);
assert(!err);
}
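// Advance the model rotation by spin_angle and write the new MVP into the
// persistently mapped uniform buffer of the image currently being rendered.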
void demo_update_data_buffer(struct demo *demo) {
mat4x4 MVP, Model, VP;
int matrixSize = sizeof(MVP);
mat4x4_mul(VP, demo->projection_matrix, demo->view_matrix);
// Rotate around the Y axis
mat4x4_dup(Model, demo->model_matrix);
mat4x4_rotate_Y(demo->model_matrix, Model, (float)degreesToRadians(demo->spin_angle));
mat4x4_orthonormalize(demo->model_matrix, demo->model_matrix);
mat4x4_mul(MVP, VP, demo->model_matrix);
memcpy(demo->swapchain_image_resources[demo->current_buffer].uniform_memory_ptr, (const void *)&MVP[0][0], matrixSize);
}
void DemoUpdateTargetIPD(struct demo *demo) {
// Look at what happened to previous presents, and make appropriate
// adjustments in timing:
VkResult U_ASSERT_ONLY err;
VkPastPresentationTimingGOOGLE *past = NULL;
uint32_t count = 0;
err = vkGetPastPresentationTimingGOOGLE(demo->device, demo->swapchain, &count, NULL);
assert(!err);
if (count) {
past = (VkPastPresentationTimingGOOGLE *)malloc(sizeof(VkPastPresentationTimingGOOGLE) * count);
assert(past);
err = vkGetPastPresentationTimingGOOGLE(demo->device, demo->swapchain, &count, past);
assert(!err);
bool early = false;
bool late = false;
bool calibrate_next = false;
for (uint32_t i = 0; i < count; i++) {
if (!demo->syncd_with_actual_presents) {
// This is the first time that we've received an
// actualPresentTime for this swapchain. In order to not
// perceive these early frames as "late", we need to sync up
// our future desiredPresentTimes with the
// actualPresentTime(s) that we're receiving now.
calibrate_next = true;
// So that we don't suspect any pending presents as late,
// record them all as suspected-late presents:
demo->last_late_id = demo->next_present_id - 1;
demo->last_early_id = 0;
demo->syncd_with_actual_presents = true;
break;
} else if (CanPresentEarlier(past[i].earliestPresentTime, past[i].actualPresentTime, past[i].presentMargin,
demo->refresh_duration)) {
// This image could have been presented earlier. We don't want
// to decrease the target_IPD until we've seen early presents
// for at least two seconds.
if (demo->last_early_id == past[i].presentID) {
// We've now seen two seconds worth of early presents.
// Flag it as such, and reset the counter:
early = true;
demo->last_early_id = 0;
} else if (demo->last_early_id == 0) {
// This is the first early present we've seen.
// Calculate the presentID for two seconds from now.
uint64_t lastEarlyTime = past[i].actualPresentTime + (2 * BILLION);
uint32_t howManyPresents = (uint32_t)((lastEarlyTime - past[i].actualPresentTime) / demo->target_IPD);
demo->last_early_id = past[i].presentID + howManyPresents;
} else {
// We are in the midst of a set of early images,
// and so we won't do anything.
}
late = false;
demo->last_late_id = 0;
} else if (ActualTimeLate(past[i].desiredPresentTime, past[i].actualPresentTime, demo->refresh_duration)) {
// This image was presented after its desired time. Since
// there's a delay between calling vkQueuePresentKHR and when
// we get the timing data, several presents may have been late.
// Thus, we need to treat all of the outstanding presents as
// being likely late, so that we only increase the target_IPD
// once for all of those presents.
if ((demo->last_late_id == 0) || (demo->last_late_id < past[i].presentID)) {
late = true;
// Record the last suspected-late present:
demo->last_late_id = demo->next_present_id - 1;
} else {
// We are in the midst of a set of likely-late images,
// and so we won't do anything.
}
early = false;
demo->last_early_id = 0;
} else {
// Since this image was not presented early or late, reset
// any sets of early or late presentIDs:
early = false;
late = false;
calibrate_next = true;
demo->last_early_id = 0;
demo->last_late_id = 0;
}
}
if (early) {
// Since we've seen at least two seconds' worth of presents that
// could have occurred earlier than desired, let's decrease the
// target_IPD (i.e. increase the frame rate):
//
// TODO(ianelliott): Try to calculate a better target_IPD based
// on the most recently-seen present (this is overly-simplistic).
demo->refresh_duration_multiplier--;
if (demo->refresh_duration_multiplier == 0) {
// This should never happen, but in case it does, don't
// try to go faster.
demo->refresh_duration_multiplier = 1;
}
demo->target_IPD = demo->refresh_duration * demo->refresh_duration_multiplier;
}
if (late) {
// Since we found a new instance of a late present, we want to
// increase the target_IPD (i.e. decrease the frame rate):
//
// TODO(ianelliott): Try to calculate a better target_IPD based
// on the most recently-seen present (this is overly-simplistic).
demo->refresh_duration_multiplier++;
demo->target_IPD = demo->refresh_duration * demo->refresh_duration_multiplier;
}
if (calibrate_next) {
int64_t multiple = demo->next_present_id - past[count - 1].presentID;
demo->prev_desired_present_time = (past[count - 1].actualPresentTime + (multiple * demo->target_IPD));
}
free(past);
}
}
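// Render one frame: wait on this frame's fence, acquire the next swapchain
// image (recreating the swapchain or surface if the acquire fails), submit
// the prerecorded command buffer, and present. With a separate present
// queue an extra submit transfers image ownership before the present.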
static void demo_draw(struct demo *demo) {
VkResult U_ASSERT_ONLY err;
// Ensure no more than FRAME_LAG renderings are outstanding
vkWaitForFences(demo->device, 1, &demo->fences[demo->frame_index], VK_TRUE, UINT64_MAX);
vkResetFences(demo->device, 1, &demo->fences[demo->frame_index]);
do {
// Get the index of the next available swapchain image:
err = vkAcquireNextImageKHR(demo->device, demo->swapchain, UINT64_MAX, demo->image_acquired_semaphores[demo->frame_index],
VK_NULL_HANDLE, &demo->current_buffer);
if (err == VK_ERROR_OUT_OF_DATE_KHR) {
// demo->swapchain is out of date (e.g. the window was resized) and
// must be recreated:
demo_resize(demo);
} else if (err == VK_SUBOPTIMAL_KHR) {
// demo->swapchain is not as optimal as it could be, but the platform's
// presentation engine will still present the image correctly.
break;
} else if (err == VK_ERROR_SURFACE_LOST_KHR) {
vkDestroySurfaceKHR(demo->inst, demo->surface, NULL);
demo_create_surface(demo);
demo_resize(demo);
} else {
assert(!err);
}
} while (err != VK_SUCCESS);
demo_update_data_buffer(demo);
if (demo->VK_GOOGLE_display_timing_enabled) {
// Look at what happened to previous presents, and make appropriate
// adjustments in timing:
DemoUpdateTargetIPD(demo);
// Note: a real application would position its geometry so that it's in
// the correct location for when the next image is presented. It might
// also wait, so that there's less latency between any input and when
// the next image is rendered/presented. This demo program is so
// simple that it doesn't do either of those.
}
// Wait for the image acquired semaphore to be signaled to ensure
// that the image won't be rendered to until the presentation
// engine has fully released ownership to the application, and it is
// okay to render to the image.
VkPipelineStageFlags pipe_stage_flags;
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
submit_info.pWaitDstStageMask = &pipe_stage_flags;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &demo->image_acquired_semaphores[demo->frame_index];
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &demo->swapchain_image_resources[demo->current_buffer].cmd;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &demo->draw_complete_semaphores[demo->frame_index];
err = vkQueueSubmit(demo->graphics_queue, 1, &submit_info, demo->fences[demo->frame_index]);
assert(!err);
if (demo->separate_present_queue) {
// If we are using separate queues, change image ownership to the
// present queue before presenting, waiting for the draw complete
// semaphore and signalling the ownership released semaphore when finished
VkFence nullFence = VK_NULL_HANDLE;
pipe_stage_flags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &demo->draw_complete_semaphores[demo->frame_index];
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &demo->swapchain_image_resources[demo->current_buffer].graphics_to_present_cmd;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &demo->image_ownership_semaphores[demo->frame_index];
err = vkQueueSubmit(demo->present_queue, 1, &submit_info, nullFence);
assert(!err);
}
// If we are using separate queues we have to wait for image ownership,
// otherwise wait for draw complete
VkPresentInfoKHR present = {
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
.pNext = NULL,
.waitSemaphoreCount = 1,
.pWaitSemaphores = (demo->separate_present_queue) ? &demo->image_ownership_semaphores[demo->frame_index]
: &demo->draw_complete_semaphores[demo->frame_index],
.swapchainCount = 1,
.pSwapchains = &demo->swapchain,
.pImageIndices = &demo->current_buffer,
};
VkRectLayerKHR rect;
VkPresentRegionKHR region;
VkPresentRegionsKHR regions;
if (demo->VK_KHR_incremental_present_enabled) {
// If using VK_KHR_incremental_present, we provide a hint of the region
// that contains changed content relative to the previously-presented
// image. The implementation can use this hint in order to save
// work/power (by only copying the region in the hint). The
// implementation is free to ignore the hint though, and so we must
// ensure that the entire image has the correctly-drawn content.
uint32_t eighthOfWidth = demo->width / 8;
uint32_t eighthOfHeight = demo->height / 8;
rect.offset.x = eighthOfWidth;
rect.offset.y = eighthOfHeight;
rect.extent.width = eighthOfWidth * 6;
rect.extent.height = eighthOfHeight * 6;
rect.layer = 0;
region.rectangleCount = 1;
region.pRectangles = &rect;
regions.sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
regions.pNext = present.pNext;
regions.swapchainCount = present.swapchainCount;
regions.pRegions = &region;
present.pNext = &regions;
}
// ptime and present_time must outlive the block below: present.pNext ends
// up pointing at present_time when vkQueuePresentKHR is called.
VkPresentTimeGOOGLE ptime;
VkPresentTimesInfoGOOGLE present_time;
if (demo->VK_GOOGLE_display_timing_enabled) {
if (demo->prev_desired_present_time == 0) {
// This must be the first present for this swapchain.
//
// We don't know where we are relative to the presentation engine's
// display's refresh cycle. We also don't know how long rendering
// takes. Let's make a grossly-simplified assumption that the
// desiredPresentTime should be halfway between now and
// now+target_IPD. We will adjust over time.
uint64_t curtime = getTimeInNanoseconds();
if (curtime == 0) {
// Since we didn't find out the current time, don't give a
// desiredPresentTime:
ptime.desiredPresentTime = 0;
} else {
ptime.desiredPresentTime = curtime + (demo->target_IPD >> 1);
}
} else {
ptime.desiredPresentTime = (demo->prev_desired_present_time + demo->target_IPD);
}
ptime.presentID = demo->next_present_id++;
demo->prev_desired_present_time = ptime.desiredPresentTime;
present_time.sType = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE;
present_time.pNext = present.pNext;
present_time.swapchainCount = present.swapchainCount;
present_time.pTimes = &ptime;
present.pNext = &present_time;
}
err = vkQueuePresentKHR(demo->present_queue, &present);
demo->frame_index += 1;
demo->frame_index %= FRAME_LAG;
if (err == VK_ERROR_OUT_OF_DATE_KHR) {
// demo->swapchain is out of date (e.g. the window was resized) and
// must be recreated:
demo_resize(demo);
} else if (err == VK_SUBOPTIMAL_KHR) {
// SUBOPTIMAL could be due to a resize
VkSurfaceCapabilitiesKHR surfCapabilities;
err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(demo->gpu, demo->surface, &surfCapabilities);
assert(!err);
if (surfCapabilities.currentExtent.width != (uint32_t)demo->width ||
surfCapabilities.currentExtent.height != (uint32_t)demo->height) {
demo_resize(demo);
}
} else if (err == VK_ERROR_SURFACE_LOST_KHR) {
vkDestroySurfaceKHR(demo->inst, demo->surface, NULL);
demo_create_surface(demo);
demo_resize(demo);
} else {
assert(!err);
}
}
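// (Re)create the swapchain to match the current surface capabilities and
// requested present mode, then create an image view per swapchain image.
// Runs at startup and again on resize (see demo_resize).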
static void demo_prepare_buffers(struct demo *demo) {
VkResult U_ASSERT_ONLY err;
VkSwapchainKHR oldSwapchain = demo->swapchain;
// Check the surface capabilities and formats
VkSurfaceCapabilitiesKHR surfCapabilities;
err = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(demo->gpu, demo->surface, &surfCapabilities);
assert(!err);
uint32_t presentModeCount;
err = vkGetPhysicalDeviceSurfacePresentModesKHR(demo->gpu, demo->surface, &presentModeCount, NULL);
assert(!err);
VkPresentModeKHR *presentModes = (VkPresentModeKHR *)malloc(presentModeCount * sizeof(VkPresentModeKHR));
assert(presentModes);
err = vkGetPhysicalDeviceSurfacePresentModesKHR(demo->gpu, demo->surface, &presentModeCount, presentModes);
assert(!err);
VkExtent2D swapchainExtent;
// width and height are either both 0xFFFFFFFF, or both not 0xFFFFFFFF.
if (surfCapabilities.currentExtent.width == 0xFFFFFFFF) {
// If the surface size is undefined, the size is set to the size
// of the images requested, which must fit within the minimum and
// maximum values.
swapchainExtent.width = demo->width;
swapchainExtent.height = demo->height;
if (swapchainExtent.width < surfCapabilities.minImageExtent.width) {
swapchainExtent.width = surfCapabilities.minImageExtent.width;
} else if (swapchainExtent.width > surfCapabilities.maxImageExtent.width) {
swapchainExtent.width = surfCapabilities.maxImageExtent.width;
}
if (swapchainExtent.height < surfCapabilities.minImageExtent.height) {
swapchainExtent.height = surfCapabilities.minImageExtent.height;
} else if (swapchainExtent.height > surfCapabilities.maxImageExtent.height) {
swapchainExtent.height = surfCapabilities.maxImageExtent.height;
}
} else {
// If the surface size is defined, the swap chain size must match
swapchainExtent = surfCapabilities.currentExtent;
demo->width = surfCapabilities.currentExtent.width;
demo->height = surfCapabilities.currentExtent.height;
}
if (surfCapabilities.maxImageExtent.width == 0 || surfCapabilities.maxImageExtent.height == 0) {
// The window is minimized; there is nothing to present, so skip
// swapchain creation (and don't leak the present-mode array).
demo->is_minimized = true;
free(presentModes);
return;
} else {
demo->is_minimized = false;
}
// The FIFO present mode is guaranteed by the spec to be supported
// and to have no tearing. It's a great default present mode to use.
VkPresentModeKHR swapchainPresentMode = VK_PRESENT_MODE_FIFO_KHR;
// There are times when you may wish to use another present mode. The
// following code shows how to select them, and the comments provide some
// reasons you may wish to use them.
//
// It should be noted that Vulkan 1.0 doesn't provide a method for
// synchronizing rendering with the presentation engine's display. There
// is a method provided for throttling rendering with the display, but
// there are some presentation engines for which this method will not work.
// If an application doesn't throttle its rendering, and if it renders much
// faster than the refresh rate of the display, this can waste power on
// mobile devices. That is because power is being spent rendering images
// that may never be seen.
// VK_PRESENT_MODE_IMMEDIATE_KHR is for applications that don't care about
// tearing, or have some way of synchronizing their rendering with the
// display.
// VK_PRESENT_MODE_MAILBOX_KHR may be useful for applications that
// generally render a new presentable image every refresh cycle, but are
// occasionally early. In this case, the application wants the new image
// to be displayed instead of the previously-queued-for-presentation image
// that has not yet been displayed.
// VK_PRESENT_MODE_FIFO_RELAXED_KHR is for applications that generally
// render a new presentable image every refresh cycle, but are occasionally
// late. In this case (perhaps because of stuttering/latency concerns),
// the application wants the late image to be immediately displayed, even
// though that may mean some tearing.
if (demo->presentMode != swapchainPresentMode) {
for (size_t i = 0; i < presentModeCount; ++i) {
if (presentModes[i] == demo->presentMode) {
swapchainPresentMode = demo->presentMode;
break;
}
}
}
if (swapchainPresentMode != demo->presentMode) {
ERR_EXIT("Present mode specified is not supported\n", "Present mode unsupported");
}
// Determine the number of VkImages to use in the swap chain.
// The application desires 3 images for triple buffering.
uint32_t desiredNumOfSwapchainImages = 3;
if (desiredNumOfSwapchainImages < surfCapabilities.minImageCount) {
desiredNumOfSwapchainImages = surfCapabilities.minImageCount;
}
// If maxImageCount is 0, we can ask for as many images as we want;
// otherwise we're limited to maxImageCount
if ((surfCapabilities.maxImageCount > 0) && (desiredNumOfSwapchainImages > surfCapabilities.maxImageCount)) {
// Application must settle for fewer images than desired:
desiredNumOfSwapchainImages = surfCapabilities.maxImageCount;
}
VkSurfaceTransformFlagsKHR preTransform;
if (surfCapabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) {
preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
} else {
preTransform = surfCapabilities.currentTransform;
}
// Find a supported composite alpha mode - one of these is guaranteed to be set
VkCompositeAlphaFlagBitsKHR compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
VkCompositeAlphaFlagBitsKHR compositeAlphaFlags[4] = {
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
};
for (uint32_t i = 0; i < ARRAY_SIZE(compositeAlphaFlags); i++) {
if (surfCapabilities.supportedCompositeAlpha & compositeAlphaFlags[i]) {
compositeAlpha = compositeAlphaFlags[i];
break;
}
}
VkSwapchainCreateInfoKHR swapchain_ci = {
.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
.pNext = NULL,
.surface = demo->surface,
.minImageCount = desiredNumOfSwapchainImages,
.imageFormat = demo->format,
.imageColorSpace = demo->color_space,
.imageExtent =
{
.width = swapchainExtent.width,
.height = swapchainExtent.height,
},
.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
.preTransform = preTransform,
.compositeAlpha = compositeAlpha,
.imageArrayLayers = 1,
.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = NULL,
.presentMode = swapchainPresentMode,
.oldSwapchain = oldSwapchain,
.clipped = true,
};
uint32_t i;
err = vkCreateSwapchainKHR(demo->device, &swapchain_ci, NULL, &demo->swapchain);
assert(!err);
// If we just re-created an existing swapchain, we should destroy the old
// swapchain at this point.
// Note: destroying the swapchain also cleans up all its associated
// presentable images once the platform is done with them.
if (oldSwapchain != VK_NULL_HANDLE) {
vkDestroySwapchainKHR(demo->device, oldSwapchain, NULL);
}
err = vkGetSwapchainImagesKHR(demo->device, demo->swapchain, &demo->swapchainImageCount, NULL);
assert(!err);
VkImage *swapchainImages = (VkImage *)malloc(demo->swapchainImageCount * sizeof(VkImage));
assert(swapchainImages);
err = vkGetSwapchainImagesKHR(demo->device, demo->swapchain, &demo->swapchainImageCount, swapchainImages);
assert(!err);
demo->swapchain_image_resources =
(SwapchainImageResources *)malloc(sizeof(SwapchainImageResources) * demo->swapchainImageCount);
assert(demo->swapchain_image_resources);
for (i = 0; i < demo->swapchainImageCount; i++) {
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE, (uint64_t)swapchainImages[i], "SwapchainImage(%u)", i);
}
for (i = 0; i < demo->swapchainImageCount; i++) {
VkImageViewCreateInfo color_image_view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.format = demo->format,
.components =
{
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
},
.subresourceRange =
{.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1},
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.flags = 0,
};
demo->swapchain_image_resources[i].image = swapchainImages[i];
color_image_view.image = demo->swapchain_image_resources[i].image;
err = vkCreateImageView(demo->device, &color_image_view, NULL, &demo->swapchain_image_resources[i].view);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE_VIEW, (uint64_t)demo->swapchain_image_resources[i].view, "SwapchainView(%u)",
i);
}
if (demo->VK_GOOGLE_display_timing_enabled) {
VkRefreshCycleDurationGOOGLE rc_dur;
err = vkGetRefreshCycleDurationGOOGLE(demo->device, demo->swapchain, &rc_dur);
assert(!err);
demo->refresh_duration = rc_dur.refreshDuration;
demo->syncd_with_actual_presents = false;
// Initially target 1X the refresh duration:
demo->target_IPD = demo->refresh_duration;
demo->refresh_duration_multiplier = 1;
demo->prev_desired_present_time = 0;
demo->next_present_id = 1;
}
if (NULL != swapchainImages) {
free(swapchainImages);
}
if (NULL != presentModes) {
free(presentModes);
}
}
static void demo_prepare_depth(struct demo *demo) {
const VkFormat depth_format = VK_FORMAT_D16_UNORM;
const VkImageCreateInfo image = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_TYPE_2D,
.format = depth_format,
.extent = {demo->width, demo->height, 1},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
.flags = 0,
};
VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
.format = depth_format,
.subresourceRange =
{.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT, .baseMipLevel = 0, .levelCount = 1, .baseArrayLayer = 0, .layerCount = 1},
.flags = 0,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
};
if (demo->force_errors) {
// Intentionally force a bad pNext value to generate a validation layer error
view.pNext = &image;
}
VkMemoryRequirements mem_reqs;
VkResult U_ASSERT_ONLY err;
bool U_ASSERT_ONLY pass;
demo->depth.format = depth_format;
/* create image */
err = vkCreateImage(demo->device, &image, NULL, &demo->depth.image);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE, (uint64_t)demo->depth.image, "DepthImage");
vkGetImageMemoryRequirements(demo->device, demo->depth.image, &mem_reqs);
demo->depth.mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
demo->depth.mem_alloc.pNext = NULL;
demo->depth.mem_alloc.allocationSize = mem_reqs.size;
demo->depth.mem_alloc.memoryTypeIndex = 0;
pass = memory_type_from_properties(demo, mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
&demo->depth.mem_alloc.memoryTypeIndex);
assert(pass);
/* allocate memory */
err = vkAllocateMemory(demo->device, &demo->depth.mem_alloc, NULL, &demo->depth.mem);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)demo->depth.mem, "DepthMem");
/* bind memory */
err = vkBindImageMemory(demo->device, demo->depth.image, demo->depth.mem, 0);
assert(!err);
/* create image view */
view.image = demo->depth.image;
err = vkCreateImageView(demo->device, &view, NULL, &demo->depth.view);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE_VIEW, (uint64_t)demo->depth.view, "DepthView");
}
/* Convert ppm image data from header file into RGBA texture image */
#include "lunarg.ppm.h"
bool loadTexture(const char *filename, uint8_t *rgba_data, VkSubresourceLayout *layout, int32_t *width, int32_t *height) {
(void)filename;
char *cPtr;
cPtr = (char *)lunarg_ppm;
if ((unsigned char *)cPtr >= (lunarg_ppm + lunarg_ppm_len) || strncmp(cPtr, "P6\n", 3)) {
return false;
}
while (strncmp(cPtr++, "\n", 1))
;
sscanf(cPtr, "%d %d", width, height);
if (rgba_data == NULL) {
return true;
}
while (strncmp(cPtr++, "\n", 1))
;
if ((unsigned char *)cPtr >= (lunarg_ppm + lunarg_ppm_len) || strncmp(cPtr, "255\n", 4)) {
return false;
}
while (strncmp(cPtr++, "\n", 1))
;
for (int y = 0; y < *height; y++) {
uint8_t *rowPtr = rgba_data;
for (int x = 0; x < *width; x++) {
memcpy(rowPtr, cPtr, 3);
rowPtr[3] = 255; /* Alpha of 1 */
rowPtr += 4;
cPtr += 3;
}
rgba_data += layout->rowPitch;
}
return true;
}
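// Load the texture into a host-visible, host-coherent staging buffer;
// demo_prepare_textures then copies it into an optimally tiled image.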
static void demo_prepare_texture_buffer(struct demo *demo, const char *filename, struct texture_object *tex_obj) {
int32_t tex_width;
int32_t tex_height;
VkResult U_ASSERT_ONLY err;
bool U_ASSERT_ONLY pass;
if (!loadTexture(filename, NULL, NULL, &tex_width, &tex_height)) {
ERR_EXIT("Failed to load textures", "Load Texture Failure");
}
tex_obj->tex_width = tex_width;
tex_obj->tex_height = tex_height;
const VkBufferCreateInfo buffer_create_info = {.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.pNext = NULL,
.flags = 0,
.size = tex_width * tex_height * 4,
.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.queueFamilyIndexCount = 0,
.pQueueFamilyIndices = NULL};
err = vkCreateBuffer(demo->device, &buffer_create_info, NULL, &tex_obj->buffer);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_BUFFER, (uint64_t)tex_obj->buffer, "TexBuffer(%s)", filename);
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(demo->device, tex_obj->buffer, &mem_reqs);
tex_obj->mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
tex_obj->mem_alloc.pNext = NULL;
tex_obj->mem_alloc.allocationSize = mem_reqs.size;
tex_obj->mem_alloc.memoryTypeIndex = 0;
VkFlags requirements = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
pass = memory_type_from_properties(demo, mem_reqs.memoryTypeBits, requirements, &tex_obj->mem_alloc.memoryTypeIndex);
assert(pass);
err = vkAllocateMemory(demo->device, &tex_obj->mem_alloc, NULL, &(tex_obj->mem));
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)tex_obj->mem, "TexBufMemory(%s)", filename);
/* bind memory */
err = vkBindBufferMemory(demo->device, tex_obj->buffer, tex_obj->mem, 0);
assert(!err);
VkSubresourceLayout layout;
memset(&layout, 0, sizeof(layout));
layout.rowPitch = tex_width * 4;
void *data;
err = vkMapMemory(demo->device, tex_obj->mem, 0, tex_obj->mem_alloc.allocationSize, 0, &data);
assert(!err);
if (!loadTexture(filename, data, &layout, &tex_width, &tex_height)) {
fprintf(stderr, "Error loading texture: %s\n", filename);
}
vkUnmapMemory(demo->device, tex_obj->mem);
}
static void demo_prepare_texture_image(struct demo *demo, const char *filename, struct texture_object *tex_obj,
VkImageTiling tiling, VkImageUsageFlags usage, VkFlags required_props) {
const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
int32_t tex_width;
int32_t tex_height;
VkResult U_ASSERT_ONLY err;
bool U_ASSERT_ONLY pass;
if (!loadTexture(filename, NULL, NULL, &tex_width, &tex_height)) {
ERR_EXIT("Failed to load textures", "Load Texture Failure");
}
tex_obj->tex_width = tex_width;
tex_obj->tex_height = tex_height;
const VkImageCreateInfo image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_TYPE_2D,
.format = tex_format,
.extent = {tex_width, tex_height, 1},
.mipLevels = 1,
.arrayLayers = 1,
.samples = VK_SAMPLE_COUNT_1_BIT,
.tiling = tiling,
.usage = usage,
.flags = 0,
.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED,
};
VkMemoryRequirements mem_reqs;
err = vkCreateImage(demo->device, &image_create_info, NULL, &tex_obj->image);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE, (uint64_t)tex_obj->image, "TexImage(%s)", filename);
vkGetImageMemoryRequirements(demo->device, tex_obj->image, &mem_reqs);
tex_obj->mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
tex_obj->mem_alloc.pNext = NULL;
tex_obj->mem_alloc.allocationSize = mem_reqs.size;
tex_obj->mem_alloc.memoryTypeIndex = 0;
pass = memory_type_from_properties(demo, mem_reqs.memoryTypeBits, required_props, &tex_obj->mem_alloc.memoryTypeIndex);
assert(pass);
/* allocate memory */
err = vkAllocateMemory(demo->device, &tex_obj->mem_alloc, NULL, &(tex_obj->mem));
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)tex_obj->mem, "TexImageMem(%s)", filename);
/* bind memory */
err = vkBindImageMemory(demo->device, tex_obj->image, tex_obj->mem, 0);
assert(!err);
if (required_props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
const VkImageSubresource subres = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.mipLevel = 0,
.arrayLayer = 0,
};
VkSubresourceLayout layout;
void *data;
vkGetImageSubresourceLayout(demo->device, tex_obj->image, &subres, &layout);
err = vkMapMemory(demo->device, tex_obj->mem, 0, tex_obj->mem_alloc.allocationSize, 0, &data);
assert(!err);
if (!loadTexture(filename, data, &layout, &tex_width, &tex_height)) {
fprintf(stderr, "Error loading texture: %s\n", filename);
}
vkUnmapMemory(demo->device, tex_obj->mem);
}
tex_obj->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
static void demo_destroy_texture(struct demo *demo, struct texture_object *tex_objs) {
/* clean up staging resources */
vkFreeMemory(demo->device, tex_objs->mem, NULL);
if (tex_objs->image) vkDestroyImage(demo->device, tex_objs->image, NULL);
if (tex_objs->buffer) vkDestroyBuffer(demo->device, tex_objs->buffer, NULL);
}
static void demo_prepare_textures(struct demo *demo) {
const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
VkFormatProperties props;
uint32_t i;
vkGetPhysicalDeviceFormatProperties(demo->gpu, tex_format, &props);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
VkResult U_ASSERT_ONLY err;
if ((props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) && !demo->use_staging_buffer) {
demo_push_cb_label(demo, demo->cmd, NULL, "DirectTexture(%u)", i);
/* Device can texture using linear textures */
demo_prepare_texture_image(demo, tex_files[i], &demo->textures[i], VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
// Nothing in the pipeline needs to complete before the transition starts,
// and the fragment shader must not run until the layout transition completes.
demo_set_image_layout(demo, demo->textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
demo->textures[i].imageLayout, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
demo->staging_texture.image = 0;
demo_pop_cb_label(demo, demo->cmd); // "DirectTexture"
} else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
/* Must use a staging buffer to copy the linear texture into an optimally tiled image */
demo_push_cb_label(demo, demo->cmd, NULL, "StagingTexture(%u)", i);
memset(&demo->staging_texture, 0, sizeof(demo->staging_texture));
demo_prepare_texture_buffer(demo, tex_files[i], &demo->staging_texture);
demo_prepare_texture_image(demo, tex_files[i], &demo->textures[i], VK_IMAGE_TILING_OPTIMAL,
(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT),
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
demo_set_image_layout(demo, demo->textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_PREINITIALIZED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
demo_push_cb_label(demo, demo->cmd, NULL, "StagingBufferCopy(%u)", i);
VkBufferImageCopy copy_region = {
.bufferOffset = 0,
.bufferRowLength = demo->staging_texture.tex_width,
.bufferImageHeight = demo->staging_texture.tex_height,
.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
.imageOffset = {0, 0, 0},
.imageExtent = {demo->staging_texture.tex_width, demo->staging_texture.tex_height, 1},
};
vkCmdCopyBufferToImage(demo->cmd, demo->staging_texture.buffer, demo->textures[i].image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
demo_pop_cb_label(demo, demo->cmd); // "StagingBufferCopy"
demo_set_image_layout(demo, demo->textures[i].image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
demo->textures[i].imageLayout, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
demo_pop_cb_label(demo, demo->cmd); // "StagingTexture"
} else {
/* Can't support VK_FORMAT_R8G8B8A8_UNORM !? */
assert(!"No support for R8G8B8A8_UNORM as texture image format");
}
const VkSamplerCreateInfo sampler = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = NULL,
.magFilter = VK_FILTER_NEAREST,
.minFilter = VK_FILTER_NEAREST,
.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST,
.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
.mipLodBias = 0.0f,
.anisotropyEnable = VK_FALSE,
.maxAnisotropy = 1,
.compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = 0.0f,
.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
.unnormalizedCoordinates = VK_FALSE,
};
VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
.viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = tex_format,
.components =
{
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
},
.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
.flags = 0,
};
/* create sampler */
err = vkCreateSampler(demo->device, &sampler, NULL, &demo->textures[i].sampler);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_SAMPLER, (uint64_t)demo->textures[i].sampler, "Sampler(%u)", i);
/* create image view */
view.image = demo->textures[i].image;
err = vkCreateImageView(demo->device, &view, NULL, &demo->textures[i].view);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_IMAGE_VIEW, (uint64_t)demo->textures[i].view, "TexImageView(%u)", i);
}
}
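/*
 * Fill a vktexcube_vs_uniform block with the current MVP matrix plus the
 * per-vertex positions and texture coordinates, then create one host-visible,
 * host-coherent uniform buffer per swapchain image (left persistently mapped
 * via uniform_memory_ptr) so each in-flight frame can update its own copy.
 */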
void demo_prepare_cube_data_buffers(struct demo *demo) {
VkBufferCreateInfo buf_info;
VkMemoryRequirements mem_reqs;
VkMemoryAllocateInfo mem_alloc;
mat4x4 MVP, VP;
VkResult U_ASSERT_ONLY err;
bool U_ASSERT_ONLY pass;
struct vktexcube_vs_uniform data;
mat4x4_mul(VP, demo->projection_matrix, demo->view_matrix);
mat4x4_mul(MVP, VP, demo->model_matrix);
memcpy(data.mvp, MVP, sizeof(MVP));
// dumpMatrix("MVP", MVP);
for (unsigned int i = 0; i < 12 * 3; i++) {
data.position[i][0] = g_vertex_buffer_data[i * 3];
data.position[i][1] = g_vertex_buffer_data[i * 3 + 1];
data.position[i][2] = g_vertex_buffer_data[i * 3 + 2];
data.position[i][3] = 1.0f;
data.attr[i][0] = g_uv_buffer_data[2 * i];
data.attr[i][1] = g_uv_buffer_data[2 * i + 1];
data.attr[i][2] = 0;
data.attr[i][3] = 0;
}
memset(&buf_info, 0, sizeof(buf_info));
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = sizeof(data);
for (unsigned int i = 0; i < demo->swapchainImageCount; i++) {
err = vkCreateBuffer(demo->device, &buf_info, NULL, &demo->swapchain_image_resources[i].uniform_buffer);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_BUFFER, (uint64_t)demo->swapchain_image_resources[i].uniform_buffer,
"SwapchainUniformBuf(%u)", i);
vkGetBufferMemoryRequirements(demo->device, demo->swapchain_image_resources[i].uniform_buffer, &mem_reqs);
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = mem_reqs.size;
mem_alloc.memoryTypeIndex = 0;
pass = memory_type_from_properties(demo, mem_reqs.memoryTypeBits,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
&mem_alloc.memoryTypeIndex);
assert(pass);
err = vkAllocateMemory(demo->device, &mem_alloc, NULL, &demo->swapchain_image_resources[i].uniform_memory);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)demo->swapchain_image_resources[i].uniform_memory,
"SwapchainUniformMem(%u)", i);
err = vkMapMemory(demo->device, demo->swapchain_image_resources[i].uniform_memory, 0, VK_WHOLE_SIZE, 0,
&demo->swapchain_image_resources[i].uniform_memory_ptr);
assert(!err);
memcpy(demo->swapchain_image_resources[i].uniform_memory_ptr, &data, sizeof data);
err = vkBindBufferMemory(demo->device, demo->swapchain_image_resources[i].uniform_buffer,
demo->swapchain_image_resources[i].uniform_memory, 0);
assert(!err);
}
}
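/*
 * Describe the shader interface: binding 0 is a uniform buffer read by the
 * vertex shader, binding 1 is an array of combined image samplers read by the
 * fragment shader. The resulting descriptor set layout is also the sole set
 * layout of the pipeline layout created here.
 */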
static void demo_prepare_descriptor_layout(struct demo *demo) {
const VkDescriptorSetLayoutBinding layout_bindings[2] = {
[0] =
{
.binding = 0,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
.pImmutableSamplers = NULL,
},
[1] =
{
.binding = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = DEMO_TEXTURE_COUNT,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = NULL,
},
};
const VkDescriptorSetLayoutCreateInfo descriptor_layout = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = NULL,
.bindingCount = 2,
.pBindings = layout_bindings,
};
VkResult U_ASSERT_ONLY err;
err = vkCreateDescriptorSetLayout(demo->device, &descriptor_layout, NULL, &demo->desc_layout);
assert(!err);
const VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.pNext = NULL,
.setLayoutCount = 1,
.pSetLayouts = &demo->desc_layout,
};
err = vkCreatePipelineLayout(demo->device, &pPipelineLayoutCreateInfo, NULL, &demo->pipeline_layout);
assert(!err);
}
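/*
 * Build a single-subpass render pass with one color and one depth attachment.
 * Two dependencies on VK_SUBPASS_EXTERNAL are recorded: one serializes use of
 * the depth buffer, which is shared between swapchain images, and the other
 * orders the color attachment's automatic layout transition against prior
 * color-attachment output.
 */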
static void demo_prepare_render_pass(struct demo *demo) {
// The initial layout for the color and depth attachments will be LAYOUT_UNDEFINED
// because at the start of the renderpass, we don't care about their contents.
// At the start of the subpass, the color attachment's layout will be transitioned
// to LAYOUT_COLOR_ATTACHMENT_OPTIMAL and the depth stencil attachment's layout
// will be transitioned to LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL. At the end of
// the renderpass, the color attachment's layout will be transitioned to
// LAYOUT_PRESENT_SRC_KHR to be ready to present. This is all done as part of
// the renderpass; no explicit barriers are necessary.
const VkAttachmentDescription attachments[2] = {
[0] =
{
.format = demo->format,
.flags = 0,
.samples = VK_SAMPLE_COUNT_1_BIT,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
},
[1] =
{
.format = demo->depth.format,
.flags = 0,
.samples = VK_SAMPLE_COUNT_1_BIT,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
},
};
const VkAttachmentReference color_reference = {
.attachment = 0,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
const VkAttachmentReference depth_reference = {
.attachment = 1,
.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
};
const VkSubpassDescription subpass = {
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.flags = 0,
.inputAttachmentCount = 0,
.pInputAttachments = NULL,
.colorAttachmentCount = 1,
.pColorAttachments = &color_reference,
.pResolveAttachments = NULL,
.pDepthStencilAttachment = &depth_reference,
.preserveAttachmentCount = 0,
.pPreserveAttachments = NULL,
};
VkSubpassDependency attachmentDependencies[2] = {
[0] =
{
// Depth buffer is shared between swapchain images
.srcSubpass = VK_SUBPASS_EXTERNAL,
.dstSubpass = 0,
.srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
.dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
.dependencyFlags = 0,
},
[1] =
{
// Image Layout Transition
.srcSubpass = VK_SUBPASS_EXTERNAL,
.dstSubpass = 0,
.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
.dependencyFlags = 0,
},
};
const VkRenderPassCreateInfo rp_info = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.pNext = NULL,
.flags = 0,
.attachmentCount = 2,
.pAttachments = attachments,
.subpassCount = 1,
.pSubpasses = &subpass,
.dependencyCount = 2,
.pDependencies = attachmentDependencies,
};
VkResult U_ASSERT_ONLY err;
err = vkCreateRenderPass(demo->device, &rp_info, NULL, &demo->render_pass);
assert(!err);
}
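/*
 * Wrap a SPIR-V blob in a VkShaderModule and give it a debug name. Note that
 * codeSize is a byte count while pCode points at 32-bit SPIR-V words, so
 * callers pass the sizeof() of the included code array directly.
 */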
static VkShaderModule demo_prepare_shader_module(const char *name, struct demo *demo, const uint32_t *code, size_t size) {
VkShaderModule module;
VkShaderModuleCreateInfo moduleCreateInfo;
VkResult U_ASSERT_ONLY err;
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.pNext = NULL;
moduleCreateInfo.flags = 0;
moduleCreateInfo.codeSize = size;
moduleCreateInfo.pCode = code;
err = vkCreateShaderModule(demo->device, &moduleCreateInfo, NULL, &module);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_SHADER_MODULE, (uint64_t)module, "%s", name);
return module;
}
static void demo_prepare_vs(struct demo *demo) {
const uint32_t vs_code[] = {
#include "cube.vert.inc"
};
demo->vert_shader_module = demo_prepare_shader_module("cube.vert", demo, vs_code, sizeof(vs_code));
}
static void demo_prepare_fs(struct demo *demo) {
const uint32_t fs_code[] = {
#include "cube.frag.inc"
};
demo->frag_shader_module = demo_prepare_shader_module("cube.frag", demo, fs_code, sizeof(fs_code));
}
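/*
 * Create the graphics pipeline: triangle-list topology, back-face culling
 * with counter-clockwise front faces, depth test LESS_OR_EQUAL with depth
 * writes, no blending, one sample per pixel, and viewport/scissor left
 * dynamic so window resizes do not require a pipeline rebuild. The shader
 * modules are destroyed immediately afterwards since the pipeline keeps no
 * reference to them.
 */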
static void demo_prepare_pipeline(struct demo *demo) {
#define NUM_DYNAMIC_STATES 2 /* Viewport + Scissor */
VkGraphicsPipelineCreateInfo pipeline;
VkPipelineCacheCreateInfo pipelineCache;
VkPipelineVertexInputStateCreateInfo vi;
VkPipelineInputAssemblyStateCreateInfo ia;
VkPipelineRasterizationStateCreateInfo rs;
VkPipelineColorBlendStateCreateInfo cb;
VkPipelineDepthStencilStateCreateInfo ds;
VkPipelineViewportStateCreateInfo vp;
VkPipelineMultisampleStateCreateInfo ms;
VkDynamicState dynamicStateEnables[NUM_DYNAMIC_STATES];
VkPipelineDynamicStateCreateInfo dynamicState;
VkResult U_ASSERT_ONLY err;
memset(dynamicStateEnables, 0, sizeof dynamicStateEnables);
memset(&dynamicState, 0, sizeof dynamicState);
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.pDynamicStates = dynamicStateEnables;
memset(&pipeline, 0, sizeof(pipeline));
pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipeline.layout = demo->pipeline_layout;
memset(&vi, 0, sizeof(vi));
vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
memset(&ia, 0, sizeof(ia));
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
memset(&rs, 0, sizeof(rs));
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs.polygonMode = VK_POLYGON_MODE_FILL;
rs.cullMode = VK_CULL_MODE_BACK_BIT;
rs.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rs.depthClampEnable = VK_FALSE;
rs.rasterizerDiscardEnable = VK_FALSE;
rs.depthBiasEnable = VK_FALSE;
rs.lineWidth = 1.0f;
memset(&cb, 0, sizeof(cb));
cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
VkPipelineColorBlendAttachmentState att_state[1];
memset(att_state, 0, sizeof(att_state));
att_state[0].colorWriteMask = 0xf;
att_state[0].blendEnable = VK_FALSE;
cb.attachmentCount = 1;
cb.pAttachments = att_state;
memset(&vp, 0, sizeof(vp));
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.viewportCount = 1;
dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT;
vp.scissorCount = 1;
dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR;
memset(&ds, 0, sizeof(ds));
ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
ds.depthTestEnable = VK_TRUE;
ds.depthWriteEnable = VK_TRUE;
ds.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
ds.depthBoundsTestEnable = VK_FALSE;
ds.back.failOp = VK_STENCIL_OP_KEEP;
ds.back.passOp = VK_STENCIL_OP_KEEP;
ds.back.compareOp = VK_COMPARE_OP_ALWAYS;
ds.stencilTestEnable = VK_FALSE;
ds.front = ds.back;
memset(&ms, 0, sizeof(ms));
ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms.pSampleMask = NULL;
ms.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
demo_prepare_vs(demo);
demo_prepare_fs(demo);
// Two stages: vs and fs
VkPipelineShaderStageCreateInfo shaderStages[2];
memset(shaderStages, 0, sizeof(shaderStages));
shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].module = demo->vert_shader_module;
shaderStages[0].pName = "main";
shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shaderStages[1].module = demo->frag_shader_module;
shaderStages[1].pName = "main";
memset(&pipelineCache, 0, sizeof(pipelineCache));
pipelineCache.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
err = vkCreatePipelineCache(demo->device, &pipelineCache, NULL, &demo->pipelineCache);
assert(!err);
pipeline.pVertexInputState = &vi;
pipeline.pInputAssemblyState = &ia;
pipeline.pRasterizationState = &rs;
pipeline.pColorBlendState = &cb;
pipeline.pMultisampleState = &ms;
pipeline.pViewportState = &vp;
pipeline.pDepthStencilState = &ds;
pipeline.stageCount = ARRAY_SIZE(shaderStages);
pipeline.pStages = shaderStages;
pipeline.renderPass = demo->render_pass;
pipeline.pDynamicState = &dynamicState;
err = vkCreateGraphicsPipelines(demo->device, demo->pipelineCache, 1, &pipeline, NULL, &demo->pipeline);
assert(!err);
vkDestroyShaderModule(demo->device, demo->frag_shader_module, NULL);
vkDestroyShaderModule(demo->device, demo->vert_shader_module, NULL);
}
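/*
 * Size the descriptor pool for one descriptor set per swapchain image, each
 * holding one uniform buffer descriptor and DEMO_TEXTURE_COUNT combined
 * image-sampler descriptors.
 */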
static void demo_prepare_descriptor_pool(struct demo *demo) {
const VkDescriptorPoolSize type_counts[2] = {
[0] =
{
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = demo->swapchainImageCount,
},
[1] =
{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = demo->swapchainImageCount * DEMO_TEXTURE_COUNT,
},
};
const VkDescriptorPoolCreateInfo descriptor_pool = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = NULL,
.maxSets = demo->swapchainImageCount,
.poolSizeCount = 2,
.pPoolSizes = type_counts,
};
VkResult U_ASSERT_ONLY err;
err = vkCreateDescriptorPool(demo->device, &descriptor_pool, NULL, &demo->desc_pool);
assert(!err);
}
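/*
 * Allocate one descriptor set per swapchain image and write both bindings:
 * binding 0 points at that image's uniform buffer, and binding 1 at the
 * shared texture sampler/view pairs.
 */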
static void demo_prepare_descriptor_set(struct demo *demo) {
VkDescriptorImageInfo tex_descs[DEMO_TEXTURE_COUNT];
VkWriteDescriptorSet writes[2];
VkResult U_ASSERT_ONLY err;
VkDescriptorSetAllocateInfo alloc_info = {.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
.pNext = NULL,
.descriptorPool = demo->desc_pool,
.descriptorSetCount = 1,
.pSetLayouts = &demo->desc_layout};
VkDescriptorBufferInfo buffer_info;
buffer_info.offset = 0;
buffer_info.range = sizeof(struct vktexcube_vs_uniform);
memset(&tex_descs, 0, sizeof(tex_descs));
for (unsigned int i = 0; i < DEMO_TEXTURE_COUNT; i++) {
tex_descs[i].sampler = demo->textures[i].sampler;
tex_descs[i].imageView = demo->textures[i].view;
tex_descs[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
memset(&writes, 0, sizeof(writes));
writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
writes[0].descriptorCount = 1;
writes[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
writes[0].pBufferInfo = &buffer_info;
writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
writes[1].dstBinding = 1;
writes[1].descriptorCount = DEMO_TEXTURE_COUNT;
writes[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
writes[1].pImageInfo = tex_descs;
for (unsigned int i = 0; i < demo->swapchainImageCount; i++) {
err = vkAllocateDescriptorSets(demo->device, &alloc_info, &demo->swapchain_image_resources[i].descriptor_set);
assert(!err);
buffer_info.buffer = demo->swapchain_image_resources[i].uniform_buffer;
writes[0].dstSet = demo->swapchain_image_resources[i].descriptor_set;
writes[1].dstSet = demo->swapchain_image_resources[i].descriptor_set;
vkUpdateDescriptorSets(demo->device, 2, writes, 0, NULL);
}
}
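/*
 * Create one framebuffer per swapchain image. The color attachment (index 0)
 * differs per image; the depth attachment (index 1) is shared by all of them.
 */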
static void demo_prepare_framebuffers(struct demo *demo) {
VkImageView attachments[2];
attachments[1] = demo->depth.view;
const VkFramebufferCreateInfo fb_info = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = NULL,
.renderPass = demo->render_pass,
.attachmentCount = 2,
.pAttachments = attachments,
.width = demo->width,
.height = demo->height,
.layers = 1,
};
VkResult U_ASSERT_ONLY err;
uint32_t i;
for (i = 0; i < demo->swapchainImageCount; i++) {
attachments[0] = demo->swapchain_image_resources[i].view;
err = vkCreateFramebuffer(demo->device, &fb_info, NULL, &demo->swapchain_image_resources[i].framebuffer);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_FRAMEBUFFER, (uint64_t)demo->swapchain_image_resources[i].framebuffer,
"Framebuffer(%u)", i);
}
}
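/*
 * Top-level (re)initialization of everything that depends on the swapchain:
 * command pool and buffers, depth buffer, textures, uniform buffers,
 * descriptors, render pass, pipeline, and framebuffers. Initialization
 * commands (layout transitions, staging copies) are recorded into demo->cmd
 * and flushed before the render loop starts.
 */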
static void demo_prepare(struct demo *demo) {
demo_prepare_buffers(demo);
if (demo->is_minimized) {
demo->prepared = false;
return;
}
VkResult U_ASSERT_ONLY err;
if (demo->cmd_pool == VK_NULL_HANDLE) {
const VkCommandPoolCreateInfo cmd_pool_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = NULL,
.queueFamilyIndex = demo->graphics_queue_family_index,
.flags = 0,
};
err = vkCreateCommandPool(demo->device, &cmd_pool_info, NULL, &demo->cmd_pool);
assert(!err);
}
const VkCommandBufferAllocateInfo cmd = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = NULL,
.commandPool = demo->cmd_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = 1,
};
err = vkAllocateCommandBuffers(demo->device, &cmd, &demo->cmd);
assert(!err);
demo_name_object(demo, VK_OBJECT_TYPE_COMMAND_BUFFER, (uint64_t)demo->cmd, "PrepareCB");
VkCommandBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = 0,
.pInheritanceInfo = NULL,
};
err = vkBeginCommandBuffer(demo->cmd, &cmd_buf_info);
assert(!err);
demo_push_cb_label(demo, demo->cmd, NULL, "Prepare");
demo_prepare_depth(demo);
demo_prepare_textures(demo);
demo_prepare_cube_data_buffers(demo);
demo_prepare_descriptor_layout(demo);
demo_prepare_render_pass(demo);
demo_prepare_pipeline(demo);
for (uint32_t i = 0; i < demo->swapchainImageCount; i++) {
err = vkAllocateCommandBuffers(demo->device, &cmd, &demo->swapchain_image_resources[i].cmd);
assert(!err);
}
if (demo->separate_present_queue) {
const VkCommandPoolCreateInfo present_cmd_pool_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
.pNext = NULL,
.queueFamilyIndex = demo->present_queue_family_index,
.flags = 0,
};
err = vkCreateCommandPool(demo->device, &present_cmd_pool_info, NULL, &demo->present_cmd_pool);
assert(!err);
const VkCommandBufferAllocateInfo present_cmd_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.pNext = NULL,
.commandPool = demo->present_cmd_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = 1,
};
for (uint32_t i = 0; i < demo->swapchainImageCount; i++) {
err = vkAllocateCommandBuffers(demo->device, &present_cmd_info,
&demo->swapchain_image_resources[i].graphics_to_present_cmd);
assert(!err);
demo_build_image_ownership_cmd(demo, i);
demo_name_object(demo, VK_OBJECT_TYPE_COMMAND_BUFFER,
(uint64_t)demo->swapchain_image_resources[i].graphics_to_present_cmd, "GfxToPresent(%u)", i);
}
}
demo_prepare_descriptor_pool(demo);
demo_prepare_descriptor_set(demo);
demo_prepare_framebuffers(demo);
for (uint32_t i = 0; i < demo->swapchainImageCount; i++) {
demo->current_buffer = i;
demo_draw_build_cmd(demo, demo->swapchain_image_resources[i].cmd);
}
/*
* Prepare functions above may generate pipeline commands
* that need to be flushed before beginning the render loop.
*/
demo_pop_cb_label(demo, demo->cmd); // "Prepare"
demo_flush_init_cmd(demo);
if (demo->staging_texture.buffer) {
demo_destroy_texture(demo, &demo->staging_texture);
}
demo->current_buffer = 0;
demo->prepared = true;
}
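/*
 * Destroy everything in roughly the reverse order of creation, after waiting
 * for the device to go idle and for the per-frame fences to signal so no
 * resources are still in use.
 */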
static void demo_cleanup(struct demo *demo) {
uint32_t i;
demo->prepared = false;
vkDeviceWaitIdle(demo->device);
// Wait for fences from present operations
for (i = 0; i < FRAME_LAG; i++) {
vkWaitForFences(demo->device, 1, &demo->fences[i], VK_TRUE, UINT64_MAX);
vkDestroyFence(demo->device, demo->fences[i], NULL);
vkDestroySemaphore(demo->device, demo->image_acquired_semaphores[i], NULL);
vkDestroySemaphore(demo->device, demo->draw_complete_semaphores[i], NULL);
if (demo->separate_present_queue) {
vkDestroySemaphore(demo->device, demo->image_ownership_semaphores[i], NULL);
}
}
// If the window is currently minimized, demo_resize has already done some cleanup for us.
if (!demo->is_minimized) {
for (i = 0; i < demo->swapchainImageCount; i++) {
vkDestroyFramebuffer(demo->device, demo->swapchain_image_resources[i].framebuffer, NULL);
}
vkDestroyDescriptorPool(demo->device, demo->desc_pool, NULL);
vkDestroyPipeline(demo->device, demo->pipeline, NULL);
vkDestroyPipelineCache(demo->device, demo->pipelineCache, NULL);
vkDestroyRenderPass(demo->device, demo->render_pass, NULL);
vkDestroyPipelineLayout(demo->device, demo->pipeline_layout, NULL);
vkDestroyDescriptorSetLayout(demo->device, demo->desc_layout, NULL);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
vkDestroyImageView(demo->device, demo->textures[i].view, NULL);
vkDestroyImage(demo->device, demo->textures[i].image, NULL);
vkFreeMemory(demo->device, demo->textures[i].mem, NULL);
vkDestroySampler(demo->device, demo->textures[i].sampler, NULL);
}
vkDestroySwapchainKHR(demo->device, demo->swapchain, NULL);
vkDestroyImageView(demo->device, demo->depth.view, NULL);
vkDestroyImage(demo->device, demo->depth.image, NULL);
vkFreeMemory(demo->device, demo->depth.mem, NULL);
for (i = 0; i < demo->swapchainImageCount; i++) {
vkDestroyImageView(demo->device, demo->swapchain_image_resources[i].view, NULL);
vkFreeCommandBuffers(demo->device, demo->cmd_pool, 1, &demo->swapchain_image_resources[i].cmd);
vkDestroyBuffer(demo->device, demo->swapchain_image_resources[i].uniform_buffer, NULL);
vkUnmapMemory(demo->device, demo->swapchain_image_resources[i].uniform_memory);
vkFreeMemory(demo->device, demo->swapchain_image_resources[i].uniform_memory, NULL);
}
free(demo->swapchain_image_resources);
free(demo->queue_props);
vkDestroyCommandPool(demo->device, demo->cmd_pool, NULL);
if (demo->separate_present_queue) {
vkDestroyCommandPool(demo->device, demo->present_cmd_pool, NULL);
}
}
vkDeviceWaitIdle(demo->device);
vkDestroyDevice(demo->device, NULL);
if (demo->validate) {
vkDestroyDebugUtilsMessengerEXT(demo->inst, demo->dbg_messenger, NULL);
}
vkDestroySurfaceKHR(demo->inst, demo->surface, NULL);
#if defined(VK_USE_PLATFORM_XLIB_KHR)
XDestroyWindow(demo->display, demo->xlib_window);
XCloseDisplay(demo->display);
#elif defined(VK_USE_PLATFORM_XCB_KHR)
xcb_destroy_window(demo->connection, demo->xcb_window);
xcb_disconnect(demo->connection);
free(demo->atom_wm_delete_window);
#elif defined(VK_USE_PLATFORM_WAYLAND_KHR)
if (demo->keyboard) wl_keyboard_destroy(demo->keyboard);
if (demo->pointer) wl_pointer_destroy(demo->pointer);
if (demo->seat) wl_seat_destroy(demo->seat);
xdg_toplevel_destroy(demo->xdg_toplevel);
xdg_surface_destroy(demo->xdg_surface);
wl_surface_destroy(demo->window);
xdg_wm_base_destroy(demo->xdg_wm_base);
if (demo->xdg_decoration_mgr) {
zxdg_toplevel_decoration_v1_destroy(demo->toplevel_decoration);
zxdg_decoration_manager_v1_destroy(demo->xdg_decoration_mgr);
}
wl_compositor_destroy(demo->compositor);
wl_registry_destroy(demo->registry);
wl_display_disconnect(demo->display);
#elif defined(VK_USE_PLATFORM_DIRECTFB_EXT)
demo->event_buffer->Release(demo->event_buffer);
demo->window->Release(demo->window);
demo->dfb->Release(demo->dfb);
#elif defined(VK_USE_PLATFORM_SCREEN_QNX)
screen_destroy_event(demo->screen_event);
screen_destroy_window(demo->screen_window);
screen_destroy_context(demo->screen_context);
#endif
vkDestroyInstance(demo->inst, NULL);
}
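/*
 * Handle a window resize by destroying all swapchain-dependent objects and
 * then re-running demo_prepare(), which recreates the swapchain at the new
 * size. Before the first demo_prepare() this is a no-op, except to finish a
 * deferred prepare after the window is restored from a minimized state.
 */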
static void demo_resize(struct demo *demo) {
uint32_t i;
// Don't react to resize until after first initialization.
if (!demo->prepared) {
if (demo->is_minimized) {
demo_prepare(demo);
}
return;
}
// In order to properly resize the window, we must re-create the swapchain
// AND redo the command buffers, etc.
//
// First, perform part of the demo_cleanup() function:
demo->prepared = false;
vkDeviceWaitIdle(demo->device);
for (i = 0; i < demo->swapchainImageCount; i++) {
vkDestroyFramebuffer(demo->device, demo->swapchain_image_resources[i].framebuffer, NULL);
}
vkDestroyDescriptorPool(demo->device, demo->desc_pool, NULL);
vkDestroyPipeline(demo->device, demo->pipeline, NULL);
vkDestroyPipelineCache(demo->device, demo->pipelineCache, NULL);
vkDestroyRenderPass(demo->device, demo->render_pass, NULL);
vkDestroyPipelineLayout(demo->device, demo->pipeline_layout, NULL);
vkDestroyDescriptorSetLayout(demo->device, demo->desc_layout, NULL);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
vkDestroyImageView(demo->device, demo->textures[i].view, NULL);
vkDestroyImage(demo->device, demo->textures[i].image, NULL);
vkFreeMemory(demo->device, demo->textures[i].mem, NULL);
vkDestroySampler(demo->device, demo->textures[i].sampler, NULL);
}
vkDestroyImageView(demo->device, demo->depth.view, NULL);
vkDestroyImage(demo->device, demo->depth.image, NULL);
vkFreeMemory(demo->device, demo->depth.mem, NULL);
for (i = 0; i < demo->swapchainImageCount; i++) {
vkDestroyImageView(demo->device, demo->swapchain_image_resources[i].view, NULL);
vkFreeCommandBuffers(demo->device, demo->cmd_pool, 1, &demo->swapchain_image_resources[i].cmd);
vkDestroyBuffer(demo->device, demo->swapchain_image_resources[i].uniform_buffer, NULL);
vkUnmapMemory(demo->device, demo->swapchain_image_resources[i].uniform_memory);
vkFreeMemory(demo->device, demo->swapchain_image_resources[i].uniform_memory, NULL);
}
vkDestroyCommandPool(demo->device, demo->cmd_pool, NULL);
demo->cmd_pool = VK_NULL_HANDLE;
if (demo->separate_present_queue) {
vkDestroyCommandPool(demo->device, demo->present_cmd_pool, NULL);
}
free(demo->swapchain_image_resources);
// Second, re-perform the demo_prepare() function, which will re-create the
// swapchain:
demo_prepare(demo);
}
// On MS-Windows, make this a global, so it's available to WndProc()
struct demo demo;
#if defined(VK_USE_PLATFORM_WIN32_KHR)
static void demo_run(struct demo *demo) {
if (!demo->prepared) return;
demo_draw(demo);
demo->curFrame++;
if (demo->frameCount != INT32_MAX && demo->curFrame == demo->frameCount) {
PostQuitMessage(validation_error);
}
}
// MS-Windows event handling function:
LRESULT CALLBACK WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) {
switch (uMsg) {
case WM_CLOSE:
PostQuitMessage(validation_error);
break;
case WM_PAINT:
// The validation callback calls MessageBox which can generate paint
// events - don't make more Vulkan calls if we got here from the
// callback
if (!in_callback) {
demo_run(&demo);
}
break;
case WM_GETMINMAXINFO: // set window's minimum size
((MINMAXINFO *)lParam)->ptMinTrackSize = demo.minsize;
return 0;
case WM_ERASEBKGND:
return 1;
case WM_SIZE:
// Resize the application to the new window size, except when
// it was minimized. Vulkan doesn't support images or swapchains
// with width=0 and height=0.
if (wParam != SIZE_MINIMIZED) {
demo.width = lParam & 0xffff;
demo.height = (lParam & 0xffff0000) >> 16;
demo_resize(&demo);
}
break;
case WM_KEYDOWN:
switch (wParam) {
case VK_ESCAPE:
PostQuitMessage(validation_error);
break;
case VK_LEFT:
demo.spin_angle -= demo.spin_increment;
break;
case VK_RIGHT:
demo.spin_angle += demo.spin_increment;
break;
case VK_SPACE:
demo.pause = !demo.pause;
break;
}
return 0;
default:
break;
}
return (DefWindowProc(hWnd, uMsg, wParam, lParam));
}
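// Register the window class and create the Win32 window. minsize is set so
// the client area can never shrink to zero height, which Vulkan swapchains
// do not support.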
static void demo_create_window(struct demo *demo) {
WNDCLASSEX win_class;
// Initialize the window class structure:
win_class.cbSize = sizeof(WNDCLASSEX);
win_class.style = CS_HREDRAW | CS_VREDRAW;
win_class.lpfnWndProc = WndProc;
win_class.cbClsExtra = 0;
win_class.cbWndExtra = 0;
win_class.hInstance = demo->connection; // hInstance
win_class.hIcon = LoadIcon(NULL, IDI_APPLICATION);
win_class.hCursor = LoadCursor(NULL, IDC_ARROW);
win_class.hbrBackground = (HBRUSH)GetStockObject(WHITE_BRUSH);
win_class.lpszMenuName = NULL;
win_class.lpszClassName = demo->name;
win_class.hIconSm = LoadIcon(NULL, IDI_WINLOGO);
// Register window class:
if (!RegisterClassEx(&win_class)) {
// It didn't work, so try to give a useful error:
printf("Unexpected error trying to start the application!\n");
fflush(stdout);
exit(1);
}
// Create window with the registered class:
RECT wr = {0, 0, demo->width, demo->height};
AdjustWindowRect(&wr, WS_OVERLAPPEDWINDOW, FALSE);
demo->window = CreateWindowEx(0,
demo->name, // class name
demo->name, // app name
WS_OVERLAPPEDWINDOW | // window style
WS_VISIBLE | WS_SYSMENU,
100, 100, // x/y coords
wr.right - wr.left, // width
wr.bottom - wr.top, // height
NULL, // handle to parent
NULL, // handle to menu
demo->connection, // hInstance
NULL); // no extra parameters
if (!demo->window) {
// It didn't work, so try to give a useful error:
printf("Cannot create a window in which to draw!\n");
fflush(stdout);
exit(1);
}
// Window client area size must be at least 1 pixel high, to prevent crash.
demo->minsize.x = GetSystemMetrics(SM_CXMINTRACK);
demo->minsize.y = GetSystemMetrics(SM_CYMINTRACK) + 1;
}
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
static void demo_create_xlib_window(struct demo *demo) {
const char *display_envar = getenv("DISPLAY");
if (display_envar == NULL || display_envar[0] == '\0') {
printf("Environment variable DISPLAY requires a valid value.\nExiting ...\n");
fflush(stdout);
exit(1);
}
XInitThreads();
demo->display = XOpenDisplay(NULL);
long visualMask = VisualScreenMask;
int numberOfVisuals;
XVisualInfo vInfoTemplate = {0};
vInfoTemplate.screen = DefaultScreen(demo->display);
XVisualInfo *visualInfo = XGetVisualInfo(demo->display, visualMask, &vInfoTemplate, &numberOfVisuals);
Colormap colormap =
XCreateColormap(demo->display, RootWindow(demo->display, vInfoTemplate.screen), visualInfo->visual, AllocNone);
XSetWindowAttributes windowAttributes = {0};
windowAttributes.colormap = colormap;
windowAttributes.background_pixel = 0xFFFFFFFF;
windowAttributes.border_pixel = 0;
windowAttributes.event_mask = KeyPressMask | KeyReleaseMask | StructureNotifyMask | ExposureMask;
demo->xlib_window = XCreateWindow(demo->display, RootWindow(demo->display, vInfoTemplate.screen), 0, 0, demo->width,
demo->height, 0, visualInfo->depth, InputOutput, visualInfo->visual,
CWBackPixel | CWBorderPixel | CWEventMask | CWColormap, &windowAttributes);
XSelectInput(demo->display, demo->xlib_window, ExposureMask | KeyPressMask);
XMapWindow(demo->display, demo->xlib_window);
XFlush(demo->display);
demo->xlib_wm_delete_window = XInternAtom(demo->display, "WM_DELETE_WINDOW", False);
}
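// Dispatch an Xlib event: honor WM_DELETE_WINDOW, map Escape/arrows/space to
// quit/spin/pause, and trigger demo_resize() on ConfigureNotify size changes.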
static void demo_handle_xlib_event(struct demo *demo, const XEvent *event) {
switch (event->type) {
case ClientMessage:
if ((Atom)event->xclient.data.l[0] == demo->xlib_wm_delete_window) demo->quit = true;
break;
case KeyPress:
switch (event->xkey.keycode) {
case 0x9: // Escape
demo->quit = true;
break;
case 0x71: // left arrow key
demo->spin_angle -= demo->spin_increment;
break;
case 0x72: // right arrow key
demo->spin_angle += demo->spin_increment;
break;
case 0x41: // space bar
demo->pause = !demo->pause;
break;
}
break;
case ConfigureNotify:
if ((demo->width != event->xconfigure.width) || (demo->height != event->xconfigure.height)) {
demo->width = event->xconfigure.width;
demo->height = event->xconfigure.height;
demo_resize(demo);
}
break;
default:
break;
}