| /************************************************************************** |
| * |
| * Copyright (C) 2014 Red Hat Inc. |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included |
| * in all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
| * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| * OTHER DEALINGS IN THE SOFTWARE. |
| * |
| **************************************************************************/ |
| #ifdef HAVE_CONFIG_H |
| #include "config.h" |
| #endif |
| |
| #include <unistd.h> |
| #include <stdatomic.h> |
| #include <stdio.h> |
| #include <errno.h> |
| #include "pipe/p_shader_tokens.h" |
| |
| #include "pipe/p_defines.h" |
| #include "pipe/p_state.h" |
| #include "util/macros.h" |
| #include "util/u_inlines.h" |
| #include "util/u_memory.h" |
| #include "util/u_dual_blend.h" |
| #include "util/hash_table.h" |
| #include "util/ralloc.h" |
| |
| #include "util/u_thread.h" |
| #include "util/u_format.h" |
| #include "tgsi/tgsi_parse.h" |
| |
| #include "vrend_object.h" |
| #include "vrend_shader.h" |
| |
| #include "vrend_renderer.h" |
| #include "vrend_blitter.h" |
| #include "vrend_debug.h" |
| #include "vrend_winsys.h" |
| #include "vrend_blitter.h" |
| |
| #include "virgl_util.h" |
| |
| #include "virgl_hw.h" |
| #include "virgl_resource.h" |
| #include "virglrenderer.h" |
| #include "virglrenderer_hw.h" |
| #include "virgl_protocol.h" |
| |
| #include "tgsi/tgsi_text.h" |
| |
| #ifdef HAVE_EPOXY_GLX_H |
| #include <epoxy/glx.h> |
| #endif |
| |
| #ifdef ENABLE_VIDEO |
#include "vrend_video.h"
| #endif |
| |
| #ifdef WIN32 |
| #include <dxgi1_2.h> |
| #endif |
| |
| /* |
| * VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both |
| * virgl_caps_v1 and are exactly the same. |
| * |
| * VIRGL_RENDERER_CAPSET_VIRGL2 has version 0, 1, and 2, but they are |
| * all virgl_caps_v2 and are exactly the same. |
| * |
| * Since virgl_caps_v2 is growable and no backward-incompatible change is |
| * expected, we don't bump up these versions anymore. |
| */ |
| #define VREND_CAPSET_VIRGL_MAX_VERSION 1 |
| #define VREND_CAPSET_VIRGL2_MAX_VERSION 2 |
| |
| static const uint32_t fake_occlusion_query_samples_passed_default = 1024; |
| |
| const struct vrend_if_cbs *vrend_clicbs; |
| |
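/* A fence wraps a GL (or EGL) sync object created at submission time.
 * Fences live on the global fence lists until the sync thread or the
 * polling path retires them. */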
| struct vrend_fence { |
| /* When the sync thread is waiting on the fence and the main thread |
| * destroys the context, ctx is set to NULL. Otherwise, ctx is always |
| * valid. |
| */ |
| struct vrend_context *ctx; |
| uint32_t flags; |
| uint64_t fence_id; |
| |
| union { |
| GLsync glsyncobj; |
| #ifdef HAVE_EPOXY_EGL_H |
| EGLSyncKHR eglsyncobj; |
| #endif |
| }; |
| struct list_head fences; |
| }; |
| |
| struct vrend_query { |
| struct list_head waiting_queries; |
| |
| GLuint id; |
| GLuint type; |
| GLuint index; |
| GLuint gltype; |
| struct vrend_context *ctx; |
| int sub_ctx_id; |
| struct vrend_resource *res; |
| bool fake_samples_passed; |
| }; |
| |
| struct global_error_state { |
| enum virgl_errors last_error; |
| }; |
| |
| enum features_id |
| { |
| feat_arb_or_gles_ext_texture_buffer, |
| feat_arb_robustness, |
| feat_arb_buffer_storage, |
| feat_arrays_of_arrays, |
| feat_ati_meminfo, |
| feat_atomic_counters, |
| feat_base_instance, |
| feat_barrier, |
| feat_bind_vertex_buffers, |
| feat_bit_encoding, |
| feat_blend_equation_advanced, |
| feat_clear_texture, |
| feat_clip_control, |
| feat_compute_shader, |
| feat_copy_image, |
| feat_conditional_render_inverted, |
| feat_conservative_depth, |
| feat_cube_map_array, |
| feat_cull_distance, |
| feat_debug_cb, |
| feat_depth_clamp, |
| feat_draw_instance, |
| feat_draw_parameters, |
| feat_dual_src_blend, |
| feat_egl_image, |
| feat_egl_image_storage, |
| feat_enhanced_layouts, |
| feat_fb_no_attach, |
| feat_framebuffer_fetch, |
| feat_framebuffer_fetch_non_coherent, |
| feat_geometry_shader, |
| feat_gl_conditional_render, |
| feat_gl_prim_restart, |
| feat_gles_khr_robustness, |
| feat_gles31_compatibility, |
| feat_gles31_vertex_attrib_binding, |
| feat_gpu_shader5, |
| feat_group_vote, |
| feat_images, |
| feat_indep_blend, |
| feat_indep_blend_func, |
| feat_indirect_draw, |
| feat_indirect_params, |
| feat_khr_debug, |
| feat_memory_object, |
| feat_memory_object_fd, |
| feat_mesa_invert, |
| feat_ms_scaled_blit, |
| feat_multisample, |
| feat_multi_draw_indirect, |
| feat_nv_conditional_render, |
| feat_nv_prim_restart, |
| feat_shader_noperspective_interpolation, |
| feat_nvx_gpu_memory_info, |
| feat_polygon_offset_clamp, |
| feat_occlusion_query, |
| feat_occlusion_query_boolean, |
| feat_pipeline_statistics_query, |
| feat_qbo, |
| feat_robust_buffer_access, |
| feat_sample_mask, |
| feat_sample_shading, |
| feat_samplers, |
| feat_sampler_border_colors, |
| feat_shader_clock, |
| feat_separate_shader_objects, |
| feat_ssbo, |
| feat_ssbo_barrier, |
| feat_srgb_write_control, |
| feat_stencil_texturing, |
| feat_storage_multisample, |
| feat_tessellation, |
| feat_texture_array, |
| feat_texture_barrier, |
| feat_texture_buffer_range, |
| feat_texture_gather, |
| feat_texture_multisample, |
| feat_texture_query_lod, |
| feat_texture_shadow_lod, |
| feat_texture_srgb_decode, |
| feat_texture_storage, |
| feat_texture_view, |
| feat_timer_query, |
| feat_transform_feedback, |
| feat_transform_feedback2, |
| feat_transform_feedback3, |
| feat_transform_feedback_overflow_query, |
| feat_txqs, |
| feat_ubo, |
| feat_viewport_array, |
| feat_implicit_msaa, |
| feat_anisotropic_filter, |
| feat_seamless_cubemap_per_texture, |
| feat_vs_layer_viewport, |
| feat_vs_viewport_index, |
| feat_last, |
| }; |
| |
| #define FEAT_MAX_EXTS 4 |
| #define UNAVAIL INT_MAX |
| |
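/* A feature is enabled when the host meets the core GL or GLES version
 * requirement, or exposes one of up to FEAT_MAX_EXTS listed extensions
 * (see init_features()).  UNAVAIL means the core version alone can never
 * provide the feature.  For example, FEAT(compute_shader, 43, 31,
 * "GL_ARB_compute_shader") is enabled on GL 4.3+, GLES 3.1+, or wherever
 * the GL_ARB_compute_shader extension is present. */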
| #define FEAT(NAME, GLVER, GLESVER, ...) \ |
| [feat_ ## NAME ] = {GLVER, GLESVER, { __VA_ARGS__ }, #NAME} |
| |
| static const struct { |
| int gl_ver; |
| int gles_ver; |
| const char *gl_ext[FEAT_MAX_EXTS]; |
| const char *log_name; |
| } feature_list[] = { |
| FEAT(arb_or_gles_ext_texture_buffer, 31, UNAVAIL, "GL_ARB_texture_buffer_object", "GL_EXT_texture_buffer", NULL), |
| FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ), |
| FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"), |
| FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"), |
| FEAT(ati_meminfo, UNAVAIL, UNAVAIL, "GL_ATI_meminfo" ), |
| FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ), |
| FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ), |
| FEAT(barrier, 42, 31, "GL_ARB_shader_image_load_store"), |
| FEAT(bind_vertex_buffers, 44, UNAVAIL, NULL), |
| FEAT(bit_encoding, 33, UNAVAIL, "GL_ARB_shader_bit_encoding" ), |
| FEAT(blend_equation_advanced, UNAVAIL, 32, "GL_KHR_blend_equation_advanced" ), |
| FEAT(clear_texture, 44, UNAVAIL, "GL_ARB_clear_texture", "GL_EXT_clear_texture"), |
| FEAT(clip_control, 45, UNAVAIL, "GL_ARB_clip_control", "GL_EXT_clip_control"), |
| FEAT(compute_shader, 43, 31, "GL_ARB_compute_shader" ), |
| FEAT(copy_image, 43, 32, "GL_ARB_copy_image", "GL_EXT_copy_image", "GL_OES_copy_image" ), |
| FEAT(conditional_render_inverted, 45, UNAVAIL, "GL_ARB_conditional_render_inverted" ), |
| FEAT(conservative_depth, 42, UNAVAIL, "GL_ARB_conservative_depth", "GL_EXT_conservative_depth" ), |
| FEAT(cube_map_array, 40, 32, "GL_ARB_texture_cube_map_array", "GL_EXT_texture_cube_map_array", "GL_OES_texture_cube_map_array" ), |
| FEAT(cull_distance, 45, UNAVAIL, "GL_ARB_cull_distance", "GL_EXT_clip_cull_distance" ), |
| FEAT(debug_cb, UNAVAIL, UNAVAIL, NULL), /* special case */ |
| FEAT(draw_instance, 31, 30, "GL_ARB_draw_instanced" ), |
   FEAT(draw_parameters, 46, UNAVAIL, "GL_ARB_shader_draw_parameters"),
| FEAT(dual_src_blend, 33, UNAVAIL, "GL_ARB_blend_func_extended", "GL_EXT_blend_func_extended" ), |
| FEAT(depth_clamp, 32, UNAVAIL, "GL_ARB_depth_clamp", "GL_EXT_depth_clamp", "GL_NV_depth_clamp"), |
| FEAT(enhanced_layouts, 44, UNAVAIL, "GL_ARB_enhanced_layouts"), |
| FEAT(egl_image, UNAVAIL, UNAVAIL, "GL_OES_EGL_image"), |
| FEAT(egl_image_storage, UNAVAIL, UNAVAIL, "GL_EXT_EGL_image_storage"), |
| FEAT(fb_no_attach, 43, 31, "GL_ARB_framebuffer_no_attachments" ), |
| FEAT(framebuffer_fetch, UNAVAIL, UNAVAIL, "GL_EXT_shader_framebuffer_fetch" ), |
| FEAT(framebuffer_fetch_non_coherent, UNAVAIL, UNAVAIL, "GL_EXT_shader_framebuffer_fetch_non_coherent" ), |
| FEAT(geometry_shader, 32, 32, "GL_EXT_geometry_shader", "GL_OES_geometry_shader"), |
| FEAT(gl_conditional_render, 30, UNAVAIL, NULL), |
| FEAT(gl_prim_restart, 31, 30, NULL), |
| FEAT(gles_khr_robustness, UNAVAIL, UNAVAIL, "GL_KHR_robustness" ), |
   FEAT(gles31_compatibility, 45, 31, "GL_ARB_ES3_1_compatibility" ),
| FEAT(gles31_vertex_attrib_binding, 43, 31, "GL_ARB_vertex_attrib_binding" ), |
| FEAT(gpu_shader5, 40, 32, "GL_ARB_gpu_shader5", "GL_EXT_gpu_shader5", "GL_OES_gpu_shader5" ), |
| FEAT(group_vote, 46, UNAVAIL, "GL_ARB_shader_group_vote"), |
| FEAT(images, 42, 31, "GL_ARB_shader_image_load_store" ), |
| FEAT(indep_blend, 30, 32, "GL_EXT_draw_buffers2", "GL_OES_draw_buffers_indexed" ), |
| FEAT(indep_blend_func, 40, 32, "GL_ARB_draw_buffers_blend", "GL_OES_draw_buffers_indexed"), |
| FEAT(indirect_draw, 40, 31, "GL_ARB_draw_indirect" ), |
| FEAT(indirect_params, 46, UNAVAIL, "GL_ARB_indirect_parameters" ), |
| FEAT(khr_debug, 43, 32, "GL_KHR_debug" ), |
| FEAT(memory_object, UNAVAIL, UNAVAIL, "GL_EXT_memory_object"), |
| FEAT(memory_object_fd, UNAVAIL, UNAVAIL, "GL_EXT_memory_object_fd"), |
| FEAT(mesa_invert, UNAVAIL, UNAVAIL, "GL_MESA_pack_invert" ), |
| FEAT(ms_scaled_blit, UNAVAIL, UNAVAIL, "GL_EXT_framebuffer_multisample_blit_scaled" ), |
| FEAT(multisample, 32, 30, "GL_ARB_texture_multisample" ), |
| FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ), |
| FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ), |
| FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ), |
| FEAT(shader_noperspective_interpolation, 31, UNAVAIL, "GL_NV_shader_noperspective_interpolation", "GL_EXT_gpu_shader4"), |
| FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ), |
| FEAT(pipeline_statistics_query, 46, UNAVAIL, "GL_ARB_pipeline_statistics_query"), |
| FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"), |
| FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"), |
| FEAT(occlusion_query_boolean, 33, 30, "GL_EXT_occlusion_query_boolean", "GL_ARB_occlusion_query2"), |
| FEAT(qbo, 44, UNAVAIL, "GL_ARB_query_buffer_object" ), |
| FEAT(robust_buffer_access, 43, UNAVAIL, "GL_ARB_robust_buffer_access_behavior", "GL_KHR_robust_buffer_access_behavior" ), |
| FEAT(sample_mask, 32, 31, "GL_ARB_texture_multisample" ), |
| FEAT(sample_shading, 40, 32, "GL_ARB_sample_shading", "GL_OES_sample_shading" ), |
| FEAT(samplers, 33, 30, "GL_ARB_sampler_objects" ), |
| FEAT(sampler_border_colors, 33, 32, "GL_ARB_sampler_objects", "GL_EXT_texture_border_clamp", "GL_OES_texture_border_clamp" ), |
   FEAT(separate_shader_objects, 41, 31, "GL_ARB_separate_shader_objects"),
| FEAT(shader_clock, UNAVAIL, UNAVAIL, "GL_ARB_shader_clock" ), |
| FEAT(ssbo, 43, 31, "GL_ARB_shader_storage_buffer_object" ), |
| FEAT(ssbo_barrier, 43, 31, "GL_ARB_shader_storage_buffer_object"), |
| FEAT(srgb_write_control, 30, UNAVAIL, "GL_EXT_sRGB_write_control"), |
| FEAT(stencil_texturing, 43, 31, "GL_ARB_stencil_texturing" ), |
| FEAT(storage_multisample, 43, 31, "GL_ARB_texture_storage_multisample" ), |
| FEAT(tessellation, 40, 32, "GL_ARB_tessellation_shader", "GL_OES_tessellation_shader", "GL_EXT_tessellation_shader" ), |
| FEAT(texture_array, 30, 30, "GL_EXT_texture_array" ), |
| FEAT(texture_barrier, 45, UNAVAIL, "GL_ARB_texture_barrier" ), |
| FEAT(texture_buffer_range, 43, 32, "GL_ARB_texture_buffer_range" ), |
| FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ), |
| FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ), |
| FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"), |
| FEAT(texture_shadow_lod, UNAVAIL, UNAVAIL, "GL_EXT_texture_shadow_lod"), |
| FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ), |
| FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ), |
| FEAT(texture_view, 43, UNAVAIL, "GL_ARB_texture_view", "GL_OES_texture_view", "GL_EXT_texture_view" ), |
| FEAT(timer_query, 33, UNAVAIL, "GL_ARB_timer_query", "GL_EXT_disjoint_timer_query"), |
| FEAT(transform_feedback, 30, 30, "GL_EXT_transform_feedback" ), |
| FEAT(transform_feedback2, 40, 30, "GL_ARB_transform_feedback2" ), |
| FEAT(transform_feedback3, 40, UNAVAIL, "GL_ARB_transform_feedback3" ), |
| FEAT(transform_feedback_overflow_query, 46, UNAVAIL, "GL_ARB_transform_feedback_overflow_query" ), |
| FEAT(txqs, 45, UNAVAIL, "GL_ARB_shader_texture_image_samples" ), |
| FEAT(ubo, 31, 30, "GL_ARB_uniform_buffer_object" ), |
| FEAT(viewport_array, 41, UNAVAIL, "GL_ARB_viewport_array", "GL_OES_viewport_array"), |
| FEAT(implicit_msaa, UNAVAIL, UNAVAIL, "GL_EXT_multisampled_render_to_texture"), |
| FEAT(anisotropic_filter, 46, UNAVAIL, "GL_EXT_texture_filter_anisotropic", "GL_ARB_texture_filter_anisotropic"), |
| FEAT(seamless_cubemap_per_texture, UNAVAIL, UNAVAIL, "GL_AMD_seamless_cubemap_per_texture" ), |
| FEAT(vs_layer_viewport, UNAVAIL, UNAVAIL, "GL_AMD_vertex_shader_layer"), |
| FEAT(vs_viewport_index, UNAVAIL, UNAVAIL, "GL_AMD_vertex_shader_viewport_index"), |
| }; |
| |
| struct global_renderer_state { |
| struct vrend_context *ctx0; |
| struct vrend_context *current_ctx; |
| struct vrend_context *current_hw_ctx; |
| |
| struct list_head waiting_query_list; |
| struct list_head fence_list; |
| struct list_head fence_wait_list; |
| struct vrend_fence *fence_waiting; |
| |
| int gl_major_ver; |
| int gl_minor_ver; |
| |
| mtx_t fence_mutex; |
| thrd_t sync_thread; |
| virgl_gl_context sync_context; |
| |
| cnd_t fence_cond; |
| |
| /* only used with async fence callback */ |
| atomic_bool has_waiting_queries; |
| bool polling; |
| mtx_t poll_mutex; |
| cnd_t poll_cond; |
| |
| float tess_factors[6]; |
| int eventfd; |
| |
| uint32_t max_draw_buffers; |
| uint32_t max_texture_buffer_size; |
| uint32_t max_texture_2d_size; |
| uint32_t max_texture_3d_size; |
| uint32_t max_texture_cube_size; |
| uint32_t max_shader_patch_varyings; |
| |
| /* inferred GL caching type */ |
| uint32_t inferred_gl_caching_type; |
| |
| uint64_t features[feat_last / 64 + 1]; |
| |
| bool finishing : 1; |
| bool use_gles : 1; |
| bool use_core_profile : 1; |
| bool use_external_blob : 1; |
| bool use_integer : 1; |
| /* these appeared broken on at least one driver */ |
| bool use_explicit_locations : 1; |
| /* threaded sync */ |
| bool stop_sync_thread : 1; |
| /* async fence callback */ |
| bool use_async_fence_cb : 1; |
| |
| #ifdef HAVE_EPOXY_EGL_H |
| bool use_egl_fence : 1; |
| #endif |
| bool d3d_share_texture : 1; |
| }; |
| |
| struct sysval_uniform_block { |
| GLfloat clipp[VIRGL_NUM_CLIP_PLANES][4]; |
| GLuint stipple_pattern[VREND_POLYGON_STIPPLE_SIZE][4]; |
| GLfloat winsys_adjust_y; |
| GLfloat alpha_ref_val; |
| GLfloat clip_plane_enabled; |
| GLint drawid_base; |
| }; |
| |
| static struct global_renderer_state vrend_state; |
| |
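/* The enabled features are stored as a bitmap: bit (feature_id & 63) of
 * slot (feature_id / 64) in vrend_state.features. */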
| static inline bool has_feature(enum features_id feature_id) |
| { |
| int slot = feature_id / 64; |
| uint64_t mask = 1ull << (feature_id & 63); |
| bool retval = vrend_state.features[slot] & mask ? true : false; |
| VREND_DEBUG(dbg_feature_use, NULL, "Try using feature %s:%d\n", |
| feature_list[feature_id].log_name, |
| retval); |
| return retval; |
| } |
| |
| |
| static inline void set_feature(enum features_id feature_id) |
| { |
| int slot = feature_id / 64; |
| uint64_t mask = 1ull << (feature_id & 63); |
| vrend_state.features[slot] |= mask; |
| } |
| |
| static inline void clear_feature(enum features_id feature_id) |
| { |
| int slot = feature_id / 64; |
| uint64_t mask = 1ull << (feature_id & 63); |
| vrend_state.features[slot] &= ~mask; |
| } |
| |
| |
| struct vrend_linked_shader_program { |
| struct list_head head; |
| struct list_head sl[PIPE_SHADER_TYPES]; |
| bool is_pipeline; |
| union { |
| GLuint program; |
| GLuint pipeline; |
| } id; |
| |
| bool dual_src_linked; |
| struct vrend_shader *ss[PIPE_SHADER_TYPES]; |
| uint64_t vs_fs_key; |
| |
| uint32_t ubo_used_mask[PIPE_SHADER_TYPES]; |
| uint32_t samplers_used_mask[PIPE_SHADER_TYPES]; |
| |
| GLuint *shadow_samp_mask_locs[PIPE_SHADER_TYPES]; |
| GLuint *shadow_samp_add_locs[PIPE_SHADER_TYPES]; |
| |
| GLint const_location[PIPE_SHADER_TYPES]; |
| |
| GLuint *attrib_locs; |
| uint32_t shadow_samp_mask[PIPE_SHADER_TYPES]; |
| |
| GLuint separate_virgl_block_id[PIPE_SHADER_TYPES]; |
| GLint virgl_block_bind; |
| uint32_t sysvalue_data_cookie; |
| GLint ubo_sysval_buffer_id; |
| |
| uint32_t images_used_mask[PIPE_SHADER_TYPES]; |
| GLint *img_locs[PIPE_SHADER_TYPES]; |
| |
| uint32_t ssbo_used_mask[PIPE_SHADER_TYPES]; |
| |
| int32_t tex_levels_uniform_id[PIPE_SHADER_TYPES]; |
| |
| struct vrend_sub_context *ref_context; |
| |
| uint32_t gles_use_query_texturelevel_mask; |
| |
| bool reads_drawid; |
| }; |
| |
| struct vrend_shader { |
| struct vrend_shader *next_variant; |
| struct vrend_shader_selector *sel; |
| |
| struct vrend_variable_shader_info var_sinfo; |
| |
| struct vrend_strarray glsl_strings; |
| GLuint id; |
| GLuint program_id; /* only used for separable shaders */ |
| GLuint last_pipeline_id; |
| uint32_t uid; |
| bool is_compiled; |
| bool is_linked; /* only used for separable shaders */ |
| struct vrend_shader_key key; |
| struct list_head programs; |
| }; |
| |
| struct vrend_shader_selector { |
| struct pipe_reference reference; |
| |
| enum pipe_shader_type type; |
| struct vrend_shader_info sinfo; |
| |
| struct vrend_shader *current; |
| struct tgsi_token *tokens; |
| |
| uint32_t req_local_mem; |
| char *tmp_buf; |
| uint32_t buf_len; |
| uint32_t buf_offset; |
| }; |
| |
| struct vrend_texture { |
| struct vrend_resource base; |
| struct pipe_sampler_state state; |
| GLint cur_swizzle[4]; |
| GLuint cur_srgb_decode; |
| GLuint cur_base, cur_max; |
| }; |
| |
| struct vrend_surface { |
| struct pipe_reference reference; |
| GLuint id; |
| GLuint res_handle; |
| GLuint format; |
| GLuint val0, val1; |
| GLuint nr_samples; |
| struct vrend_resource *texture; |
| }; |
| |
| struct vrend_sampler_state { |
| struct pipe_sampler_state base; |
| struct vrend_sub_context *sub_ctx; |
| GLuint ids[2]; |
| }; |
| |
| struct vrend_depth_stencil_alpha_state { |
| struct pipe_depth_stencil_alpha_state base; |
| struct vrend_sub_context *owning_sub; |
| }; |
| |
| struct vrend_so_target { |
| struct pipe_reference reference; |
| GLuint res_handle; |
| unsigned buffer_offset; |
| unsigned buffer_size; |
| struct vrend_resource *buffer; |
| struct vrend_sub_context *sub_ctx; |
| }; |
| |
| struct vrend_sampler_view { |
| struct pipe_reference reference; |
| GLuint id; |
| enum virgl_formats format; |
| GLenum target; |
| GLuint val0, val1; |
| GLint gl_swizzle[4]; |
| GLuint srgb_decode; |
| GLuint levels; |
| bool emulated_rect; |
| struct vrend_resource *texture; |
| }; |
| |
| struct vrend_image_view { |
| GLuint id; |
| GLenum access; |
| GLenum format; |
| uint32_t vformat; |
| union { |
| struct { |
| unsigned first_layer:16; /**< first layer to use for array textures */ |
| unsigned last_layer:16; /**< last layer to use for array textures */ |
| unsigned level:8; /**< mipmap level to use */ |
| } tex; |
| struct { |
| unsigned offset; /**< offset in bytes */ |
| unsigned size; /**< size of the accessible sub-range in bytes */ |
| } buf; |
| } u; |
| struct vrend_resource *texture; |
| GLuint view_id; |
| }; |
| |
| struct vrend_ssbo { |
| struct vrend_resource *res; |
| unsigned buffer_size; |
| unsigned buffer_offset; |
| }; |
| |
| struct vrend_abo { |
| struct vrend_resource *res; |
| unsigned buffer_size; |
| unsigned buffer_offset; |
| }; |
| |
| struct vrend_vertex_element { |
| struct pipe_vertex_element base; |
| GLenum type; |
| GLboolean norm; |
| GLuint nr_chan; |
| }; |
| |
| struct vrend_vertex_element_array { |
| unsigned count; |
| struct vrend_vertex_element elements[PIPE_MAX_ATTRIBS]; |
| GLuint id; |
| uint32_t signed_int_bitmask; |
| uint32_t unsigned_int_bitmask; |
| uint32_t zyxw_bitmask; |
| struct vrend_sub_context *owning_sub; |
| }; |
| |
| struct vrend_constants { |
| unsigned int *consts; |
| uint32_t num_consts; |
| uint32_t num_allocated_consts; |
| }; |
| |
| struct vrend_shader_view { |
| int num_views; |
| struct vrend_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS]; |
| uint32_t res_id[PIPE_MAX_SHADER_SAMPLER_VIEWS]; |
| uint32_t old_ids[PIPE_MAX_SHADER_SAMPLER_VIEWS]; |
| }; |
| |
| struct vrend_viewport { |
| GLint cur_x, cur_y; |
| GLsizei width, height; |
| GLclampd near_val, far_val; |
| }; |
| |
| /* create a streamout object to support pause/resume */ |
| struct vrend_streamout_object { |
| GLuint id; |
| uint32_t num_targets; |
| uint32_t handles[16]; |
| struct list_head head; |
| int xfb_state; |
| struct vrend_so_target *so_targets[16]; |
| }; |
| |
| #define XFB_STATE_OFF 0 |
| #define XFB_STATE_STARTED_NEED_BEGIN 1 |
| #define XFB_STATE_STARTED 2 |
| #define XFB_STATE_PAUSED 3 |
| |
| struct vrend_vertex_buffer { |
| struct pipe_vertex_buffer base; |
| uint32_t res_id; |
| }; |
| |
| #define VREND_PROGRAM_NQUEUES (1 << 8) |
| #define VREND_PROGRAM_NQUEUE_MASK (VREND_PROGRAM_NQUEUES - 1) |
| |
| struct vrend_sub_context { |
| struct list_head head; |
| |
| virgl_gl_context gl_context; |
| |
| int sub_ctx_id; |
| |
| GLuint vaoid; |
| uint32_t enabled_attribs_bitmask; |
| |
   /* Using an array of lists only adds VREND_PROGRAM_NQUEUES - 1 list_head
    * structures to the consumed memory, but looking up a program can be
    * sped up by a factor of VREND_PROGRAM_NQUEUES, which makes this
    * worthwhile. */
| struct list_head gl_programs[VREND_PROGRAM_NQUEUES]; |
| struct list_head cs_programs; |
| struct util_hash_table *object_hash; |
| |
| struct vrend_vertex_element_array *ve; |
| int num_vbos; |
| int old_num_vbos; /* for cleaning up */ |
| struct vrend_vertex_buffer vbo[PIPE_MAX_ATTRIBS]; |
| |
| struct pipe_index_buffer ib; |
| uint32_t index_buffer_res_id; |
| |
| bool vbo_dirty; |
| bool shader_dirty; |
| bool cs_shader_dirty; |
| bool stencil_state_dirty; |
| bool image_state_dirty; |
| bool blend_state_dirty; |
| |
| uint32_t long_shader_in_progress_handle[PIPE_SHADER_TYPES]; |
| struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES]; |
| struct vrend_linked_shader_program *prog; |
| |
| GLuint prog_ids[PIPE_SHADER_TYPES]; |
| struct vrend_shader_view views[PIPE_SHADER_TYPES]; |
| |
| struct vrend_constants consts[PIPE_SHADER_TYPES]; |
| bool const_dirty[PIPE_SHADER_TYPES]; |
| struct vrend_sampler_state *sampler_state[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS]; |
| |
| struct pipe_constant_buffer cbs[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS]; |
| uint32_t const_bufs_used_mask[PIPE_SHADER_TYPES]; |
| uint32_t const_bufs_dirty[PIPE_SHADER_TYPES]; |
| |
| int num_sampler_states[PIPE_SHADER_TYPES]; |
| |
| uint32_t sampler_views_dirty[PIPE_SHADER_TYPES]; |
| int32_t texture_levels[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS]; |
| int32_t n_samplers[PIPE_SHADER_TYPES]; |
| |
| uint32_t fb_id; |
| int nr_cbufs; |
| struct vrend_surface *zsurf; |
| struct vrend_surface *surf[PIPE_MAX_COLOR_BUFS]; |
| |
| struct vrend_viewport vps[PIPE_MAX_VIEWPORTS]; |
| /* viewport is negative */ |
| uint32_t scissor_state_dirty; |
| uint32_t viewport_state_dirty; |
| uint32_t viewport_state_initialized; |
| |
| uint32_t fb_height; |
| |
| struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS]; |
| |
| struct pipe_blend_state blend_state; |
| struct pipe_depth_stencil_alpha_state dsa_state; |
| struct pipe_rasterizer_state rs_state; |
| |
| uint8_t stencil_refs[2]; |
| bool viewport_is_negative; |
| /* this is set if the contents of the FBO look upside down when viewed |
| with 0,0 as the bottom corner */ |
| bool fbo_origin_upper_left; |
| |
| GLuint blit_fb_ids[2]; |
| |
| struct vrend_depth_stencil_alpha_state *dsa; |
| |
| struct pipe_clip_state ucp_state; |
| |
| bool depth_test_enabled; |
| bool alpha_test_enabled; |
| bool stencil_test_enabled; |
| bool framebuffer_srgb_enabled; |
| |
| int last_shader_idx; |
| |
| GLint draw_indirect_buffer; |
| |
| GLint draw_indirect_params_buffer; |
| |
| struct pipe_rasterizer_state hw_rs_state; |
| struct pipe_blend_state hw_blend_state; |
| |
| struct list_head streamout_list; |
| struct vrend_streamout_object *current_so; |
| |
| struct pipe_blend_color blend_color; |
| |
| uint32_t cond_render_q_id; |
| GLenum cond_render_gl_mode; |
| |
| struct vrend_image_view image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES]; |
| uint32_t images_used_mask[PIPE_SHADER_TYPES]; |
| |
| struct vrend_ssbo ssbo[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS]; |
| uint32_t ssbo_used_mask[PIPE_SHADER_TYPES]; |
| uint32_t ssbo_binding_offset[PIPE_SHADER_TYPES]; |
| |
| struct vrend_abo abo[PIPE_MAX_HW_ATOMIC_BUFFERS]; |
| uint32_t abo_used_mask; |
| struct vrend_context_tweaks tweaks; |
| uint8_t swizzle_output_rgb_to_bgr; |
| uint8_t needs_manual_srgb_encode_bitmask; |
| int fake_occlusion_query_samples_passed_multiplier; |
| |
| int prim_mode; |
| bool drawing; |
| struct vrend_context *parent; |
| struct sysval_uniform_block sysvalue_data; |
| uint32_t sysvalue_data_cookie; |
| uint32_t current_program_id; |
| uint32_t current_pipeline_id; |
| }; |
| |
| struct vrend_untyped_resource { |
| struct virgl_resource *resource; |
| struct list_head head; |
| }; |
| |
| struct vrend_context { |
| char debug_name[64]; |
| |
| struct list_head sub_ctxs; |
| struct list_head vrend_resources; |
| |
| #ifdef ENABLE_VIDEO |
| struct vrend_video_context *video; |
| #endif |
| |
| struct vrend_sub_context *sub; |
| struct vrend_sub_context *sub0; |
| |
| int ctx_id; |
| /* has this ctx gotten an error? */ |
| bool in_error; |
| bool ctx_switch_pending; |
| |
| enum virgl_ctx_errors last_error; |
| |
   /* resources bound to this context */
| struct util_hash_table *res_hash; |
| |
| /* |
| * vrend_context only works with typed virgl_resources. More specifically, |
| * it works with vrend_resources that are inherited from pipe_resources |
| * wrapped in virgl_resources. |
| * |
| * Normally, a vrend_resource is created first by |
| * vrend_renderer_resource_create. It is then wrapped in a virgl_resource |
| * by virgl_resource_create_from_pipe. Depending on whether it is a blob |
| * resource or not, the two functions can be called from different paths. |
| * But we always get both a virgl_resource and a vrend_resource as a |
| * result. |
| * |
| * It is however possible that we encounter untyped virgl_resources that |
| * have no pipe_resources. To work with untyped virgl_resources, we park |
| * them in untyped_resources first when they are attached. We move them |
| * into res_hash only after we get the type information and create the |
| * vrend_resources in vrend_decode_pipe_resource_set_type. |
| */ |
| struct list_head untyped_resources; |
| struct virgl_resource *untyped_resource_cache; |
| |
| struct vrend_shader_cfg shader_cfg; |
| |
| unsigned debug_flags; |
| |
| vrend_context_fence_retire fence_retire; |
| void *fence_retire_data; |
| |
| #ifdef ENABLE_TRACING |
| struct hash_table *active_markers; |
| #endif |
| }; |
| |
| static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause); |
| static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx); |
| static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx); |
| static void vrend_destroy_query_object(void *obj_ptr); |
| static void vrend_finish_context_switch(struct vrend_context *ctx); |
| static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx); |
| static void vrend_update_frontface_state(struct vrend_sub_context *ctx); |
| static int vrender_get_glsl_version(void); |
| static void vrend_destroy_program(struct vrend_linked_shader_program *ent); |
| static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx, |
| struct vrend_resource *res, |
| uint32_t shader_type, |
| int id, int sampler_id, |
| struct vrend_sampler_view *tview); |
| static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples); |
| |
| void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx); |
| |
| static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX_EXTENDED]; |
| |
| static uint32_t vrend_renderer_get_video_memory(void); |
| |
| static inline bool vrend_format_can_sample(enum virgl_formats format) |
| { |
| if (tex_conv_table[format].bindings & VIRGL_BIND_SAMPLER_VIEW) |
| return true; |
| |
| #ifdef ENABLE_MINIGBM_ALLOCATION |
| uint32_t gbm_format = 0; |
| if (virgl_gbm_convert_format(&format, &gbm_format)) |
| return false; |
| |
| if (!gbm || !gbm->device || !gbm_format) |
| return false; |
| |
| uint32_t gbm_usage = GBM_BO_USE_TEXTURING; |
| return gbm_device_is_format_supported(gbm->device, gbm_format, gbm_usage); |
| #else |
| return false; |
| #endif |
| } |
| |
| static inline bool vrend_format_can_readback(enum virgl_formats format) |
| { |
| return tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_READBACK; |
| } |
| |
| static inline bool vrend_format_can_multisample(enum virgl_formats format) |
| { |
| return tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_MULTISAMPLE; |
| } |
| |
| static inline bool vrend_format_can_render(enum virgl_formats format) |
| { |
| return tex_conv_table[format].bindings & VIRGL_BIND_RENDER_TARGET; |
| } |
| |
| static inline bool vrend_format_is_ds(enum virgl_formats format) |
| { |
| return tex_conv_table[format].bindings & VIRGL_BIND_DEPTH_STENCIL; |
| } |
| |
| static inline bool vrend_format_can_scanout(enum virgl_formats format) |
| { |
| #ifdef ENABLE_MINIGBM_ALLOCATION |
| uint32_t gbm_format = 0; |
| if (virgl_gbm_convert_format(&format, &gbm_format)) |
| return false; |
| |
| if (!gbm || !gbm->device || !gbm_format) |
| return false; |
| |
| return gbm_device_is_format_supported(gbm->device, gbm_format, GBM_BO_USE_SCANOUT); |
| #else |
| (void)format; |
| return true; |
| #endif |
| } |
| |
| #ifdef ENABLE_MINIGBM_ALLOCATION |
| static inline bool vrend_format_can_texture_view(enum virgl_formats format) |
| { |
| return has_feature(feat_texture_view) && |
| tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE; |
| } |
| #endif |
| |
| struct vrend_context_tweaks *vrend_get_context_tweaks(struct vrend_context *ctx) |
| { |
| return &ctx->sub->tweaks; |
| } |
| |
| bool vrend_format_is_emulated_alpha(enum virgl_formats format) |
| { |
| if (vrend_state.use_gles || !vrend_state.use_core_profile) |
| return false; |
| return (format == VIRGL_FORMAT_A8_UNORM || |
| format == VIRGL_FORMAT_A16_UNORM); |
| } |
| |
| bool vrend_format_is_bgra(enum virgl_formats format) { |
| return (format == VIRGL_FORMAT_B8G8R8X8_UNORM || |
| format == VIRGL_FORMAT_B8G8R8A8_UNORM || |
| format == VIRGL_FORMAT_B8G8R8X8_SRGB || |
| format == VIRGL_FORMAT_B8G8R8A8_SRGB); |
| } |
| |
| static bool vrend_resource_has_24bpp_internal_format(const struct vrend_resource *res) |
| { |
   /* Some shared resources imported to guest Mesa as EGL images occupy 24bpp instead of the more common 32bpp. */
| return (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE) && |
| (res->base.format == VIRGL_FORMAT_B8G8R8X8_UNORM || |
| res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM)); |
| } |
| |
| static bool vrend_resource_supports_view(const struct vrend_resource *res, |
| UNUSED enum virgl_formats view_format) |
| { |
| /* Texture views on eglimage-backed bgr* resources are not supported and |
| * lead to unexpected format interpretation since internally allocated |
| * bgr* resources use GL_RGBA8 internal format, while eglimage-backed |
| * resources use BGRA8, but GL lacks an equivalent internalformat enum. |
| * |
| * For views that don't require colorspace conversion, we can add swizzles |
| * instead. For views that do require colorspace conversion, manual srgb |
| * decode/encode is required. */ |
| return !(vrend_format_is_bgra(res->base.format) && |
| has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE)) && |
| !vrend_resource_has_24bpp_internal_format(res); |
| } |
| |
| static inline bool |
| vrend_resource_needs_redblue_swizzle(struct vrend_resource *res, |
| enum virgl_formats view_format) |
| { |
| return !vrend_resource_supports_view(res, view_format) && |
| vrend_format_is_bgra(res->base.format) ^ vrend_format_is_bgra(view_format); |
| } |
| |
| static inline bool |
| vrend_resource_needs_srgb_decode(struct vrend_resource *res, |
| enum virgl_formats view_format) |
| { |
| return !vrend_resource_supports_view(res, view_format) && |
| util_format_is_srgb(res->base.format) && |
| !util_format_is_srgb(view_format); |
| } |
| |
| static inline bool |
| vrend_resource_needs_srgb_encode(struct vrend_resource *res, |
| enum virgl_formats view_format) |
| { |
| return !vrend_resource_supports_view(res, view_format) && |
| !util_format_is_srgb(res->base.format) && |
| util_format_is_srgb(view_format); |
| } |
| |
| static bool vrend_blit_needs_swizzle(enum virgl_formats src, |
| enum virgl_formats dst) |
| { |
| for (int i = 0; i < 4; ++i) { |
| if (tex_conv_table[src].swizzle[i] != tex_conv_table[dst].swizzle[i]) |
| return true; |
| } |
| return false; |
| } |
| |
| static inline const char *pipe_shader_to_prefix(enum pipe_shader_type shader_type) |
| { |
| switch (shader_type) { |
| case PIPE_SHADER_VERTEX: return "vs"; |
| case PIPE_SHADER_FRAGMENT: return "fs"; |
| case PIPE_SHADER_GEOMETRY: return "gs"; |
| case PIPE_SHADER_TESS_CTRL: return "tc"; |
| case PIPE_SHADER_TESS_EVAL: return "te"; |
| case PIPE_SHADER_COMPUTE: return "cs"; |
| default: |
| return NULL; |
| }; |
| } |
| |
| static GLenum translate_blend_func_advanced(enum gl_advanced_blend_mode blend) |
| { |
| switch(blend){ |
| case BLEND_MULTIPLY: return GL_MULTIPLY_KHR; |
| case BLEND_SCREEN: return GL_SCREEN_KHR; |
| case BLEND_OVERLAY: return GL_OVERLAY_KHR; |
| case BLEND_DARKEN: return GL_DARKEN_KHR; |
| case BLEND_LIGHTEN: return GL_LIGHTEN_KHR; |
| case BLEND_COLORDODGE: return GL_COLORDODGE_KHR; |
| case BLEND_COLORBURN: return GL_COLORBURN_KHR; |
| case BLEND_HARDLIGHT: return GL_HARDLIGHT_KHR; |
| case BLEND_SOFTLIGHT: return GL_SOFTLIGHT_KHR; |
| case BLEND_DIFFERENCE: return GL_DIFFERENCE_KHR; |
| case BLEND_EXCLUSION: return GL_EXCLUSION_KHR; |
| case BLEND_HSL_HUE: return GL_HSL_HUE_KHR; |
| case BLEND_HSL_SATURATION: return GL_HSL_SATURATION_KHR; |
| case BLEND_HSL_COLOR: return GL_HSL_COLOR_KHR; |
| case BLEND_HSL_LUMINOSITY: return GL_HSL_LUMINOSITY_KHR; |
| default: |
| assert("invalid blend token()" == NULL); |
| return 0; |
| } |
| } |
| |
| static const char *vrend_ctx_error_strings[] = { |
| [VIRGL_ERROR_CTX_NONE] = "None", |
| [VIRGL_ERROR_CTX_UNKNOWN] = "Unknown", |
| [VIRGL_ERROR_CTX_ILLEGAL_SHADER] = "Illegal shader", |
| [VIRGL_ERROR_CTX_ILLEGAL_HANDLE] = "Illegal handle", |
| [VIRGL_ERROR_CTX_ILLEGAL_RESOURCE] = "Illegal resource", |
| [VIRGL_ERROR_CTX_ILLEGAL_SURFACE] = "Illegal surface", |
| [VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT] = "Illegal vertex format", |
| [VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER] = "Illegal command buffer", |
   [VIRGL_ERROR_CTX_GLES_HAVE_TES_BUT_MISS_TCS] = "On a GLES context the shader program has a tessellation evaluation shader but no tessellation control shader",
| [VIRGL_ERROR_GL_ANY_SAMPLES_PASSED] = "Query for ANY_SAMPLES_PASSED not supported", |
| [VIRGL_ERROR_CTX_ILLEGAL_FORMAT] = "Illegal format ID", |
   [VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET] = "Illegal target for sampler view",
   [VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS] = "IOV data size exceeds resource capacity",
   [VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND] = "Dual source blend not supported",
| [VIRGL_ERROR_CTX_UNSUPPORTED_FUNCTION] = "Unsupported host function called", |
| [VIRGL_ERROR_CTX_ILLEGAL_PROGRAM_PIPELINE] = "Illegal shader program pipeline", |
| }; |
| |
| void vrend_report_context_error_internal(const char *fname, struct vrend_context *ctx, |
| enum virgl_ctx_errors error, uint32_t value) |
| { |
| ctx->in_error = true; |
| ctx->last_error = error; |
| vrend_printf("%s: context error reported %d \"%s\" %s %d\n", fname, |
| ctx->ctx_id, ctx->debug_name, vrend_ctx_error_strings[error], |
| value); |
| } |
| |
| #define CORE_PROFILE_WARN_NONE 0 |
| #define CORE_PROFILE_WARN_STIPPLE 1 |
| #define CORE_PROFILE_WARN_POLYGON_MODE 2 |
| #define CORE_PROFILE_WARN_TWO_SIDE 3 |
| #define CORE_PROFILE_WARN_CLAMP 4 |
| #define CORE_PROFILE_WARN_SHADE_MODEL 5 |
| |
| static const char *vrend_core_profile_warn_strings[] = { |
| [CORE_PROFILE_WARN_NONE] = "None", |
| [CORE_PROFILE_WARN_STIPPLE] = "Stipple", |
| [CORE_PROFILE_WARN_POLYGON_MODE] = "Polygon Mode", |
| [CORE_PROFILE_WARN_TWO_SIDE] = "Two Side", |
| [CORE_PROFILE_WARN_CLAMP] = "Clamping", |
| [CORE_PROFILE_WARN_SHADE_MODEL] = "Shade Model", |
| }; |
| |
| static void __report_core_warn(const char *fname, struct vrend_context *ctx, |
| enum virgl_ctx_errors error) |
| { |
| vrend_printf("%s: core profile violation reported %d \"%s\" %s\n", fname, |
| ctx->ctx_id, ctx->debug_name, |
| vrend_core_profile_warn_strings[error]); |
| } |
| #define report_core_warn(ctx, error) __report_core_warn(__func__, ctx, error) |
| |
| |
| #define GLES_WARN_NONE 0 |
| #define GLES_WARN_STIPPLE 1 |
| #define GLES_WARN_POLYGON_MODE 2 |
| #define GLES_WARN_DEPTH_RANGE 3 |
| #define GLES_WARN_POINT_SIZE 4 |
| #define GLES_WARN_SEAMLESS_CUBE_MAP 5 |
| #define GLES_WARN_LOD_BIAS 6 |
| #define GLES_WARN_OFFSET_LINE 8 |
| #define GLES_WARN_OFFSET_POINT 9 |
| //#define GLES_WARN_ free slot 10 |
| #define GLES_WARN_FLATSHADE_FIRST 11 |
| #define GLES_WARN_LINE_SMOOTH 12 |
| #define GLES_WARN_POLY_SMOOTH 13 |
| #define GLES_WARN_DEPTH_CLEAR 14 |
| #define GLES_WARN_LOGIC_OP 15 |
| #define GLES_WARN_TIMESTAMP 16 |
| #define GLES_WARN_IMPLICIT_MSAA_SURFACE 17 |
| |
| ASSERTED |
| static const char *vrend_gles_warn_strings[] = { |
| [GLES_WARN_NONE] = "None", |
| [GLES_WARN_STIPPLE] = "Stipple", |
| [GLES_WARN_POLYGON_MODE] = "Polygon Mode", |
| [GLES_WARN_DEPTH_RANGE] = "Depth Range", |
| [GLES_WARN_POINT_SIZE] = "Point Size", |
| [GLES_WARN_SEAMLESS_CUBE_MAP] = "Seamless Cube Map", |
| [GLES_WARN_LOD_BIAS] = "Lod Bias", |
| [GLES_WARN_OFFSET_LINE] = "Offset Line", |
| [GLES_WARN_OFFSET_POINT] = "Offset Point", |
| [GLES_WARN_FLATSHADE_FIRST] = "Flatshade First", |
| [GLES_WARN_LINE_SMOOTH] = "Line Smooth", |
| [GLES_WARN_POLY_SMOOTH] = "Poly Smooth", |
| [GLES_WARN_DEPTH_CLEAR] = "Depth Clear", |
| [GLES_WARN_LOGIC_OP] = "LogicOp", |
| [GLES_WARN_TIMESTAMP] = "GL_TIMESTAMP", |
| [GLES_WARN_IMPLICIT_MSAA_SURFACE] = "Implicit MSAA Surface", |
| }; |
| |
| static void __report_gles_warn(ASSERTED const char *fname, |
| ASSERTED struct vrend_context *ctx, |
| ASSERTED enum virgl_ctx_errors error) |
| { |
| VREND_DEBUG(dbg_gles, ctx, "%s: GLES violation - %s\n", fname, vrend_gles_warn_strings[error]); |
| } |
| #define report_gles_warn(ctx, error) __report_gles_warn(__func__, ctx, error) |
| |
| static void __report_gles_missing_func(ASSERTED const char *fname, |
| ASSERTED struct vrend_context *ctx, |
| ASSERTED const char *missf) |
| { |
| VREND_DEBUG(dbg_gles, ctx, "%s: GLES function %s is missing\n", fname, missf); |
| } |
| |
| #define report_gles_missing_func(ctx, missf) __report_gles_missing_func(__func__, ctx, missf) |
| |
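/* Scan the feature table once at initialization: set each feature whose
 * core version requirement is met by the context, otherwise fall back to
 * probing its extension list. */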
| static void init_features(int gl_ver, int gles_ver) |
| { |
| for (enum features_id id = 0; id < feat_last; id++) { |
| if (gl_ver >= feature_list[id].gl_ver || |
| gles_ver >= feature_list[id].gles_ver) { |
| set_feature(id); |
| VREND_DEBUG(dbg_features, NULL, "Host feature %s provided by %s %3.1f\n", |
| feature_list[id].log_name, (gl_ver > 0 ? "GL" : "GLES"), |
| 0.1f * (gl_ver > 0 ? gl_ver : gles_ver)); |
| } else { |
| for (uint32_t i = 0; i < FEAT_MAX_EXTS; i++) { |
| if (!feature_list[id].gl_ext[i]) |
| break; |
| if (epoxy_has_gl_extension(feature_list[id].gl_ext[i])) { |
| set_feature(id); |
| VREND_DEBUG(dbg_features, NULL, |
| "Host feature %s provide by %s\n", feature_list[id].log_name, |
| feature_list[id].gl_ext[i]); |
| break; |
| } |
| } |
| } |
| } |
| } |
| |
| static void vrend_destroy_surface(struct vrend_surface *surf) |
| { |
| if (surf->id != surf->texture->id) |
| glDeleteTextures(1, &surf->id); |
| vrend_resource_reference(&surf->texture, NULL); |
| free(surf); |
| } |
| |
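/* pipe_reference() updates both refcounts and returns true when the old
 * object's count dropped to zero, i.e. when the caller must destroy it.
 * The same pattern is used by all the reference helpers below. */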
| static inline void |
| vrend_surface_reference(struct vrend_surface **ptr, struct vrend_surface *surf) |
| { |
| struct vrend_surface *old_surf = *ptr; |
| |
| if (pipe_reference(&(*ptr)->reference, &surf->reference)) |
| vrend_destroy_surface(old_surf); |
| *ptr = surf; |
| } |
| |
| static void vrend_destroy_sampler_view(struct vrend_sampler_view *samp) |
| { |
| if (samp->texture->id != samp->id) |
| glDeleteTextures(1, &samp->id); |
| vrend_resource_reference(&samp->texture, NULL); |
| free(samp); |
| } |
| |
| static inline void |
| vrend_sampler_view_reference(struct vrend_sampler_view **ptr, struct vrend_sampler_view *view) |
| { |
| struct vrend_sampler_view *old_view = *ptr; |
| |
| if (pipe_reference(&(*ptr)->reference, &view->reference)) |
| vrend_destroy_sampler_view(old_view); |
| *ptr = view; |
| } |
| |
| static void vrend_destroy_so_target(struct vrend_so_target *target) |
| { |
| vrend_resource_reference(&target->buffer, NULL); |
| free(target); |
| } |
| |
| static inline void |
| vrend_so_target_reference(struct vrend_so_target **ptr, struct vrend_so_target *target) |
| { |
| struct vrend_so_target *old_target = *ptr; |
| |
| if (pipe_reference(&(*ptr)->reference, &target->reference)) |
| vrend_destroy_so_target(old_target); |
| *ptr = target; |
| } |
| |
| static void vrend_shader_dump(struct vrend_shader *shader) |
| { |
| const char *prefix = pipe_shader_to_prefix(shader->sel->type); |
| if (shader->sel->tmp_buf) |
| vrend_printf("%s: %d TGSI:\n%s\n", prefix, shader->id, shader->sel->tmp_buf); |
| |
| vrend_printf("%s: %d GLSL:\n", prefix, shader->id); |
| strarray_dump_with_line_numbers(&shader->glsl_strings); |
| vrend_printf("\n"); |
| } |
| |
| static void vrend_shader_destroy(struct vrend_shader *shader) |
| { |
| struct vrend_linked_shader_program *ent, *tmp; |
| |
| LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &shader->programs, sl[shader->sel->type]) { |
| vrend_destroy_program(ent); |
| } |
| |
| if (shader->sel->sinfo.separable_program) |
| glDeleteProgram(shader->program_id); |
| glDeleteShader(shader->id); |
| strarray_free(&shader->glsl_strings, true); |
| free(shader); |
| } |
| |
| static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel) |
| { |
| struct vrend_shader *p = sel->current, *c; |
| unsigned i; |
| while (p) { |
| c = p->next_variant; |
| vrend_shader_destroy(p); |
| p = c; |
| } |
| if (sel->sinfo.so_names) |
| for (i = 0; i < sel->sinfo.so_info.num_outputs; i++) |
| free(sel->sinfo.so_names[i]); |
| free(sel->tmp_buf); |
| free(sel->sinfo.so_names); |
| free(sel->sinfo.sampler_arrays); |
| free(sel->sinfo.image_arrays); |
| free(sel->tokens); |
| free(sel); |
| } |
| |
| static inline int conv_shader_type(int type) |
| { |
| switch (type) { |
| case PIPE_SHADER_VERTEX: return GL_VERTEX_SHADER; |
| case PIPE_SHADER_FRAGMENT: return GL_FRAGMENT_SHADER; |
| case PIPE_SHADER_GEOMETRY: return GL_GEOMETRY_SHADER; |
| case PIPE_SHADER_TESS_CTRL: return GL_TESS_CONTROL_SHADER; |
| case PIPE_SHADER_TESS_EVAL: return GL_TESS_EVALUATION_SHADER; |
| case PIPE_SHADER_COMPUTE: return GL_COMPUTE_SHADER; |
| default: |
| return 0; |
| }; |
| } |
| |
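/* Compile the GLSL strings that were generated for this shader variant.
 * For separable shaders, a GL_PROGRAM_SEPARABLE program object is also
 * created so the variant can later be attached to a program pipeline. */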
| static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx, |
| struct vrend_shader *shader) |
| { |
| GLint param; |
| const char *shader_parts[SHADER_MAX_STRINGS]; |
| |
| for (int i = 0; i < shader->glsl_strings.num_strings; i++) |
| shader_parts[i] = shader->glsl_strings.strings[i].buf; |
| |
| shader->id = glCreateShader(conv_shader_type(shader->sel->type)); |
| glShaderSource(shader->id, shader->glsl_strings.num_strings, shader_parts, NULL); |
| glCompileShader(shader->id); |
| glGetShaderiv(shader->id, GL_COMPILE_STATUS, ¶m); |
| if (param == GL_FALSE) { |
| char infolog[65536]; |
| int len; |
| glGetShaderInfoLog(shader->id, 65536, &len, infolog); |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); |
| vrend_printf("shader failed to compile\n%s\n", infolog); |
| vrend_shader_dump(shader); |
| return false; |
| } |
| |
| if (shader->sel->sinfo.separable_program) { |
| shader->program_id = glCreateProgram(); |
| shader->last_pipeline_id = 0xffffffff; |
| glProgramParameteri(shader->program_id, GL_PROGRAM_SEPARABLE, GL_TRUE); |
| glAttachShader(shader->program_id, shader->id); |
| } |
| |
| shader->is_compiled = true; |
| return true; |
| } |
| |
| static inline void |
| vrend_shader_state_reference(struct vrend_shader_selector **ptr, struct vrend_shader_selector *shader) |
| { |
| struct vrend_shader_selector *old_shader = *ptr; |
| |
| if (pipe_reference(&(*ptr)->reference, &shader->reference)) |
| vrend_destroy_shader_selector(old_shader); |
| *ptr = shader; |
| } |
| |
| void |
| vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings, uint32_t flags) |
| { |
| tex_conv_table[entry->format] = *entry; |
| tex_conv_table[entry->format].bindings = bindings; |
| tex_conv_table[entry->format].flags = flags; |
| } |
| |
| void |
| vrend_insert_format_swizzle(int override_format, struct vrend_format_table *entry, |
| uint32_t bindings, enum pipe_swizzle swizzle[4], uint32_t flags) |
| { |
| int i; |
| tex_conv_table[override_format] = *entry; |
| tex_conv_table[override_format].bindings = bindings; |
| tex_conv_table[override_format].flags = flags | VIRGL_TEXTURE_NEED_SWIZZLE; |
| for (i = 0; i < 4; i++) |
| tex_conv_table[override_format].swizzle[i] = swizzle[i]; |
| } |
| |
| const struct vrend_format_table * |
| vrend_get_format_table_entry(enum virgl_formats format) |
| { |
| return &tex_conv_table[format]; |
| } |
| |
| static bool vrend_is_timer_query(GLenum gltype) |
| { |
| return gltype == GL_TIMESTAMP || |
| gltype == GL_TIME_ELAPSED; |
| } |
| |
| static inline void use_program(struct vrend_sub_context *sub_ctx, uint32_t id) |
| { |
| if (sub_ctx->current_program_id != id) { |
| sub_ctx->current_program_id = id; |
| glUseProgram(id); |
| } |
| } |
| |
| static inline void bind_pipeline(struct vrend_sub_context *sub_ctx, uint32_t id) |
| { |
| if (sub_ctx->current_pipeline_id != id) { |
| sub_ctx->current_pipeline_id = id; |
| glBindProgramPipeline(id); |
| } |
| } |
| |
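/* Bind either a program pipeline (separable shaders) or a monolithic
 * program, unbinding the other kind first so the two binding points
 * don't shadow each other. */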
| static void vrend_use_program(struct vrend_sub_context *sub_ctx, |
| struct vrend_linked_shader_program *program) |
| { |
| GLuint id = !program ? 0 : |
| program->is_pipeline ? program->id.pipeline : |
| program->id.program; |
| if (program && program->is_pipeline) { |
| use_program(sub_ctx, 0); |
| bind_pipeline(sub_ctx, id); |
| } else { |
| if (has_feature(feat_separate_shader_objects)) |
| bind_pipeline(sub_ctx, 0); |
| use_program(sub_ctx, id); |
| } |
| } |
| |
| static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable) |
| { |
| if (ctx->sub->depth_test_enabled != depth_test_enable) { |
| ctx->sub->depth_test_enabled = depth_test_enable; |
| if (depth_test_enable) |
| glEnable(GL_DEPTH_TEST); |
| else |
| glDisable(GL_DEPTH_TEST); |
| } |
| } |
| |
| static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_enable) |
| { |
| if (vrend_state.use_core_profile) { |
| /* handled in shaders */ |
| return; |
| } |
| if (ctx->sub->alpha_test_enabled != alpha_test_enable) { |
| ctx->sub->alpha_test_enabled = alpha_test_enable; |
| if (alpha_test_enable) |
| glEnable(GL_ALPHA_TEST); |
| else |
| glDisable(GL_ALPHA_TEST); |
| } |
| } |
| |
| static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool stencil_test_enable) |
| { |
| if (sub_ctx->stencil_test_enabled != stencil_test_enable) { |
| sub_ctx->stencil_test_enabled = stencil_test_enable; |
| if (stencil_test_enable) |
| glEnable(GL_STENCIL_TEST); |
| else |
| glDisable(GL_STENCIL_TEST); |
| } |
| } |
| |
| ASSERTED |
| static void dump_stream_out(struct pipe_stream_output_info *so) |
| { |
| unsigned i; |
| if (!so) |
| return; |
| vrend_printf("streamout: %d\n", so->num_outputs); |
| vrend_printf("strides: "); |
| for (i = 0; i < 4; i++) |
| vrend_printf("%d ", so->stride[i]); |
| vrend_printf("\n"); |
| vrend_printf("outputs:\n"); |
| for (i = 0; i < so->num_outputs; i++) { |
| vrend_printf("\t%d: reg: %d sc: %d, nc: %d ob: %d do: %d st: %d\n", |
| i, |
| so->output[i].register_index, |
| so->output[i].start_component, |
| so->output[i].num_components, |
| so->output[i].output_buffer, |
| so->output[i].dst_offset, |
| so->output[i].stream); |
| } |
| } |
| |
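/* Return a freshly allocated "gl_SkipComponentsN" varying name covering
 * up to four components of padding, decrementing *skip_val accordingly;
 * callers loop until the remaining skip count reaches zero. */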
| static char *get_skip_str(int *skip_val) |
| { |
| char *start_skip = NULL; |
| if (*skip_val < 0) { |
| *skip_val = 0; |
| return NULL; |
| } |
| |
| if (*skip_val == 1) { |
| start_skip = strdup("gl_SkipComponents1"); |
| *skip_val -= 1; |
| } else if (*skip_val == 2) { |
| start_skip = strdup("gl_SkipComponents2"); |
| *skip_val -= 2; |
| } else if (*skip_val == 3) { |
| start_skip = strdup("gl_SkipComponents3"); |
| *skip_val -= 3; |
| } else if (*skip_val >= 4) { |
| start_skip = strdup("gl_SkipComponents4"); |
| *skip_val -= 4; |
| } |
| return start_skip; |
| } |
| |
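/* Translate the stream-output layout into a varyings list for
 * glTransformFeedbackVaryings(), inserting "gl_SkipComponentsN" for
 * padding and "gl_NextBuffer" when the output buffer changes.  As a
 * worked example, a single three-component output at dst_offset 1 in a
 * buffer of stride 5 yields:
 *   { "gl_SkipComponents1", <name>, "gl_SkipComponents1" }
 */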
| static void set_stream_out_varyings(ASSERTED struct vrend_sub_context *sub_ctx, |
| int prog_id, |
| struct vrend_shader_info *sinfo) |
| { |
| struct pipe_stream_output_info *so = &sinfo->so_info; |
| char *varyings[PIPE_MAX_SHADER_OUTPUTS*2]; |
| int j; |
| uint i, n_outputs = 0; |
| int last_buffer = 0; |
| char *start_skip; |
| int buf_offset = 0; |
| int skip; |
| if (!so->num_outputs) |
| return; |
| |
| VREND_DEBUG_EXT(dbg_shader_streamout, sub_ctx->parent, dump_stream_out(so)); |
| |
| for (i = 0; i < so->num_outputs; i++) { |
| if (last_buffer != so->output[i].output_buffer) { |
| |
| skip = so->stride[last_buffer] - buf_offset; |
| while (skip && n_outputs < ARRAY_SIZE(varyings)) { |
| start_skip = get_skip_str(&skip); |
| if (start_skip) |
| varyings[n_outputs++] = start_skip; |
| } |
| for (j = last_buffer; j < so->output[i].output_buffer && n_outputs < ARRAY_SIZE(varyings); j++) |
| varyings[n_outputs++] = strdup("gl_NextBuffer"); |
| last_buffer = so->output[i].output_buffer; |
| buf_offset = 0; |
| } |
| |
| skip = so->output[i].dst_offset - buf_offset; |
| while (skip && n_outputs < ARRAY_SIZE(varyings)) { |
| start_skip = get_skip_str(&skip); |
| if (start_skip) |
| varyings[n_outputs++] = start_skip; |
| } |
| buf_offset = so->output[i].dst_offset; |
| |
| buf_offset += so->output[i].num_components; |
| if (sinfo->so_names[i] && n_outputs < ARRAY_SIZE(varyings)) |
| varyings[n_outputs++] = strdup(sinfo->so_names[i]); |
| } |
| |
| skip = so->stride[last_buffer] - buf_offset; |
| while (skip && n_outputs < ARRAY_SIZE(varyings)) { |
| start_skip = get_skip_str(&skip); |
| if (start_skip) |
| varyings[n_outputs++] = start_skip; |
| } |
| |
| glTransformFeedbackVaryings(prog_id, n_outputs, |
| (const GLchar **)varyings, GL_INTERLEAVED_ATTRIBS_EXT); |
| |
| for (i = 0; i < n_outputs; i++) |
| if (varyings[i]) |
| free(varyings[i]); |
| } |
| |
| static inline int |
| vrend_get_uniform_location(struct vrend_linked_shader_program *sprog, |
| char *name, int shader_type) |
| { |
| assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program); |
| |
| GLint id = sprog->is_pipeline ? |
| sprog->ss[shader_type]->program_id : |
| sprog->id.program; |
| |
| return glGetUniformLocation(id, name); |
| } |
| |
| static inline void |
| vrend_set_active_pipeline_stage(struct vrend_linked_shader_program *sprog, int shader_type) |
| { |
| if (sprog->is_pipeline && sprog->ss[shader_type]) |
| glActiveShaderProgram(sprog->id.pipeline, sprog->ss[shader_type]->program_id); |
| } |
| |
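/* Assign GL texture units to this stage's samplers.  Units are handed
 * out sequentially across all stages via next_sampler_id, and locations
 * of the shadow-sampler mask/add uniforms are cached for later use. */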
| static int bind_sampler_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type, int next_sampler_id) |
| { |
| const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; |
| |
| if (sinfo->samplers_used_mask) { |
| uint32_t mask = sinfo->samplers_used_mask; |
| sprog->shadow_samp_mask[shader_type] = sinfo->shadow_samp_mask; |
| if (sinfo->shadow_samp_mask) { |
| unsigned nsamp = util_bitcount(sinfo->samplers_used_mask); |
| sprog->shadow_samp_mask_locs[shader_type] = calloc(nsamp, sizeof(uint32_t)); |
| sprog->shadow_samp_add_locs[shader_type] = calloc(nsamp, sizeof(uint32_t)); |
| } else { |
| sprog->shadow_samp_mask_locs[shader_type] = sprog->shadow_samp_add_locs[shader_type] = NULL; |
| } |
| const char *prefix = pipe_shader_to_prefix(shader_type); |
| int sampler_index = 0; |
| while(mask) { |
| uint32_t i = u_bit_scan(&mask); |
| char name[64]; |
         if (sinfo->num_sampler_arrays) {
            int arr_idx = vrend_shader_lookup_sampler_array(sinfo, i);
            snprintf(name, sizeof(name), "%ssamp%d[%d]", prefix, arr_idx, i - arr_idx);
         } else
            snprintf(name, sizeof(name), "%ssamp%d", prefix, i);
| |
| vrend_set_active_pipeline_stage(sprog, shader_type); |
| glUniform1i(vrend_get_uniform_location(sprog, name, shader_type), |
| next_sampler_id++); |
| |
         if (sinfo->shadow_samp_mask & (1 << i)) {
            snprintf(name, sizeof(name), "%sshadmask%d", prefix, i);
            sprog->shadow_samp_mask_locs[shader_type][sampler_index] =
                  vrend_get_uniform_location(sprog, name, shader_type);
            snprintf(name, sizeof(name), "%sshadadd%d", prefix, i);
            sprog->shadow_samp_add_locs[shader_type][sampler_index] =
                  vrend_get_uniform_location(sprog, name, shader_type);
         }
| } |
| sampler_index++; |
| } |
| } else { |
| sprog->shadow_samp_mask_locs[shader_type] = NULL; |
| sprog->shadow_samp_add_locs[shader_type] = NULL; |
| sprog->shadow_samp_mask[shader_type] = 0; |
| } |
| sprog->samplers_used_mask[shader_type] = sinfo->samplers_used_mask; |
| |
| return next_sampler_id; |
| } |
| |
| static void bind_const_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type) |
| { |
| if (sprog->ss[shader_type]->sel->sinfo.num_consts) { |
| char name[32]; |
| snprintf(name, 32, "%sconst0", pipe_shader_to_prefix(shader_type)); |
| sprog->const_location[shader_type] = vrend_get_uniform_location(sprog, name, |
| shader_type); |
| } else |
| sprog->const_location[shader_type] = -1; |
| } |
| |
| static inline GLuint |
| vrend_get_uniform_block_index(struct vrend_linked_shader_program *sprog, |
| char *name, int shader_type) |
| { |
| assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program); |
| |
| GLuint id = sprog->is_pipeline ? |
| sprog->ss[shader_type]->program_id : |
| sprog->id.program; |
| |
| return glGetUniformBlockIndex(id, name); |
| } |
| |
| static inline void |
| vrend_uniform_block_binding(struct vrend_linked_shader_program *sprog, |
| int shader_type, int loc, int value) |
| { |
| assert(!sprog->is_pipeline || sprog->ss[shader_type]->sel->sinfo.separable_program); |
| |
| GLint id = sprog->is_pipeline ? |
| sprog->ss[shader_type]->program_id : |
| sprog->id.program; |
| |
| glUniformBlockBinding(id, loc, value); |
| } |
| |
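/* Bind this stage's uniform blocks to sequentially assigned UBO binding
 * points; next_ubo_id is threaded through all stages so bindings never
 * collide between the shaders of one program. */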
| static int bind_ubo_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type, int next_ubo_id) |
| { |
| const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; |
| if (sinfo->ubo_used_mask) { |
| const char *prefix = pipe_shader_to_prefix(shader_type); |
| |
| unsigned mask = sinfo->ubo_used_mask; |
| while (mask) { |
| uint32_t ubo_idx = u_bit_scan(&mask); |
| char name[32]; |
| if (sinfo->ubo_indirect) |
| snprintf(name, 32, "%subo[%d]", prefix, ubo_idx - 1); |
| else |
| snprintf(name, 32, "%subo%d", prefix, ubo_idx); |
| |
| GLuint loc = vrend_get_uniform_block_index(sprog, name, shader_type); |
| vrend_uniform_block_binding(sprog, shader_type, loc, next_ubo_id++); |
| } |
| } |
| |
| sprog->ubo_used_mask[shader_type] = sinfo->ubo_used_mask; |
| |
| return next_ubo_id; |
| } |
| |
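| /* Bind the "VirglBlock" uniform block that carries internal system
| * values. The backing buffer object is created lazily the first time
| * any stage of the program references the block, and is sized from the
| * block's GL_UNIFORM_BLOCK_DATA_SIZE as queried below. */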
| static void bind_virgl_block_loc(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type, |
| int virgl_block_ubo_id) |
| { |
| sprog->separate_virgl_block_id[shader_type] = |
| vrend_get_uniform_block_index(sprog, "VirglBlock", shader_type); |
| |
| if (sprog->separate_virgl_block_id[shader_type] != GL_INVALID_INDEX) { |
| bool created_virgl_block_buffer = false; |
| |
| if (sprog->virgl_block_bind == -1) { |
| sprog->virgl_block_bind = virgl_block_ubo_id; |
| if (sprog->ubo_sysval_buffer_id == -1) { |
| glGenBuffers(1, (GLuint *) &sprog->ubo_sysval_buffer_id); |
| created_virgl_block_buffer = true; |
| } |
| } |
| |
| vrend_set_active_pipeline_stage(sprog, shader_type); |
| vrend_uniform_block_binding(sprog, shader_type, |
| sprog->separate_virgl_block_id[shader_type], |
| sprog->virgl_block_bind); |
| |
| GLint virgl_block_size; |
| int prog_id = sprog->is_pipeline ? sprog->ss[shader_type]->program_id : |
| sprog->id.program; |
| glGetActiveUniformBlockiv(prog_id, sprog->separate_virgl_block_id[shader_type], |
| GL_UNIFORM_BLOCK_DATA_SIZE, &virgl_block_size); |
| assert((size_t) virgl_block_size >= sizeof(struct sysval_uniform_block)); |
| |
| if (created_virgl_block_buffer) { |
| glBindBuffer(GL_UNIFORM_BUFFER, sprog->ubo_sysval_buffer_id); |
| glBufferData(GL_UNIFORM_BUFFER, virgl_block_size, NULL, GL_DYNAMIC_DRAW); |
| glBindBuffer(GL_UNIFORM_BUFFER, 0); |
| } |
| } |
| } |
| |
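| /* (Re)assign sampler and UBO binding points for all bound stages in
| * pipeline order. The VirglBlock is bound in a second pass so that it
| * can take the first binding point after all regular UBOs. */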
| static void rebind_ubo_and_sampler_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type last_shader) |
| { |
| int next_sampler_id = 0; |
| int next_ubo_id = 0; |
| |
| for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX; |
| shader_type <= last_shader; |
| shader_type++) { |
| if (!sprog->ss[shader_type]) |
| continue; |
| |
| next_sampler_id = bind_sampler_locs(sprog, shader_type, next_sampler_id); |
| next_ubo_id = bind_ubo_locs(sprog, shader_type, next_ubo_id); |
| |
| if (sprog->is_pipeline) |
| sprog->ss[shader_type]->last_pipeline_id = sprog->id.pipeline; |
| } |
| |
| /* Now `next_ubo_id` is the last ubo id, which is used for the VirglBlock. */ |
| sprog->virgl_block_bind = -1; |
| for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX; |
| shader_type <= last_shader; |
| shader_type++) { |
| if (!sprog->ss[shader_type]) |
| continue; |
| |
| bind_virgl_block_loc(sprog, shader_type, next_ubo_id); |
| } |
| } |
| |
| static void bind_ssbo_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type) |
| { |
| if (!has_feature(feat_ssbo)) |
| return; |
| sprog->ssbo_used_mask[shader_type] = sprog->ss[shader_type]->sel->sinfo.ssbo_used_mask; |
| } |
| |
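| /* Resolve and cache the uniform locations of the images the stage
| * uses, either per declared array element or per individual image
| * (with unused slots set to -1 in the latter case). */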
| static void bind_image_locs(struct vrend_linked_shader_program *sprog, |
| enum pipe_shader_type shader_type) |
| { |
| int i; |
| char name[32]; |
| const char *prefix = pipe_shader_to_prefix(shader_type); |
| const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; |
| |
| uint32_t mask = sinfo->images_used_mask; |
| if (!mask && !sinfo->num_image_arrays) |
| return; |
| |
| if (!has_feature(feat_images)) |
| return; |
| |
| int nsamp = util_last_bit(mask); |
| if (nsamp) { |
| sprog->img_locs[shader_type] = calloc(nsamp, sizeof(GLint)); |
| if (!sprog->img_locs[shader_type]) |
| return; |
| } else |
| sprog->img_locs[shader_type] = NULL; |
| |
| if (sinfo->num_image_arrays) { |
| for (i = 0; i < sinfo->num_image_arrays; i++) { |
| struct vrend_array *img_array = &sinfo->image_arrays[i]; |
| for (int j = 0; j < img_array->array_size; j++) { |
| snprintf(name, 32, "%simg%d[%d]", prefix, img_array->first, j); |
| sprog->img_locs[shader_type][img_array->first + j] = |
| vrend_get_uniform_location(sprog, name, shader_type); |
| if (sprog->img_locs[shader_type][img_array->first + j] == -1) |
| vrend_printf("failed to get uniform loc for image %s\n", name);
| } |
| } |
| } else if (mask) { |
| for (i = 0; i < nsamp; i++) { |
| if (mask & (1 << i)) { |
| snprintf(name, 32, "%simg%d", prefix, i); |
| sprog->img_locs[shader_type][i] = |
| vrend_get_uniform_location(sprog, name, shader_type); |
| if (sprog->img_locs[shader_type][i] == -1) |
| vrend_printf("failed to get uniform loc for image %s\n", name);
| } else { |
| sprog->img_locs[shader_type][i] = -1; |
| } |
| } |
| } |
| sprog->images_used_mask[shader_type] = mask; |
| } |
| |
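| /* Link a GL program and dump the info log on failure. */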
| static bool vrend_link(GLuint id) |
| { |
| GLint lret; |
| glLinkProgram(id); |
| glGetProgramiv(id, GL_LINK_STATUS, &lret); |
| if (lret == GL_FALSE) { |
| char infolog[65536]; |
| int len; |
| glGetProgramInfoLog(id, 65536, &len, infolog); |
| vrend_printf("Error linking program:\n%s\n", infolog); |
| return false; |
| } |
| return true; |
| } |
| |
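| /* Link a single separable shader stage into its own program.
| * Stream-out varyings, fragment output bindings and vertex attrib
| * locations must all be set before the link for them to take effect. */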
| static bool vrend_link_separable_shader(struct vrend_sub_context *sub_ctx, |
| struct vrend_shader *shader, int type) |
| { |
| int i; |
| char name[64]; |
| |
| if (type == PIPE_SHADER_VERTEX || type == PIPE_SHADER_GEOMETRY || |
| type == PIPE_SHADER_TESS_EVAL) |
| set_stream_out_varyings(sub_ctx, shader->program_id, &shader->sel->sinfo); |
| |
| if (type == PIPE_SHADER_FRAGMENT && shader->sel->sinfo.num_outputs > 1) { |
| bool dual_src_linked = util_blend_state_is_dual(&sub_ctx->blend_state, 0); |
| if (dual_src_linked) { |
| if (has_feature(feat_dual_src_blend)) { |
| if (!vrend_state.use_gles) { |
| glBindFragDataLocationIndexed(shader->program_id, 0, 0, "fsout_c0"); |
| glBindFragDataLocationIndexed(shader->program_id, 0, 1, "fsout_c1"); |
| } else { |
| glBindFragDataLocationIndexedEXT(shader->program_id, 0, 0, "fsout_c0"); |
| glBindFragDataLocationIndexedEXT(shader->program_id, 0, 1, "fsout_c1"); |
| } |
| } else { |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0); |
| } |
| } else if (!vrend_state.use_gles && has_feature(feat_dual_src_blend)) { |
| /* On GLES the output layout is emitted directly in the shader, so
| * there is no need to define the bindings here; on desktop GL each
| * color output is bound explicitly. */
| for (int i = 0; i < shader->sel->sinfo.num_outputs; ++i) { |
| if (shader->sel->sinfo.fs_output_layout[i] >= 0) { |
| char buf[64]; |
| snprintf(buf, sizeof(buf), "fsout_c%d", |
| shader->sel->sinfo.fs_output_layout[i]); |
| glBindFragDataLocationIndexed(shader->program_id, |
| shader->sel->sinfo.fs_output_layout[i], |
| 0, buf); |
| } |
| } |
| } |
| } |
| |
| if (type == PIPE_SHADER_VERTEX && has_feature(feat_gles31_vertex_attrib_binding)) { |
| uint32_t mask = shader->sel->sinfo.attrib_input_mask; |
| while (mask) { |
| i = u_bit_scan(&mask); |
| snprintf(name, sizeof(name), "in_%d", i);
| glBindAttribLocation(shader->program_id, i, name); |
| } |
| } |
| |
| shader->is_linked = vrend_link(shader->program_id); |
| |
| if (!shader->is_linked) { |
| /* dump shaders */ |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); |
| vrend_shader_dump(shader); |
| } |
| |
| return shader->is_linked; |
| } |
| |
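| /* Create, link and bind a compute-only program for the given compute
| * shader, and resolve all of its resource locations. */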
| static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_context *ctx, |
| struct vrend_shader *cs) |
| { |
| struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program); |
| GLuint prog_id; |
| prog_id = glCreateProgram(); |
| glAttachShader(prog_id, cs->id); |
| |
| if (!vrend_link(prog_id)) { |
| /* dump shaders */ |
| vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); |
| vrend_shader_dump(cs); |
| glDeleteProgram(prog_id); |
| free(sprog); |
| return NULL; |
| } |
| sprog->ss[PIPE_SHADER_COMPUTE] = cs; |
| |
| list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs); |
| sprog->id.program = prog_id; |
| list_addtail(&sprog->head, &ctx->sub->cs_programs); |
| |
| vrend_use_program(ctx->sub, sprog); |
| |
| bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0); |
| bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0); |
| bind_ssbo_locs(sprog, PIPE_SHADER_COMPUTE); |
| bind_const_locs(sprog, PIPE_SHADER_COMPUTE); |
| bind_image_locs(sprog, PIPE_SHADER_COMPUTE); |
| return sprog; |
| } |
| |
| static inline bool |
| vrend_link_stage(struct vrend_shader *stage) { |
| if (!stage->is_linked) |
| stage->is_linked = vrend_link(stage->program_id); |
| return stage->is_linked; |
| } |
| |
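| /* Create a linked program (or, for separable shaders, a program
| * pipeline) from the given stages, link it, and resolve all uniform,
| * UBO, sampler, image and SSBO bindings. Returns NULL on link failure. */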
| static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx, |
| struct vrend_shader *vs, |
| struct vrend_shader *fs, |
| struct vrend_shader *gs, |
| struct vrend_shader *tcs, |
| struct vrend_shader *tes, |
| bool separable) |
| { |
| struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program); |
| char name[64]; |
| int i; |
| GLuint prog_id = 0; |
| GLuint pipeline_id = 0; |
| GLuint vs_id = 0, fs_id = 0, gs_id = 0, tes_id = 0;
| enum pipe_shader_type last_shader; |
| if (!sprog) |
| return NULL; |
| |
| if (separable) { |
| glGenProgramPipelines(1, &pipeline_id); |
| |
| vs_id = vs->program_id; |
| fs_id = fs->program_id; |
| if (gs) |
| gs_id = gs->program_id; |
| if (tes) |
| tes_id = tes->program_id; |
| } else { /* non-separable programs */
| prog_id = glCreateProgram(); |
| glAttachShader(prog_id, vs->id); |
| if (tcs && tcs->id > 0) |
| glAttachShader(prog_id, tcs->id); |
| if (tes && tes->id > 0) |
| glAttachShader(prog_id, tes->id); |
| if (gs && gs->id > 0) |
| glAttachShader(prog_id, gs->id); |
| glAttachShader(prog_id, fs->id); |
| |
| /* For the non-separable codepath (the usual path), all these shader stages are |
| * contained inside a single program. */ |
| vs_id = prog_id; |
| fs_id = prog_id; |
| if (gs) |
| gs_id = prog_id; |
| if (tes) |
| tes_id = prog_id; |
| } |
| |
| if (gs) { |
| set_stream_out_varyings(sub_ctx, gs_id, &gs->sel->sinfo); |
| } else if (tes) |
| set_stream_out_varyings(sub_ctx, tes_id, &tes->sel->sinfo); |
| else |
| set_stream_out_varyings(sub_ctx, vs_id, &vs->sel->sinfo); |
| |
| if (fs->sel->sinfo.num_outputs > 1) { |
| sprog->dual_src_linked = util_blend_state_is_dual(&sub_ctx->blend_state, 0); |
| if (sprog->dual_src_linked) { |
| if (has_feature(feat_dual_src_blend)) { |
| if (!vrend_state.use_gles) { |
| glBindFragDataLocationIndexed(fs_id, 0, 0, "fsout_c0"); |
| glBindFragDataLocationIndexed(fs_id, 0, 1, "fsout_c1"); |
| } else { |
| glBindFragDataLocationIndexedEXT(fs_id, 0, 0, "fsout_c0"); |
| glBindFragDataLocationIndexedEXT(fs_id, 0, 1, "fsout_c1"); |
| } |
| } else { |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0); |
| } |
| } else if (!vrend_state.use_gles && has_feature(feat_dual_src_blend)) { |
| /* On GLES the output layout is emitted directly in the shader, so
| * there is no need to define the bindings here; on desktop GL each
| * color output is bound explicitly. */
| for (int i = 0; i < fs->sel->sinfo.num_outputs; ++i) { |
| if (fs->sel->sinfo.fs_output_layout[i] >= 0) { |
| char buf[64]; |
| snprintf(buf, sizeof(buf), "fsout_c%d", fs->sel->sinfo.fs_output_layout[i]); |
| glBindFragDataLocationIndexed(fs_id, fs->sel->sinfo.fs_output_layout[i], 0, buf); |
| } |
| } |
| } |
| } else |
| sprog->dual_src_linked = false; |
| |
| if (has_feature(feat_gles31_vertex_attrib_binding)) { |
| uint32_t mask = vs->sel->sinfo.attrib_input_mask; |
| while (mask) { |
| i = u_bit_scan(&mask); |
| snprintf(name, sizeof(name), "in_%d", i);
| glBindAttribLocation(vs_id, i, name); |
| } |
| } |
| |
| bool link_success; |
| if (separable) { /* separable programs */ |
| link_success = vrend_link_stage(vs); |
| link_success &= vrend_link_stage(fs); |
| if (gs) link_success &= vrend_link_stage(gs); |
| if (tcs) link_success &= vrend_link_stage(tcs); |
| if (tes) link_success &= vrend_link_stage(tes); |
| } else { /* non-separable programs */ |
| link_success = vrend_link(prog_id); |
| } |
| |
| if (!link_success) { |
| if (separable) { |
| glDeleteProgramPipelines(1, &pipeline_id); |
| } else { |
| glDeleteProgram(prog_id); |
| } |
| |
| free(sprog); |
| |
| /* dump shaders */ |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); |
| vrend_shader_dump(vs); |
| if (tcs) |
| vrend_shader_dump(tcs); |
| if (tes) |
| vrend_shader_dump(tes); |
| if (gs) |
| vrend_shader_dump(gs); |
| vrend_shader_dump(fs); |
| return NULL; |
| } |
| |
| if (separable) { |
| glUseProgramStages(pipeline_id, GL_VERTEX_SHADER_BIT, vs->program_id); |
| if (tcs) glUseProgramStages(pipeline_id, GL_TESS_CONTROL_SHADER_BIT, tcs->program_id); |
| if (tes) glUseProgramStages(pipeline_id, GL_TESS_EVALUATION_SHADER_BIT, tes->program_id); |
| if (gs) glUseProgramStages(pipeline_id, GL_GEOMETRY_SHADER_BIT, gs->program_id); |
| glUseProgramStages(pipeline_id, GL_FRAGMENT_SHADER_BIT, fs->program_id); |
| |
| glValidateProgramPipeline(pipeline_id); |
| GLint validation_status; |
| glGetProgramPipelineiv(pipeline_id, GL_VALIDATE_STATUS, &validation_status); |
| if (!validation_status) { |
| vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_PROGRAM_PIPELINE, 0); |
| } |
| } |
| |
| sprog->ss[PIPE_SHADER_VERTEX] = vs; |
| sprog->ss[PIPE_SHADER_FRAGMENT] = fs; |
| sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) | |
| (sprog->dual_src_linked ? 1 : 0); |
| |
| sprog->ss[PIPE_SHADER_GEOMETRY] = gs; |
| sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs; |
| sprog->ss[PIPE_SHADER_TESS_EVAL] = tes; |
| |
| list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs); |
| list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs); |
| if (gs) |
| list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs); |
| if (tcs) |
| list_add(&sprog->sl[PIPE_SHADER_TESS_CTRL], &tcs->programs); |
| if (tes) |
| list_add(&sprog->sl[PIPE_SHADER_TESS_EVAL], &tes->programs); |
| |
| last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); |
| |
| sprog->is_pipeline = separable; |
| if (sprog->is_pipeline) |
| sprog->id.pipeline = pipeline_id; |
| else |
| sprog->id.program = prog_id; |
| |
| list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]); |
| |
| sprog->virgl_block_bind = -1; |
| sprog->ubo_sysval_buffer_id = -1; |
| sprog->sysvalue_data_cookie = UINT32_MAX; |
| |
| vrend_use_program(sub_ctx, sprog); |
| |
| for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX; |
| shader_type <= last_shader; |
| shader_type++) { |
| if (!sprog->ss[shader_type]) |
| continue; |
| |
| bind_const_locs(sprog, shader_type); |
| bind_image_locs(sprog, shader_type); |
| bind_ssbo_locs(sprog, shader_type); |
| |
| if (sprog->ss[shader_type]->sel->sinfo.reads_drawid) |
| sprog->reads_drawid = true; |
| } |
| rebind_ubo_and_sampler_locs(sprog, last_shader); |
| |
| if (!has_feature(feat_gles31_vertex_attrib_binding)) { |
| if (vs->sel->sinfo.num_inputs) { |
| sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t)); |
| if (sprog->attrib_locs) { |
| for (i = 0; i < vs->sel->sinfo.num_inputs; i++) { |
| snprintf(name, sizeof(name), "in_%d", i);
| sprog->attrib_locs[i] = glGetAttribLocation(vs_id, name); |
| } |
| } |
| } else |
| sprog->attrib_locs = NULL; |
| } |
| |
| return sprog; |
| } |
| |
| static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend_context *ctx, |
| GLuint cs_id) |
| { |
| struct vrend_linked_shader_program *ent; |
| LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) { |
| if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) { |
| list_del(&ent->head); |
| list_add(&ent->head, &ctx->sub->cs_programs); |
| return ent; |
| } |
| } |
| return NULL; |
| } |
| |
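| /* Programs are kept in VREND_PROGRAM_NQUEUES lists selected by the low
| * bits of the VS id; the key packs the FS id, the remaining VS id bits
| * and the dual-source flag, so most mismatches are rejected by a single
| * 64-bit compare before the per-stage checks. */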
| static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_sub_context *sub_ctx, |
| GLuint vs_id, |
| GLuint fs_id, |
| GLuint gs_id, |
| GLuint tcs_id, |
| GLuint tes_id, |
| bool dual_src) |
| { |
| uint64_t vs_fs_key = (((uint64_t)fs_id) << 32) | (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) | |
| (dual_src ? 1 : 0); |
| |
| struct vrend_linked_shader_program *ent; |
| |
| struct list_head *programs = &sub_ctx->gl_programs[vs_id & VREND_PROGRAM_NQUEUE_MASK]; |
| LIST_FOR_EACH_ENTRY(ent, programs, head) { |
| if (likely(ent->vs_fs_key != vs_fs_key)) |
| continue; |
| if (ent->ss[PIPE_SHADER_GEOMETRY] && |
| ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id) |
| continue; |
| if (ent->ss[PIPE_SHADER_TESS_CTRL] && |
| ent->ss[PIPE_SHADER_TESS_CTRL]->id != tcs_id) |
| continue; |
| if (ent->ss[PIPE_SHADER_TESS_EVAL] && |
| ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id) |
| continue; |
| /* put the entry in front */ |
| if (programs->next != &ent->head) { |
| list_del(&ent->head); |
| list_add(&ent->head, programs); |
| } |
| return ent; |
| } |
| |
| return NULL; |
| } |
| |
| static void vrend_destroy_program(struct vrend_linked_shader_program *ent) |
| { |
| int i; |
| if (ent->ref_context && ent->ref_context->prog == ent) |
| ent->ref_context->prog = NULL; |
| |
| if (ent->ubo_sysval_buffer_id != -1) { |
| glDeleteBuffers(1, (GLuint *) &ent->ubo_sysval_buffer_id); |
| } |
| |
| if (ent->is_pipeline) |
| glDeleteProgramPipelines(1, &ent->id.pipeline); |
| else |
| glDeleteProgram(ent->id.program); |
| |
| list_del(&ent->head); |
| |
| for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_COMPUTE; i++) { |
| if (ent->ss[i]) |
| list_del(&ent->sl[i]); |
| free(ent->shadow_samp_mask_locs[i]); |
| free(ent->shadow_samp_add_locs[i]); |
| free(ent->img_locs[i]); |
| } |
| free(ent->attrib_locs); |
| free(ent); |
| } |
| |
| static void vrend_free_programs(struct vrend_sub_context *sub) |
| { |
| struct vrend_linked_shader_program *ent, *tmp; |
| |
| if (!LIST_IS_EMPTY(&sub->cs_programs)) { |
| LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->cs_programs, head) |
| vrend_destroy_program(ent); |
| } |
| |
| for (unsigned i = 0; i < VREND_PROGRAM_NQUEUES; ++i) { |
| if (!LIST_IS_EMPTY(&sub->gl_programs[i])) { |
| LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->gl_programs[i], head) |
| vrend_destroy_program(ent); |
| } |
| } |
| } |
| |
| static void vrend_destroy_streamout_object(struct vrend_streamout_object *obj) |
| { |
| unsigned i; |
| list_del(&obj->head); |
| for (i = 0; i < obj->num_targets; i++) |
| vrend_so_target_reference(&obj->so_targets[i], NULL); |
| if (has_feature(feat_transform_feedback2)) |
| glDeleteTransformFeedbacks(1, &obj->id); |
| FREE(obj); |
| } |
| |
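| /* Switch to another GL context while preserving ordering: insert a
| * fence on the current context and make the new context wait on it
| * server-side before executing further commands. */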
| void vrend_sync_make_current(virgl_gl_context gl_cxt)
| {
| GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0); |
| vrend_clicbs->make_current(gl_cxt); |
| glWaitSync(sync, 0, GL_TIMEOUT_IGNORED); |
| glDeleteSync(sync); |
| } |
| |
| int vrend_create_surface(struct vrend_context *ctx, |
| uint32_t handle, |
| uint32_t res_handle, uint32_t format, |
| uint32_t val0, uint32_t val1, |
| uint32_t nr_samples) |
| { |
| struct vrend_surface *surf; |
| struct vrend_resource *res; |
| uint32_t ret_handle; |
| |
| if (format >= PIPE_FORMAT_COUNT) { |
| return EINVAL; |
| } |
| |
| res = vrend_renderer_ctx_res_lookup(ctx, res_handle); |
| if (!res) { |
| vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); |
| return EINVAL; |
| } |
| |
| surf = CALLOC_STRUCT(vrend_surface); |
| if (!surf) |
| return ENOMEM; |
| |
| surf->res_handle = res_handle; |
| surf->format = format; |
| |
| surf->val0 = val0; |
| surf->val1 = val1; |
| surf->id = res->id; |
| surf->nr_samples = nr_samples; |
| |
| if (!has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER) && |
| has_bit(res->storage_bits, VREND_STORAGE_GL_IMMUTABLE) && |
| has_feature(feat_texture_view)) { |
| /* We don't need texture views for buffer objects.
| * Otherwise we only need a texture view if
| * a) the formats differ between the surface and the base texture, or
| * b) we need to map a sub-range of more than one layer to the surface;
| * GL can use a single layer fine without a view, and it can
| * map the whole texture fine. In those cases we don't
| * create a texture view.
| */
| int first_layer = surf->val1 & 0xffff; |
| int last_layer = (surf->val1 >> 16) & 0xffff; |
| |
| bool needs_view = first_layer != last_layer && |
| (first_layer != 0 || (last_layer != (int)util_max_layer(&res->base, surf->val0))); |
| if (!needs_view && surf->format != res->base.format) |
| needs_view = true; |
| |
| if (needs_view && vrend_resource_supports_view(res, surf->format)) { |
| GLenum target = res->target; |
| GLenum internalformat = tex_conv_table[format].internalformat; |
| |
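| /* A cube-map view must cover all six faces, so widen a
| * single-face request to the full cube. */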
| if (target == GL_TEXTURE_CUBE_MAP && first_layer == last_layer) { |
| first_layer = 0; |
| last_layer = 5; |
| } |
| |
| VREND_DEBUG(dbg_tex, ctx, "Create texture view from %s for %s\n", |
| util_format_name(res->base.format), |
| util_format_name(surf->format)); |
| |
| glGenTextures(1, &surf->id); |
| if (vrend_state.use_gles) { |
| if (target == GL_TEXTURE_1D) |
| target = GL_TEXTURE_2D; |
| else if (target == GL_TEXTURE_1D_ARRAY) |
| target = GL_TEXTURE_2D_ARRAY; |
| } |
| |
| if (target == GL_TEXTURE_RECTANGLE_NV && |
| !(tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TARGET_RECTANGLE)) { |
| target = GL_TEXTURE_2D; |
| } |
| |
| glTextureView(surf->id, target, res->id, internalformat, |
| 0, res->base.last_level + 1, |
| first_layer, last_layer - first_layer + 1); |
| } |
| } |
| |
| pipe_reference_init(&surf->reference, 1); |
| |
| vrend_resource_reference(&surf->texture, res); |
| |
| ret_handle = vrend_renderer_object_insert(ctx, surf, handle, VIRGL_OBJECT_SURFACE); |
| if (ret_handle == 0) { |
| FREE(surf); |
| return ENOMEM; |
| } |
| return 0; |
| } |
| |
| int vrend_create_dsa(struct vrend_context *ctx, |
| uint32_t handle, |
| const struct pipe_depth_stencil_alpha_state *dsa_state) |
| { |
| struct vrend_depth_stencil_alpha_state *vdsa_state; |
| uint32_t ret_handle; |
| |
| vdsa_state = CALLOC_STRUCT(vrend_depth_stencil_alpha_state); |
| if (!vdsa_state) |
| return ENOMEM; |
| |
| vdsa_state->base = *dsa_state; |
| |
| ret_handle = vrend_renderer_object_insert(ctx, vdsa_state, handle, VIRGL_OBJECT_DSA); |
| if (ret_handle == 0) { |
| FREE(vdsa_state); |
| return ENOMEM; |
| } |
| |
| return 0; |
| } |
| |
| static void vrend_destroy_surface_object(void *obj_ptr) |
| { |
| struct vrend_surface *surface = obj_ptr; |
| |
| vrend_surface_reference(&surface, NULL); |
| } |
| |
| static void vrend_destroy_sampler_view_object(void *obj_ptr) |
| { |
| struct vrend_sampler_view *samp = obj_ptr; |
| |
| vrend_sampler_view_reference(&samp, NULL); |
| } |
| |
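| /* Destroying a streamout target also destroys any streamout object
| * that references it; a paused object is ended first so GL transform
| * feedback state stays consistent. */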
| static void vrend_destroy_so_target_object(void *obj_ptr) |
| { |
| struct vrend_so_target *target = obj_ptr; |
| struct vrend_sub_context *sub_ctx = target->sub_ctx; |
| struct vrend_streamout_object *obj, *tmp; |
| bool found; |
| unsigned i; |
| |
| LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub_ctx->streamout_list, head) { |
| found = false; |
| for (i = 0; i < obj->num_targets; i++) { |
| if (obj->so_targets[i] == target) { |
| found = true; |
| break; |
| } |
| } |
| if (found) { |
| if (obj == sub_ctx->current_so) |
| sub_ctx->current_so = NULL; |
| if (obj->xfb_state == XFB_STATE_PAUSED) { |
| if (has_feature(feat_transform_feedback2)) |
| glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id); |
| glEndTransformFeedback(); |
| if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) |
| glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, sub_ctx->current_so->id); |
| } |
| vrend_destroy_streamout_object(obj); |
| } |
| } |
| |
| vrend_so_target_reference(&target, NULL); |
| } |
| |
| static void vrend_destroy_vertex_elements_object(void *obj_ptr) |
| { |
| struct vrend_vertex_element_array *v = obj_ptr; |
| |
| if (v == v->owning_sub->ve) |
| v->owning_sub->ve = NULL; |
| |
| if (has_feature(feat_gles31_vertex_attrib_binding)) { |
| glDeleteVertexArrays(1, &v->id); |
| } |
| FREE(v); |
| } |
| |
| static void vrend_destroy_sampler_state_object(void *obj_ptr) |
| { |
| struct vrend_sampler_state *state = obj_ptr; |
| |
| if (has_feature(feat_samplers)) |
| glDeleteSamplers(2, state->ids); |
| |
| if (state->sub_ctx) { |
| struct vrend_sub_context *sub_ctx = state->sub_ctx; |
| for (enum pipe_shader_type shader_type = PIPE_SHADER_VERTEX; |
| shader_type < PIPE_SHADER_TYPES; |
| shader_type++) { |
| int deleted_samplers = 0; |
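| /* Clear every binding of this state and compact the remaining bound
| * samplers down so the per-stage array stays hole-free. */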
| for (uint32_t sampler = 0; sampler < PIPE_MAX_SAMPLERS; sampler++) { |
| if (sub_ctx->sampler_state[shader_type][sampler] == state) { |
| sub_ctx->sampler_state[shader_type][sampler] = NULL; |
| sub_ctx->num_sampler_states[shader_type]--; |
| sub_ctx->sampler_views_dirty[shader_type] |= (1u << sampler); |
| deleted_samplers++; |
| } else if (deleted_samplers) { |
| sub_ctx->sampler_state[shader_type][sampler-deleted_samplers] = sub_ctx->sampler_state[shader_type][sampler]; |
| sub_ctx->sampler_state[shader_type][sampler] = NULL; |
| sub_ctx->sampler_views_dirty[shader_type] |= (1u << sampler); |
| } |
| } |
| } |
| } |
| |
| FREE(state); |
| } |
| |
| static void vrend_destroy_dsa_object(void *obj_ptr) |
| { |
| struct vrend_depth_stencil_alpha_state *state = obj_ptr; |
| |
| if (state->owning_sub && state == state->owning_sub->dsa) |
| vrend_object_bind_dsa(state->owning_sub->parent, 0 /* unbind */); |
| |
| FREE(state); |
| } |
| |
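| /* Translate gallium wrap modes to GL. GL_CLAMP was removed from core
| * profile GL, so it degrades to GL_CLAMP_TO_EDGE there. */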
| static GLuint convert_wrap(int wrap) |
| { |
| switch (wrap) {
| case PIPE_TEX_WRAP_REPEAT: return GL_REPEAT;
| case PIPE_TEX_WRAP_CLAMP:
| return vrend_state.use_core_profile ? GL_CLAMP_TO_EDGE : GL_CLAMP;
| |
| case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE; |
| case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER; |
| |
| case PIPE_TEX_WRAP_MIRROR_REPEAT: return GL_MIRRORED_REPEAT; |
| case PIPE_TEX_WRAP_MIRROR_CLAMP: return GL_MIRROR_CLAMP_EXT; |
| case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return GL_MIRROR_CLAMP_TO_EDGE_EXT; |
| case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return GL_MIRROR_CLAMP_TO_BORDER_EXT; |
| default: |
| assert(0); |
| return -1; |
| } |
| } |
| |
| static inline GLenum convert_mag_filter(enum pipe_tex_filter filter) |
| { |
| if (filter == PIPE_TEX_FILTER_NEAREST) |
| return GL_NEAREST; |
| return GL_LINEAR; |
| } |
| |
| static inline GLenum convert_min_filter(enum pipe_tex_filter filter, enum pipe_tex_mipfilter mip_filter) |
| { |
| if (mip_filter == PIPE_TEX_MIPFILTER_NONE) |
| return convert_mag_filter(filter); |
| else if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { |
| if (filter == PIPE_TEX_FILTER_NEAREST) |
| return GL_NEAREST_MIPMAP_LINEAR; |
| else |
| return GL_LINEAR_MIPMAP_LINEAR; |
| } else if (mip_filter == PIPE_TEX_MIPFILTER_NEAREST) { |
| if (filter == PIPE_TEX_FILTER_NEAREST) |
| return GL_NEAREST_MIPMAP_NEAREST; |
| else |
| return GL_LINEAR_MIPMAP_NEAREST; |
| } |
| assert(0); |
| return 0; |
| } |
| |
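| /* Set the sampler border color when supported; otherwise only warn if
| * a non-default (non-zero) color was actually requested. */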
| static void apply_sampler_border_color(GLuint sampler, |
| const GLuint colors[static 4]) |
| { |
| if (has_feature(feat_sampler_border_colors)) { |
| glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, colors); |
| } else if (colors[0] || colors[1] || colors[2] || colors[3]) { |
| vrend_printf("sampler border color setting requested but not supported\n"); |
| } |
| } |
| |
| int vrend_create_sampler_state(struct vrend_context *ctx, |
| uint32_t handle, |
| struct pipe_sampler_state *templ) |
| { |
| struct vrend_sampler_state *state = CALLOC_STRUCT(vrend_sampler_state); |
| int ret_handle; |
| |
| if (!state) |
| return ENOMEM; |
| |
| state->base = *templ; |
| |
| if (has_feature(feat_samplers)) { |
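| /* Two GL sampler objects are created per state: ids[0] skips sRGB
| * decode and ids[1] decodes, so the right one can be picked at bind
| * time depending on the view's sRGB handling. */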
| glGenSamplers(2, state->ids); |
| |
| for (int i = 0; i < 2; ++i) { |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_S, convert_wrap(templ->wrap_s)); |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_T, convert_wrap(templ->wrap_t)); |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_R, convert_wrap(templ->wrap_r)); |
| glSamplerParameterf(state->ids[i], GL_TEXTURE_MIN_FILTER, convert_min_filter(templ->min_img_filter, templ->min_mip_filter)); |
| glSamplerParameterf(state->ids[i], GL_TEXTURE_MAG_FILTER, convert_mag_filter(templ->mag_img_filter)); |
| glSamplerParameterf(state->ids[i], GL_TEXTURE_MIN_LOD, templ->min_lod); |
| glSamplerParameterf(state->ids[i], GL_TEXTURE_MAX_LOD, templ->max_lod); |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_COMPARE_MODE, templ->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE); |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_COMPARE_FUNC, GL_NEVER + templ->compare_func); |
| if (vrend_state.use_gles) { |
| if (templ->lod_bias) |
| report_gles_warn(ctx, GLES_WARN_LOD_BIAS); |
| } else |
| glSamplerParameterf(state->ids[i], GL_TEXTURE_LOD_BIAS, templ->lod_bias); |
| |
| if (vrend_state.use_gles) { |
| if (templ->seamless_cube_map != 0) { |
| report_gles_warn(ctx, GLES_WARN_SEAMLESS_CUBE_MAP); |
| } |
| } else { |
| if (has_feature(feat_seamless_cubemap_per_texture)) { |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_CUBE_MAP_SEAMLESS, templ->seamless_cube_map); |
| } |
| } |
| |
| apply_sampler_border_color(state->ids[i], templ->border_color.ui); |
| if (has_feature(feat_texture_srgb_decode)) |
| glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, |
| i == 0 ? GL_SKIP_DECODE_EXT : GL_DECODE_EXT); |
| } |
| } |
| ret_handle = vrend_renderer_object_insert(ctx, state, handle, |
| VIRGL_OBJECT_SAMPLER_STATE); |
| if (!ret_handle) { |
| if (has_feature(feat_samplers)) |
| glDeleteSamplers(2, state->ids); |
| FREE(state); |
| return ENOMEM; |
| } |
| return 0; |
| } |
| |
| static inline GLenum to_gl_swizzle(enum pipe_swizzle swizzle) |
| { |
| switch (swizzle) { |
| case PIPE_SWIZZLE_RED: return GL_RED; |
| case PIPE_SWIZZLE_GREEN: return GL_GREEN; |
| case PIPE_SWIZZLE_BLUE: return GL_BLUE; |
| case PIPE_SWIZZLE_ALPHA: return GL_ALPHA; |
| case PIPE_SWIZZLE_ZERO: return GL_ZERO; |
| case PIPE_SWIZZLE_ONE: return GL_ONE; |
| default: |
| assert(0); |
| return 0; |
| } |
| } |
| |
| static inline enum pipe_swizzle to_pipe_swizzle(GLenum swizzle) |
| { |
| switch (swizzle) { |
| case GL_RED: return PIPE_SWIZZLE_RED; |
| case GL_GREEN: return PIPE_SWIZZLE_GREEN; |
| case GL_BLUE: return PIPE_SWIZZLE_BLUE; |
| case GL_ALPHA: return PIPE_SWIZZLE_ALPHA; |
| case GL_ZERO: return PIPE_SWIZZLE_ZERO; |
| case GL_ONE: return PIPE_SWIZZLE_ONE; |
| default: |
| assert(0); |
| return 0; |
| } |
| } |
| |
| int vrend_create_sampler_view(struct vrend_context *ctx, |
| uint32_t handle, |
| uint32_t res_handle, uint32_t format, |
| uint32_t val0, uint32_t val1, uint32_t swizzle_packed) |
| { |
| struct vrend_sampler_view *view; |
| struct vrend_resource *res; |
| int ret_handle; |
| enum pipe_swizzle swizzle[4]; |
| |
| res = vrend_renderer_ctx_res_lookup(ctx, res_handle); |
| if (!res) { |
| vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); |
| return EINVAL; |
| } |
| |
| view = CALLOC_STRUCT(vrend_sampler_view); |
| if (!view) |
| return ENOMEM; |
| |
| pipe_reference_init(&view->reference, 1); |
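| /* The low 24 bits of 'format' carry the virgl format; the top byte
| * carries the gallium texture target and is unpacked below. */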
| view->format = format & 0xffffff; |
| |
| if (!view->format || view->format >= VIRGL_FORMAT_MAX) { |
| vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, view->format); |
| FREE(view); |
| return EINVAL; |
| } |
| |
| uint32_t pipe_target = (format >> 24) & 0xff; |
| if (pipe_target >= PIPE_MAX_TEXTURE_TYPES) { |
| vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET, |
| view->format); |
| FREE(view); |
| return EINVAL; |
| } |
| |
| view->target = tgsitargettogltarget(pipe_target, res->base.nr_samples); |
| |
| /* Work around TEXTURE_1D missing on GLES */ |
| if (vrend_state.use_gles) { |
| if (view->target == GL_TEXTURE_1D) |
| view->target = GL_TEXTURE_2D; |
| else if (view->target == GL_TEXTURE_1D_ARRAY) |
| view->target = GL_TEXTURE_2D_ARRAY; |
| } |
| |
| if (view->target == GL_TEXTURE_RECTANGLE_NV && |
| !(tex_conv_table[view->format].flags & VIRGL_TEXTURE_CAN_TARGET_RECTANGLE)) { |
| view->emulated_rect = true; |
| view->target = GL_TEXTURE_2D; |
| } |
| |
| view->val0 = val0; |
| view->val1 = val1; |
| |
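| /* Each channel's swizzle selector is packed into 3 bits. */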
| swizzle[0] = swizzle_packed & 0x7; |
| swizzle[1] = (swizzle_packed >> 3) & 0x7; |
| swizzle[2] = (swizzle_packed >> 6) & 0x7; |
| swizzle[3] = (swizzle_packed >> 9) & 0x7; |
| |
| vrend_resource_reference(&view->texture, res); |
| |
| view->id = view->texture->id; |
| if (view->target == PIPE_BUFFER) |
| view->target = view->texture->target; |
| |
| view->srgb_decode = GL_DECODE_EXT; |
| if (view->format != view->texture->base.format) { |
| if (util_format_is_srgb(view->texture->base.format) && |
| !util_format_is_srgb(view->format)) |
| view->srgb_decode = GL_SKIP_DECODE_EXT; |
| } |
| |
| if (!(util_format_has_alpha(view->format) || util_format_is_depth_or_stencil(view->format))) { |
| if (swizzle[0] == PIPE_SWIZZLE_ALPHA) |
| swizzle[0] = PIPE_SWIZZLE_ONE; |
| if (swizzle[1] == PIPE_SWIZZLE_ALPHA) |
| swizzle[1] = PIPE_SWIZZLE_ONE; |
| if (swizzle[2] == PIPE_SWIZZLE_ALPHA) |
| swizzle[2] = PIPE_SWIZZLE_ONE; |
| if (swizzle[3] == PIPE_SWIZZLE_ALPHA) |
| swizzle[3] = PIPE_SWIZZLE_ONE; |
| } |
| |
| if (tex_conv_table[view->format].flags & VIRGL_TEXTURE_NEED_SWIZZLE) { |
| if (swizzle[0] <= PIPE_SWIZZLE_ALPHA) |
| swizzle[0] = tex_conv_table[view->format].swizzle[swizzle[0]]; |
| if (swizzle[1] <= PIPE_SWIZZLE_ALPHA) |
| swizzle[1] = tex_conv_table[view->format].swizzle[swizzle[1]]; |
| if (swizzle[2] <= PIPE_SWIZZLE_ALPHA) |
| swizzle[2] = tex_conv_table[view->format].swizzle[swizzle[2]]; |
| if (swizzle[3] <= PIPE_SWIZZLE_ALPHA) |
| swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]]; |
| } |
| |
| for (enum pipe_swizzle i = 0; i < 4; ++i) |
| view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]); |
| |
| if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) { |
| enum virgl_formats format; |
| bool needs_view = false; |
| |
| /* |
| * Need to use a texture view if the gallium |
| * view target is different than the underlying |
| * texture target. |
| */ |
| if (view->target != view->texture->target) |
| needs_view = true; |
| |
| /*
| * If the formats differ and this isn't
| * a depth/stencil (DS) texture, a view is required.
| * DS textures are special as they use different gallium
| * formats for DS views into a combined resource.
| * GL texture views can't be used for this; stencil
| * texturing is used instead. For DS formats,
| * always program the underlying DS format, as a
| * view could be required for layers.
| */
| format = view->format; |
| if (util_format_is_depth_or_stencil(view->texture->base.format)) |
| format = view->texture->base.format; |
| else if (view->format != view->texture->base.format) |
| needs_view = true; |
| |
| if (needs_view && |
| has_bit(view->texture->storage_bits, VREND_STORAGE_GL_IMMUTABLE) && |
| has_feature(feat_texture_view)) { |
| glGenTextures(1, &view->id); |
| GLenum internalformat = tex_conv_table[format].internalformat; |
| unsigned base_layer = view->val0 & 0xffff; |
| unsigned max_layer = (view->val0 >> 16) & 0xffff; |
| int base_level = view->val1 & 0xff; |
| int max_level = (view->val1 >> 8) & 0xff; |
| view->levels = (max_level - base_level) + 1; |
| |
| /* texture views for eglimage-backed bgr* resources are usually not |
| * supported since they cause unintended red/blue channel-swapping. |
| * Since we have control over the swizzle parameters of the sampler, we |
| * can just compensate in this case by swapping the red/blue channels |
| * back, and still benefit from automatic srgb decoding. |
| * If the red/blue swap is intended, we just let it happen and don't
| * need to explicitly change the sampler's swizzle parameters. */
| if (!vrend_resource_supports_view(view->texture, view->format) && |
| vrend_format_is_bgra(view->format)) { |
| VREND_DEBUG(dbg_tex, ctx, "texture view with red/blue swizzle created for EGL-backed texture sampler" |
| " (format: %s; view: %s)\n", |
| util_format_name(view->texture->base.format), |
| util_format_name(view->format)); |
| GLint temp = view->gl_swizzle[0]; |
| view->gl_swizzle[0] = view->gl_swizzle[2]; |
| view->gl_swizzle[2] = temp; |
| } |
| |
| glTextureView(view->id, view->target, view->texture->id, internalformat, |
| base_level, view->levels, |
| base_layer, max_layer - base_layer + 1); |
| |
| glBindTexture(view->target, view->id); |
| |
| if (util_format_is_depth_or_stencil(view->format)) { |
| if (vrend_state.use_core_profile == false) { |
| /* setting depth texture mode is deprecated in core profile */ |
| glTexParameteri(view->target, GL_DEPTH_TEXTURE_MODE, GL_RED); |
| } |
| if (has_feature(feat_stencil_texturing)) { |
| const struct util_format_description *desc = util_format_description(view->format); |
| if (!util_format_has_depth(desc)) { |
| glTexParameteri(view->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX); |
| } else { |
| glTexParameteri(view->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_DEPTH_COMPONENT); |
| } |
| } |
| } |
| |
| glTexParameteri(view->target, GL_TEXTURE_BASE_LEVEL, base_level); |
| glTexParameteri(view->target, GL_TEXTURE_MAX_LEVEL, max_level); |
| if (vrend_state.use_gles) { |
| for (unsigned int i = 0; i < 4; ++i) { |
| glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]); |
| } |
| } else |
| glTexParameteriv(view->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle); |
| if (util_format_is_srgb(view->format) && |
| has_feature(feat_texture_srgb_decode)) { |
| glTexParameteri(view->target, GL_TEXTURE_SRGB_DECODE_EXT, |
| view->srgb_decode); |
| } |
| glBindTexture(view->target, 0); |
| } else if (needs_view && view->val0 < ARRAY_SIZE(res->aux_plane_egl_image) && |
| res->aux_plane_egl_image[view->val0]) { |
| void *image = res->aux_plane_egl_image[view->val0]; |
| glGenTextures(1, &view->id); |
| glBindTexture(view->target, view->id); |
| glEGLImageTargetTexture2DOES(view->target, (GLeglImageOES) image); |
| glBindTexture(view->target, 0); |
| } |
| } |
| |
| ret_handle = vrend_renderer_object_insert(ctx, view, handle, VIRGL_OBJECT_SAMPLER_VIEW); |
| if (ret_handle == 0) { |
| FREE(view); |
| return ENOMEM; |
| } |
| return 0; |
| } |
| |
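| /* Attach a texture to the framebuffer, using
| * EXT_multisampled_render_to_texture for implicit-MSAA surfaces when
| * available; depth/stencil attachments are emulated with a multisampled
| * renderbuffer, and anything else falls back to single-sampled. */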
| static void vrend_framebuffer_texture_2d(struct vrend_resource *res, |
| GLenum target, GLenum attachment, |
| GLenum textarget, uint32_t texture, |
| int32_t level, uint32_t samples) |
| { |
| if (samples == 0) { |
| glFramebufferTexture2D(target, attachment, textarget, texture, level); |
| } else if (!has_feature(feat_implicit_msaa)) { |
| /* fallback to non-msaa */ |
| report_gles_warn(vrend_state.current_ctx, GLES_WARN_IMPLICIT_MSAA_SURFACE); |
| glFramebufferTexture2D(target, attachment, textarget, texture, level); |
| } else if (attachment == GL_COLOR_ATTACHMENT0) {
| glFramebufferTexture2DMultisampleEXT(target, attachment, textarget, |
| texture, level, samples); |
| } else if (attachment == GL_STENCIL_ATTACHMENT || attachment == GL_DEPTH_ATTACHMENT) { |
| GLenum internalformat = |
| attachment == GL_STENCIL_ATTACHMENT ? GL_STENCIL_INDEX8 : GL_DEPTH_COMPONENT16; |
| |
| glGenRenderbuffers(1, &res->rbo_id); |
| glBindRenderbuffer(GL_RENDERBUFFER, res->rbo_id); |
| glRenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, samples, |
| internalformat, res->base.width0, |
| res->base.height0); |
| glFramebufferRenderbuffer(GL_FRAMEBUFFER, attachment, |
| GL_RENDERBUFFER, res->rbo_id); |
| glBindRenderbuffer(GL_RENDERBUFFER, 0); |
| } else { |
| /* unsupported attachment for EXT_multisampled_render_to_texture, fallback to non-msaa */ |
| report_gles_warn(vrend_state.current_ctx, GLES_WARN_IMPLICIT_MSAA_SURFACE); |
| |