| /************************************************************************** |
| * |
| * Copyright (C) 2014 Red Hat Inc. |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included |
| * in all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
| * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| * OTHER DEALINGS IN THE SOFTWARE. |
| * |
| **************************************************************************/ |
| |
| #include "tgsi/tgsi_info.h" |
| #include "tgsi/tgsi_iterate.h" |
| #include "tgsi/tgsi_scan.h" |
| #include "util/u_memory.h" |
| #include "util/u_math.h" |
| #include <string.h> |
| #include <stdio.h> |
| #include <math.h> |
| #include <errno.h> |
| #include "vrend_shader.h" |
| #include "vrend_debug.h" |
| |
| #include "vrend_strbuf.h" |
| |
| /* Start of the TGSI to GLSL conversion. */ |
| |
| #define INVARI_PREFIX "invariant" |
| |
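| /* Feature-requirement bits collected in dump_ctx::shader_req_bits while the TGSI |
|  * is scanned; shader_req_table below maps the bits that correspond to a GLSL |
|  * extension to that extension's name. */ |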
| #define SHADER_REQ_NONE 0 |
| #define SHADER_REQ_SAMPLER_RECT (1ULL << 0) |
| #define SHADER_REQ_CUBE_ARRAY (1ULL << 1) |
| #define SHADER_REQ_INTS (1ULL << 2) |
| #define SHADER_REQ_SAMPLER_MS (1ULL << 3) |
| #define SHADER_REQ_INSTANCE_ID (1ULL << 4) |
| #define SHADER_REQ_LODQ (1ULL << 5) |
| #define SHADER_REQ_TXQ_LEVELS (1ULL << 6) |
| #define SHADER_REQ_TG4 (1ULL << 7) |
| #define SHADER_REQ_VIEWPORT_IDX (1ULL << 8) |
| #define SHADER_REQ_STENCIL_EXPORT (1ULL << 9) |
| #define SHADER_REQ_LAYER (1ULL << 10) |
| #define SHADER_REQ_SAMPLE_SHADING (1ULL << 11) |
| #define SHADER_REQ_GPU_SHADER5 (1ULL << 12) |
| #define SHADER_REQ_DERIVATIVE_CONTROL (1ULL << 13) |
| #define SHADER_REQ_FP64 (1ULL << 14) |
| #define SHADER_REQ_IMAGE_LOAD_STORE (1ULL << 15) |
| #define SHADER_REQ_ES31_COMPAT (1ULL << 16) |
| #define SHADER_REQ_IMAGE_SIZE (1ULL << 17) |
| #define SHADER_REQ_TXQS (1ULL << 18) |
| #define SHADER_REQ_FBFETCH (1ULL << 19) |
| #define SHADER_REQ_SHADER_CLOCK (1ULL << 20) |
| #define SHADER_REQ_PSIZE (1ULL << 21) |
| #define SHADER_REQ_IMAGE_ATOMIC (1ULL << 22) |
| #define SHADER_REQ_CLIP_DISTANCE (1ULL << 23) |
| #define SHADER_REQ_ENHANCED_LAYOUTS (1ULL << 24) |
| #define SHADER_REQ_SEPERATE_SHADER_OBJECTS (1ULL << 25) |
| #define SHADER_REQ_ARRAYS_OF_ARRAYS (1ULL << 26) |
| #define SHADER_REQ_SHADER_INTEGER_FUNC (1ULL << 27) |
| #define SHADER_REQ_SHADER_ATOMIC_FLOAT (1ULL << 28) |
| #define SHADER_REQ_NV_IMAGE_FORMATS (1ULL << 29) |
| #define SHADER_REQ_CONSERVATIVE_DEPTH (1ULL << 30) |
| #define SHADER_REQ_SAMPLER_BUF (1ULL << 31) |
| #define SHADER_REQ_GEOMETRY_SHADER (1ULL << 32) |
| #define SHADER_REQ_BLEND_EQUATION_ADVANCED (1ULL << 33) |
| #define SHADER_REQ_EXPLICIT_ATTRIB_LOCATION (1ULL << 34) |
| #define SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION (1ULL << 35) |
| #define SHADER_REQ_TEXTURE_SHADOW_LOD (1ULL << 36) |
| #define SHADER_REQ_AMD_VS_LAYER (1ULL << 37) |
| #define SHADER_REQ_AMD_VIEWPORT_IDX (1ULL << 38) |
| #define SHADER_REQ_SHADER_DRAW_PARAMETERS (1ULL << 39) |
| #define SHADER_REQ_SHADER_GROUP_VOTE (1ULL << 40) |
| #define SHADER_REQ_EXPLICIT_UNIFORM_LOCATION (1ULL << 41) |
| |
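| /* Flags recorded in dump_ctx::front_back_color_emitted_flags. */ |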
| #define FRONT_COLOR_EMITTED (1 << 0) |
| #define BACK_COLOR_EMITTED (1 << 1) |
| |
| #define MAX_VARYING 32 |
| |
| enum vrend_sysval_uniform { |
| UNIFORM_WINSYS_ADJUST_Y, |
| UNIFORM_CLIP_PLANE, |
| UNIFORM_ALPHA_REF_VAL, |
| UNIFORM_PSTIPPLE_SAMPLER, |
| UNIFORM_DRAWID_BASE |
| }; |
| |
| enum vec_type { |
| VEC_FLOAT = 0, |
| VEC_INT = 1, |
| VEC_UINT = 2 |
| }; |
| |
| struct vrend_shader_sampler { |
| int tgsi_sampler_type; |
| enum tgsi_return_type tgsi_sampler_return; |
| }; |
| |
| struct vrend_shader_table { |
| uint64_t key; |
| const char *string; |
| }; |
| |
| struct vrend_shader_image { |
| struct tgsi_declaration_image decl; |
| enum tgsi_return_type image_return; |
| bool vflag; |
| bool coherent; |
| }; |
| |
| #define MAX_IMMEDIATE 1024 |
| struct immed { |
| enum tgsi_imm_type type; |
| union imm { |
| uint32_t ui; |
| int32_t i; |
| float f; |
| } val[4]; |
| }; |
| |
| struct vrend_temp_range { |
| int first; |
| int last; |
| int array_id; |
| bool precise_result; |
| }; |
| |
| struct vrend_shader_io { |
| char glsl_name[128]; |
| struct vrend_shader_io *overlapping_array; |
| unsigned sid : 16; |
| unsigned first : 16; |
| unsigned last : 16; |
| unsigned array_id : 10; |
| enum tgsi_interpolate_mode interpolate : 4; |
| enum tgsi_interpolate_loc location : 2; |
| |
| unsigned array_offset : 8; |
| enum tgsi_semantic name : 8; |
| unsigned stream : 2; |
| unsigned usage_mask : 4; |
| enum vec_type type : 2; |
| unsigned num_components : 3; |
| |
| bool invariant : 1; |
| bool precise : 1; |
| bool glsl_predefined_no_emit : 1; |
| bool glsl_no_index : 1; |
| bool glsl_gl_block : 1; |
| bool override_no_wm : 1; |
| bool is_int : 1; |
| bool fbfetch_used : 1; |
| bool needs_override : 1; |
| }; |
| |
| struct vrend_io_range { |
| struct vrend_shader_io io; |
| bool used; |
| }; |
| |
| struct vrend_glsl_strbufs { |
| int indent_level; |
| uint8_t required_sysval_uniform_decls; |
| struct vrend_strbuf glsl_main; |
| struct vrend_strbuf glsl_hdr; |
| struct vrend_strbuf glsl_ver_ext; |
| }; |
| |
| struct vrend_interface_bits { |
| uint64_t outputs_expected_mask; |
| uint64_t inputs_emitted_mask; |
| uint64_t outputs_emitted_mask; |
| }; |
| |
| struct vrend_generic_ios { |
| struct vrend_interface_bits match; |
| struct vrend_io_range input_range; |
| struct vrend_io_range output_range; |
| }; |
| |
| struct vrend_texcoord_ios { |
| struct vrend_interface_bits match; |
| }; |
| |
| struct vrend_patch_ios { |
| struct vrend_io_range input_range; |
| struct vrend_io_range output_range; |
| }; |
| |
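| /* Per-shader translation state: the embedded tgsi_iterate_context lets the iter_* |
|  * callbacks recover this struct with a cast and accumulate the IO, sampler, image |
|  * and feature-requirement information needed to emit the final GLSL. */ |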
| struct dump_ctx { |
| struct tgsi_iterate_context iter; |
| const struct vrend_shader_cfg *cfg; |
| struct tgsi_shader_info info; |
| enum tgsi_processor_type prog_type; |
| int size; |
| struct vrend_glsl_strbufs glsl_strbufs; |
| uint instno; |
| |
| struct vrend_strbuf src_bufs[4]; |
| struct vrend_strbuf dst_bufs[3]; |
| |
| uint64_t interp_input_mask; |
| uint32_t num_inputs; |
| uint32_t attrib_input_mask; |
| struct vrend_shader_io inputs[64]; |
| uint32_t num_outputs; |
| struct vrend_shader_io outputs[64]; |
| uint8_t front_back_color_emitted_flags[64]; |
| uint32_t num_system_values; |
| struct vrend_shader_io system_values[32]; |
| |
| bool guest_sent_io_arrays; |
| struct vrend_texcoord_ios texcoord_ios; |
| struct vrend_generic_ios generic_ios; |
| struct vrend_patch_ios patch_ios; |
| |
| uint32_t num_temp_ranges; |
| struct vrend_temp_range *temp_ranges; |
| |
| struct vrend_shader_sampler samplers[32]; |
| uint32_t samplers_used; |
| |
| uint32_t ssbo_first_binding; |
| uint32_t ssbo_used_mask; |
| uint32_t ssbo_atomic_mask; |
| uint32_t ssbo_array_base; |
| uint32_t ssbo_atomic_array_base; |
| uint32_t ssbo_integer_mask; |
| uint8_t ssbo_memory_qualifier[32]; |
| int32_t ssbo_last_binding; |
| |
| struct vrend_shader_image images[32]; |
| uint32_t images_used_mask; |
| int32_t image_last_binding; |
| |
| struct vrend_array *image_arrays; |
| uint32_t num_image_arrays; |
| |
| struct vrend_array *sampler_arrays; |
| uint32_t num_sampler_arrays; |
| |
| uint32_t fog_input_mask; |
| uint32_t fog_output_mask; |
| |
| int num_consts; |
| int num_imm; |
| struct immed imm[MAX_IMMEDIATE]; |
| |
| uint32_t req_local_mem; |
| bool integer_memory; |
| |
| uint32_t ubo_base; |
| uint32_t ubo_used_mask; |
| int ubo_sizes[32]; |
| uint32_t num_address; |
| |
| uint32_t num_abo; |
| int abo_idx[32]; |
| int abo_sizes[32]; |
| int abo_offsets[32]; |
| |
| uint64_t shader_req_bits; |
| uint64_t patches_emitted_mask; |
| |
| struct pipe_stream_output_info *so; |
| char **so_names; |
| bool write_so_outputs[PIPE_MAX_SO_OUTPUTS]; |
| bool write_all_cbufs; |
| uint32_t shadow_samp_mask; |
| |
| bool fs_lower_left_origin, fs_integer_pixel_center; |
| int fs_depth_layout; |
| uint32_t fs_blend_equation_advanced; |
| |
| bool separable_program; |
| |
| int gs_in_prim, gs_out_prim, gs_max_out_verts; |
| int gs_num_invocations; |
| |
| const struct vrend_shader_key *key; |
| int num_in_clip_dist; |
| int num_out_clip_dist; |
| int fs_uses_clipdist_input; |
| int glsl_ver_required; |
| int color_in_mask; |
| int color_out_mask; |
| /* only used when cull is enabled */ |
| uint8_t num_cull_dist_prop, num_clip_dist_prop; |
| bool has_pervertex; |
| bool front_face_emitted; |
| |
| bool has_clipvertex; |
| bool has_clipvertex_so; |
| bool write_mul_utemp; |
| bool write_mul_itemp; |
| bool has_sample_input; |
| bool has_noperspective; |
| bool early_depth_stencil; |
| bool has_file_memory; |
| bool force_color_two_side; |
| bool gles_use_tex_query_level; |
| bool has_pointsize_input; |
| bool has_pointsize_output; |
| |
| bool has_input_arrays; |
| bool has_output_arrays; |
| |
| int tcs_vertices_out; |
| int tes_prim_mode; |
| int tes_spacing; |
| int tes_vertex_order; |
| int tes_point_mode; |
| bool is_last_vertex_stage; |
| bool require_dummy_value; |
| |
| uint16_t local_cs_block_size[3]; |
| }; |
| |
| static const struct vrend_shader_table shader_req_table[] = { |
| { SHADER_REQ_SAMPLER_RECT, "ARB_texture_rectangle" }, |
| { SHADER_REQ_CUBE_ARRAY, "ARB_texture_cube_map_array" }, |
| { SHADER_REQ_INTS, "ARB_shader_bit_encoding" }, |
| { SHADER_REQ_SAMPLER_MS, "ARB_texture_multisample" }, |
| { SHADER_REQ_INSTANCE_ID, "ARB_draw_instanced" }, |
| { SHADER_REQ_LODQ, "ARB_texture_query_lod" }, |
| { SHADER_REQ_TXQ_LEVELS, "ARB_texture_query_levels" }, |
| { SHADER_REQ_TG4, "ARB_texture_gather" }, |
| { SHADER_REQ_VIEWPORT_IDX, "ARB_viewport_array" }, |
| { SHADER_REQ_STENCIL_EXPORT, "ARB_shader_stencil_export" }, |
| { SHADER_REQ_LAYER, "ARB_fragment_layer_viewport" }, |
| { SHADER_REQ_SAMPLE_SHADING, "ARB_sample_shading" }, |
| { SHADER_REQ_GPU_SHADER5, "ARB_gpu_shader5" }, |
| { SHADER_REQ_DERIVATIVE_CONTROL, "ARB_derivative_control" }, |
| { SHADER_REQ_FP64, "ARB_gpu_shader_fp64" }, |
| { SHADER_REQ_IMAGE_LOAD_STORE, "ARB_shader_image_load_store" }, |
| { SHADER_REQ_ES31_COMPAT, "ARB_ES3_1_compatibility" }, |
| { SHADER_REQ_IMAGE_SIZE, "ARB_shader_image_size" }, |
| { SHADER_REQ_TXQS, "ARB_shader_texture_image_samples" }, |
| { SHADER_REQ_FBFETCH, "EXT_shader_framebuffer_fetch" }, |
| { SHADER_REQ_SHADER_CLOCK, "ARB_shader_clock" }, |
| { SHADER_REQ_SHADER_INTEGER_FUNC, "MESA_shader_integer_functions" }, |
| { SHADER_REQ_SHADER_ATOMIC_FLOAT, "NV_shader_atomic_float"}, |
| { SHADER_REQ_CONSERVATIVE_DEPTH, "ARB_conservative_depth"}, |
|    { SHADER_REQ_BLEND_EQUATION_ADVANCED, "KHR_blend_equation_advanced"}, |
| { SHADER_REQ_TEXTURE_SHADOW_LOD, "EXT_texture_shadow_lod"}, |
| { SHADER_REQ_AMD_VS_LAYER, "AMD_vertex_shader_layer"}, |
| { SHADER_REQ_AMD_VIEWPORT_IDX, "AMD_vertex_shader_viewport_index"}, |
| { SHADER_REQ_SHADER_DRAW_PARAMETERS, "ARB_shader_draw_parameters"}, |
| { SHADER_REQ_SHADER_GROUP_VOTE, "ARB_shader_group_vote"}, |
| { SHADER_REQ_EXPLICIT_UNIFORM_LOCATION, "ARB_explicit_uniform_location"}, |
| }; |
| |
| enum vrend_type_qualifier { |
| TYPE_CONVERSION_NONE = 0, |
| FLOAT = 1, |
| VEC2 = 2, |
| VEC3 = 3, |
| VEC4 = 4, |
| INT = 5, |
| IVEC2 = 6, |
| IVEC3 = 7, |
| IVEC4 = 8, |
| UINT = 9, |
| UVEC2 = 10, |
| UVEC3 = 11, |
| UVEC4 = 12, |
| FLOAT_BITS_TO_UINT = 13, |
| UINT_BITS_TO_FLOAT = 14, |
| FLOAT_BITS_TO_INT = 15, |
| INT_BITS_TO_FLOAT = 16, |
| DOUBLE = 17, |
| DVEC2 = 18, |
| }; |
| |
| struct dest_info { |
| enum vrend_type_qualifier dtypeprefix; |
| enum vrend_type_qualifier dstconv; |
| enum vrend_type_qualifier udstconv; |
| enum vrend_type_qualifier idstconv; |
| bool dst_override_no_wm[2]; |
| int32_t dest_index; |
| }; |
| |
| struct source_info { |
| enum vrend_type_qualifier svec4; |
| int32_t sreg_index; |
| bool tg4_has_component; |
| bool override_no_wm[3]; |
| bool override_no_cast[3]; |
| int imm_value; |
| }; |
| |
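| /* Rows must stay in the same order as enum vrend_type_qualifier above: |
|  * get_string() indexes this table directly with the enum value. */ |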
| static const struct vrend_shader_table conversion_table[] = |
| { |
| {TYPE_CONVERSION_NONE, ""}, |
| {FLOAT, "float"}, |
| {VEC2, "vec2"}, |
| {VEC3, "vec3"}, |
| {VEC4, "vec4"}, |
| {INT, "int"}, |
| {IVEC2, "ivec2"}, |
| {IVEC3, "ivec3"}, |
| {IVEC4, "ivec4"}, |
| {UINT, "uint"}, |
| {UVEC2, "uvec2"}, |
| {UVEC3, "uvec3"}, |
| {UVEC4, "uvec4"}, |
| {FLOAT_BITS_TO_UINT, "floatBitsToUint"}, |
| {UINT_BITS_TO_FLOAT, "uintBitsToFloat"}, |
| {FLOAT_BITS_TO_INT, "floatBitsToInt"}, |
| {INT_BITS_TO_FLOAT, "intBitsToFloat"}, |
| {DOUBLE, "double"}, |
| {DVEC2, "dvec2"}, |
| }; |
| |
| enum io_type { |
| io_in, |
| io_out |
| }; |
| |
| enum io_decl_type { |
| decl_plain, |
| decl_block |
| }; |
| |
| static |
| void vrend_shader_write_io_as_src(struct vrend_strbuf *buf, |
| const char *arrayname, |
| const struct vrend_shader_io *io, |
| const struct tgsi_full_src_register *src, |
| enum io_decl_type decl_type); |
| |
| static |
| void vrend_shader_write_io_as_dst(struct vrend_strbuf *buf, |
| const char *arrayname, |
| const struct vrend_shader_io *io, |
| const struct tgsi_full_dst_register *src, |
| enum io_decl_type decl_type); |
| |
| /* We prefer arrays of arrays, but if those are not available then TCS, GEOM, and |
|  * TES inputs must be declared as blocks; FS inputs must not be, because |
|  * interpolateAt* doesn't support dereferencing block members. */ |
| static inline bool prefer_generic_io_block(const struct dump_ctx *ctx, enum io_type io) |
| { |
| if (ctx->cfg->has_arrays_of_arrays && !ctx->cfg->use_gles) |
| return false; |
| |
| switch (ctx->prog_type) { |
| case TGSI_PROCESSOR_FRAGMENT: |
| return false; |
| |
| case TGSI_PROCESSOR_TESS_CTRL: |
| return true; |
| |
| case TGSI_PROCESSOR_TESS_EVAL: |
|       return io == io_in || ctx->key->gs_present; |
| |
| case TGSI_PROCESSOR_GEOMETRY: |
| return io == io_in; |
| |
| case TGSI_PROCESSOR_VERTEX: |
| if (io == io_in) |
| return false; |
| return (ctx->key->gs_present || ctx->key->tes_present); |
| |
| default: |
| return false; |
| } |
| } |
| |
| static inline const char *get_string(enum vrend_type_qualifier key) |
| { |
| if (key >= ARRAY_SIZE(conversion_table)) { |
| printf("Unable to find the correct conversion\n"); |
| return conversion_table[TYPE_CONVERSION_NONE].string; |
| } |
| |
| return conversion_table[key].string; |
| } |
| |
| static inline const char *get_wm_string(unsigned wm) |
| { |
| switch(wm) { |
| case TGSI_WRITEMASK_NONE: |
| return ""; |
| case TGSI_WRITEMASK_X: |
| return ".x"; |
| case TGSI_WRITEMASK_XY: |
| return ".xy"; |
| case TGSI_WRITEMASK_XYZ: |
| return ".xyz"; |
| case TGSI_WRITEMASK_W: |
| return ".w"; |
| default: |
|       printf("Unknown writemask 0x%x\n", wm); |
| return ""; |
| } |
| } |
| |
| static inline const char *get_swizzle_string(uint8_t swizzle) |
| { |
| switch (swizzle) { |
| case PIPE_SWIZZLE_RED: return ".x"; |
| case PIPE_SWIZZLE_GREEN: return ".y"; |
| case PIPE_SWIZZLE_BLUE: return ".z"; |
| case PIPE_SWIZZLE_ALPHA: return ".w"; |
| case PIPE_SWIZZLE_ZERO: |
| case PIPE_SWIZZLE_ONE: return ".0"; |
| default: |
| assert(0); |
| return ""; |
| } |
| } |
| |
| const char *get_internalformat_string(int virgl_format, enum tgsi_return_type *stype); |
| |
| static inline const char *tgsi_proc_to_prefix(int shader_type) |
| { |
| switch (shader_type) { |
| case TGSI_PROCESSOR_VERTEX: return "vs"; |
| case TGSI_PROCESSOR_FRAGMENT: return "fs"; |
| case TGSI_PROCESSOR_GEOMETRY: return "gs"; |
| case TGSI_PROCESSOR_TESS_CTRL: return "tc"; |
| case TGSI_PROCESSOR_TESS_EVAL: return "te"; |
| case TGSI_PROCESSOR_COMPUTE: return "cs"; |
| default: |
| return NULL; |
|    } |
| } |
| |
| static inline const char *prim_to_name(int prim) |
| { |
| switch (prim) { |
| case PIPE_PRIM_POINTS: return "points"; |
| case PIPE_PRIM_LINES: return "lines"; |
| case PIPE_PRIM_LINE_STRIP: return "line_strip"; |
| case PIPE_PRIM_LINES_ADJACENCY: return "lines_adjacency"; |
| case PIPE_PRIM_TRIANGLES: return "triangles"; |
| case PIPE_PRIM_TRIANGLE_STRIP: return "triangle_strip"; |
| case PIPE_PRIM_TRIANGLES_ADJACENCY: return "triangles_adjacency"; |
| case PIPE_PRIM_QUADS: return "quads"; |
| default: return "UNKNOWN"; |
|    } |
| } |
| |
| static inline const char *prim_to_tes_name(int prim) |
| { |
| switch (prim) { |
| case PIPE_PRIM_QUADS: return "quads"; |
| case PIPE_PRIM_TRIANGLES: return "triangles"; |
| case PIPE_PRIM_LINES: return "isolines"; |
| default: return "UNKNOWN"; |
| } |
| } |
| |
| static inline const char *blend_to_name(enum gl_advanced_blend_mode blend) |
| { |
| switch (blend) { |
| case BLEND_MULTIPLY: return "multiply"; |
| case BLEND_SCREEN: return "screen"; |
| case BLEND_OVERLAY: return "overlay"; |
| case BLEND_DARKEN: return "darken"; |
| case BLEND_LIGHTEN: return "lighten"; |
| case BLEND_COLORDODGE: return "colordodge"; |
| case BLEND_COLORBURN: return "colorburn"; |
| case BLEND_HARDLIGHT: return "hardlight"; |
| case BLEND_SOFTLIGHT: return "softlight"; |
| case BLEND_DIFFERENCE: return "difference"; |
| case BLEND_EXCLUSION: return "exclusion"; |
| case BLEND_HSL_HUE: return "hsl_hue"; |
| case BLEND_HSL_SATURATION: return "hsl_saturation"; |
| case BLEND_HSL_COLOR: return "hsl_color"; |
| case BLEND_HSL_LUMINOSITY: return "hsl_luminosity"; |
| case BLEND_ALL: return "all_equations"; |
| default: return "UNKNOWN"; |
|    } |
| } |
| |
| static const char *get_spacing_string(int spacing) |
| { |
| switch (spacing) { |
| case PIPE_TESS_SPACING_FRACTIONAL_ODD: |
| return "fractional_odd_spacing"; |
| case PIPE_TESS_SPACING_FRACTIONAL_EVEN: |
| return "fractional_even_spacing"; |
| case PIPE_TESS_SPACING_EQUAL: |
| default: |
| return "equal_spacing"; |
| } |
| } |
| |
| static inline int gs_input_prim_to_size(int prim) |
| { |
| switch (prim) { |
| case PIPE_PRIM_POINTS: return 1; |
| case PIPE_PRIM_LINES: return 2; |
| case PIPE_PRIM_LINES_ADJACENCY: return 4; |
| case PIPE_PRIM_TRIANGLES: return 3; |
| case PIPE_PRIM_TRIANGLES_ADJACENCY: return 6; |
| default: return -1; |
|    } |
| } |
| |
| static inline bool fs_emit_layout(const struct dump_ctx *ctx) |
| { |
| if (ctx->fs_integer_pixel_center) |
| return true; |
| |
|    /* if fs_lower_left_origin is 0 and lower_left_origin is 0 - emit origin_upper_left, |
|     * if fs_lower_left_origin is 0 and lower_left_origin is 1 - emit nothing (lower), |
|     * if fs_lower_left_origin is 1 and lower_left_origin is 0 - emit nothing (lower), |
|     * if fs_lower_left_origin is 1 and lower_left_origin is 1 - emit origin_upper_left */ |
| return ctx->fs_lower_left_origin == ctx->key->fs.lower_left_origin; |
| } |
| |
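| /* Inputs are named after the stage that wrote them, so the prefix depends on |
|  * which upstream stages the shader key reports as present (e.g. a fragment |
|  * shader reads "gso_*" variables when a geometry shader feeds it). */ |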
| static const char *get_stage_input_name_prefix(const struct dump_ctx *ctx, int processor) |
| { |
| const char *name_prefix; |
| switch (processor) { |
| case TGSI_PROCESSOR_FRAGMENT: |
| if (ctx->key->gs_present) |
| name_prefix = "gso"; |
| else if (ctx->key->tes_present) |
| name_prefix = "teo"; |
| else |
| name_prefix = "vso"; |
| break; |
| case TGSI_PROCESSOR_GEOMETRY: |
| if (ctx->key->tes_present) |
| name_prefix = "teo"; |
| else |
| name_prefix = "vso"; |
| break; |
| case TGSI_PROCESSOR_TESS_EVAL: |
| if (ctx->key->tcs_present) |
| name_prefix = "tco"; |
| else |
| name_prefix = "vso"; |
| break; |
| case TGSI_PROCESSOR_TESS_CTRL: |
| name_prefix = "vso"; |
| break; |
| case TGSI_PROCESSOR_VERTEX: |
| default: |
| name_prefix = "in"; |
| break; |
| } |
| return name_prefix; |
| } |
| |
| static const char *get_stage_output_name_prefix(int processor) |
| { |
| const char *name_prefix; |
| switch (processor) { |
| case TGSI_PROCESSOR_FRAGMENT: |
| name_prefix = "fsout"; |
| break; |
| case TGSI_PROCESSOR_GEOMETRY: |
| name_prefix = "gso"; |
| break; |
| case TGSI_PROCESSOR_VERTEX: |
| name_prefix = "vso"; |
| break; |
| case TGSI_PROCESSOR_TESS_CTRL: |
| name_prefix = "tco"; |
| break; |
| case TGSI_PROCESSOR_TESS_EVAL: |
| name_prefix = "teo"; |
| break; |
| default: |
| name_prefix = "out"; |
| break; |
| } |
| return name_prefix; |
| } |
| |
| static int require_glsl_ver(const struct dump_ctx *ctx, int glsl_ver) |
| { |
| return glsl_ver > ctx->glsl_ver_required ? glsl_ver : ctx->glsl_ver_required; |
| } |
| |
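| /* The generated GLSL is assembled in three buffers: glsl_ver_ext collects the |
|  * version/extension preamble, glsl_hdr the global declarations, and glsl_main |
|  * the body of main(); the emit_* helpers below append to them, with indentation |
|  * tracking only for glsl_main. */ |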
| static void emit_indent(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| if (glsl_strbufs->indent_level > 0) { |
|       /* Very high levels of indentation don't improve readability. */ |
| int indent_level = MIN2(glsl_strbufs->indent_level, 15); |
| char buf[16]; |
| memset(buf, '\t', indent_level); |
| buf[indent_level] = '\0'; |
| strbuf_append(&glsl_strbufs->glsl_main, buf); |
| } |
| } |
| |
| static void emit_buf(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf) |
| { |
| emit_indent(glsl_strbufs); |
| strbuf_append(&glsl_strbufs->glsl_main, buf); |
| } |
| |
| static void indent_buf(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| glsl_strbufs->indent_level++; |
| } |
| |
| static void outdent_buf(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| if (glsl_strbufs->indent_level <= 0) { |
| strbuf_set_error(&glsl_strbufs->glsl_main); |
| return; |
| } |
| glsl_strbufs->indent_level--; |
| } |
| |
| static void set_buf_error(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| strbuf_set_error(&glsl_strbufs->glsl_main); |
| } |
| |
| __attribute__((format(printf, 2, 3))) |
| static void emit_buff(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...) |
| { |
| va_list va; |
| va_start(va, fmt); |
| emit_indent(glsl_strbufs); |
| strbuf_vappendf(&glsl_strbufs->glsl_main, fmt, va); |
| va_end(va); |
| } |
| |
| static void emit_hdr(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf) |
| { |
| strbuf_append(&glsl_strbufs->glsl_hdr, buf); |
| } |
| |
| static void set_hdr_error(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| strbuf_set_error(&glsl_strbufs->glsl_hdr); |
| } |
| |
| __attribute__((format(printf, 2, 3))) |
| static void emit_hdrf(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...) |
| { |
| va_list va; |
| va_start(va, fmt); |
| strbuf_vappendf(&glsl_strbufs->glsl_hdr, fmt, va); |
| va_end(va); |
| } |
| |
| static void emit_ver_ext(struct vrend_glsl_strbufs *glsl_strbufs, const char *buf) |
| { |
| strbuf_append(&glsl_strbufs->glsl_ver_ext, buf); |
| } |
| |
| __attribute__((format(printf, 2, 3))) |
| static void emit_ver_extf(struct vrend_glsl_strbufs *glsl_strbufs, const char *fmt, ...) |
| { |
| va_list va; |
| va_start(va, fmt); |
| strbuf_vappendf(&glsl_strbufs->glsl_ver_ext, fmt, va); |
| va_end(va); |
| } |
| |
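| /* TEMP declarations that carry an ArrayID are kept as a single range so they can |
|  * be emitted as one GLSL array; plain TEMP declarations are split into one |
|  * single-register range per temporary. */ |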
| static bool allocate_temp_range(struct vrend_temp_range **temp_ranges, uint32_t *num_temp_ranges, int first, int last, |
| int array_id) |
| { |
| int idx = *num_temp_ranges; |
| |
| if (array_id > 0) { |
| |
| *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + 1)); |
| if (unlikely(!*temp_ranges)) |
| return false; |
| |
| (*temp_ranges)[idx].first = first; |
| (*temp_ranges)[idx].last = last; |
| (*temp_ranges)[idx].array_id = array_id; |
| (*temp_ranges)[idx].precise_result = false; |
| (*num_temp_ranges)++; |
| } else { |
| int ntemps = last - first + 1; |
| *temp_ranges = realloc(*temp_ranges, sizeof(struct vrend_temp_range) * (idx + ntemps)); |
| if (unlikely(!*temp_ranges)) |
| return false; |
| |
| for (int i = 0; i < ntemps; ++i) { |
| (*temp_ranges)[idx + i].first = first + i; |
| (*temp_ranges)[idx + i].last = first + i; |
| (*temp_ranges)[idx + i].array_id = 0; |
| (*temp_ranges)[idx + i].precise_result = false; |
| } |
| (*num_temp_ranges) += ntemps; |
| } |
| return true; |
| } |
| |
| static struct vrend_temp_range *find_temp_range(const struct dump_ctx *ctx, int index) |
| { |
| uint32_t i; |
| for (i = 0; i < ctx->num_temp_ranges; i++) { |
| if (index >= ctx->temp_ranges[i].first && |
| index <= ctx->temp_ranges[i].last) |
| return &ctx->temp_ranges[i]; |
| } |
| return NULL; |
| } |
| |
| static bool samplertype_is_shadow(int sampler_type) |
| { |
| switch (sampler_type) { |
| case TGSI_TEXTURE_SHADOW1D: |
| case TGSI_TEXTURE_SHADOW1D_ARRAY: |
| case TGSI_TEXTURE_SHADOW2D: |
| case TGSI_TEXTURE_SHADOWRECT: |
| case TGSI_TEXTURE_SHADOW2D_ARRAY: |
| case TGSI_TEXTURE_SHADOWCUBE: |
| case TGSI_TEXTURE_SHADOWCUBE_ARRAY: |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| static uint32_t samplertype_to_req_bits(int sampler_type) |
| { |
| |
| switch (sampler_type) { |
| case TGSI_TEXTURE_SHADOWCUBE_ARRAY: |
| case TGSI_TEXTURE_CUBE_ARRAY: |
| return SHADER_REQ_CUBE_ARRAY; |
| case TGSI_TEXTURE_2D_MSAA: |
| case TGSI_TEXTURE_2D_ARRAY_MSAA: |
| return SHADER_REQ_SAMPLER_MS; |
| case TGSI_TEXTURE_BUFFER: |
| return SHADER_REQ_SAMPLER_BUF; |
| case TGSI_TEXTURE_SHADOWRECT: |
| case TGSI_TEXTURE_RECT: |
| return SHADER_REQ_SAMPLER_RECT; |
| default: |
| return 0; |
| } |
| } |
| |
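| /* Record a range of image declarations: remember each image's decl, request |
|  * SHADER_REQ_NV_IMAGE_FORMATS for formats that need it, and, when images are |
|  * indirectly addressed, extend the previous image array or start a new one. */ |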
| // TODO Consider exposing non-const ctx-> members as args to make *ctx const |
| static bool add_images(struct dump_ctx *ctx, int first, int last, |
| const struct tgsi_declaration_image *img_decl) |
| { |
| int i; |
| |
| const struct util_format_description *descr = util_format_description(img_decl->Format); |
| if (descr->nr_channels == 2 && |
| descr->swizzle[0] == UTIL_FORMAT_SWIZZLE_X && |
| descr->swizzle[1] == UTIL_FORMAT_SWIZZLE_Y && |
| descr->swizzle[2] == UTIL_FORMAT_SWIZZLE_0 && |
| descr->swizzle[3] == UTIL_FORMAT_SWIZZLE_1) { |
| ctx->shader_req_bits |= SHADER_REQ_NV_IMAGE_FORMATS; |
| } else if (img_decl->Format == PIPE_FORMAT_R11G11B10_FLOAT || |
| img_decl->Format == PIPE_FORMAT_R10G10B10A2_UINT || |
| img_decl->Format == PIPE_FORMAT_R10G10B10A2_UNORM || |
|               img_decl->Format == PIPE_FORMAT_R16G16B16A16_UNORM || |
| img_decl->Format == PIPE_FORMAT_R16G16B16A16_SNORM) |
| ctx->shader_req_bits |= SHADER_REQ_NV_IMAGE_FORMATS; |
| else if (descr->nr_channels == 1 && |
| descr->swizzle[0] == UTIL_FORMAT_SWIZZLE_X && |
| descr->swizzle[1] == UTIL_FORMAT_SWIZZLE_0 && |
| descr->swizzle[2] == UTIL_FORMAT_SWIZZLE_0 && |
| descr->swizzle[3] == UTIL_FORMAT_SWIZZLE_1 && |
|             (descr->channel[0].size == 8 || descr->channel[0].size == 16)) |
| ctx->shader_req_bits |= SHADER_REQ_NV_IMAGE_FORMATS; |
| |
| for (i = first; i <= last; i++) { |
| ctx->images[i].decl = *img_decl; |
| ctx->images[i].vflag = false; |
| ctx->images_used_mask |= (1 << i); |
| |
| if (!samplertype_is_shadow(ctx->images[i].decl.Resource)) |
| ctx->shader_req_bits |= samplertype_to_req_bits(ctx->images[i].decl.Resource); |
| } |
| |
| if (ctx->info.indirect_files & (1 << TGSI_FILE_IMAGE)) { |
| if (ctx->num_image_arrays) { |
| struct vrend_array *last_array = &ctx->image_arrays[ctx->num_image_arrays - 1]; |
| /* |
| * If this set of images is consecutive to the last array, |
| * and has compatible return and decls, then increase the array size. |
| */ |
| if ((last_array->first + last_array->array_size == first) && |
| !memcmp(&ctx->images[last_array->first].decl, &ctx->images[first].decl, sizeof(ctx->images[first].decl)) && |
| ctx->images[last_array->first].image_return == ctx->images[first].image_return) { |
| last_array->array_size += last - first + 1; |
| if (ctx->image_last_binding < last) |
| ctx->image_last_binding = last; |
| return true; |
| } |
| } |
| |
| /* allocate a new image array for this range of images */ |
| ctx->num_image_arrays++; |
| ctx->image_arrays = realloc(ctx->image_arrays, sizeof(struct vrend_array) * ctx->num_image_arrays); |
| if (!ctx->image_arrays) |
| return false; |
| ctx->image_arrays[ctx->num_image_arrays - 1].first = first; |
| ctx->image_arrays[ctx->num_image_arrays - 1].array_size = last - first + 1; |
| } |
| |
| if (ctx->image_last_binding < last) |
| ctx->image_last_binding = last; |
| return true; |
| } |
| |
| // TODO Consider exposing non-const ctx-> members as args to make *ctx const |
| static bool add_sampler_array(struct dump_ctx *ctx, int first, int last) |
| { |
| int idx = ctx->num_sampler_arrays; |
| ctx->num_sampler_arrays++; |
| ctx->sampler_arrays = realloc(ctx->sampler_arrays, sizeof(struct vrend_array) * ctx->num_sampler_arrays); |
| if (!ctx->sampler_arrays) |
| return false; |
| |
| ctx->sampler_arrays[idx].first = first; |
| ctx->sampler_arrays[idx].array_size = last - first + 1; |
| return true; |
| } |
| |
| static int lookup_sampler_array(const struct dump_ctx *ctx, int index) |
| { |
| uint32_t i; |
| for (i = 0; i < ctx->num_sampler_arrays; i++) { |
| int last = ctx->sampler_arrays[i].first + ctx->sampler_arrays[i].array_size - 1; |
| if (index >= ctx->sampler_arrays[i].first && |
| index <= last) { |
| return ctx->sampler_arrays[i].first; |
| } |
| } |
| return -1; |
| } |
| |
| int vrend_shader_lookup_sampler_array(const struct vrend_shader_info *sinfo, int index) |
| { |
| int i; |
| for (i = 0; i < sinfo->num_sampler_arrays; i++) { |
| int last = sinfo->sampler_arrays[i].first + sinfo->sampler_arrays[i].array_size - 1; |
| if (index >= sinfo->sampler_arrays[i].first && |
| index <= last) { |
| return sinfo->sampler_arrays[i].first; |
| } |
| } |
| return -1; |
| } |
| |
| // TODO Consider exposing non-const ctx-> members as args to make *ctx const |
| static bool add_samplers(struct dump_ctx *ctx, int first, int last, int sview_type, enum tgsi_return_type sview_rtype) |
| { |
| if (sview_rtype == TGSI_RETURN_TYPE_SINT || |
| sview_rtype == TGSI_RETURN_TYPE_UINT) |
| ctx->shader_req_bits |= SHADER_REQ_INTS; |
| |
| for (int i = first; i <= last; i++) { |
| ctx->samplers[i].tgsi_sampler_return = sview_rtype; |
| ctx->samplers[i].tgsi_sampler_type = sview_type; |
| } |
| |
| if (ctx->info.indirect_files & (1 << TGSI_FILE_SAMPLER)) { |
| if (ctx->num_sampler_arrays) { |
| struct vrend_array *last_array = &ctx->sampler_arrays[ctx->num_sampler_arrays - 1]; |
| if ((last_array->first + last_array->array_size == first) && |
| ctx->samplers[last_array->first].tgsi_sampler_type == sview_type && |
| ctx->samplers[last_array->first].tgsi_sampler_return == sview_rtype) { |
| last_array->array_size += last - first + 1; |
| return true; |
| } |
| } |
| |
|       /* allocate a new sampler array for this range of samplers */ |
| return add_sampler_array(ctx, first, last); |
| } |
| return true; |
| } |
| |
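| /* Mirror of Mesa's gl_varying_slot enum, used by |
|  * varying_bit_from_semantic_and_index() below to turn TGSI semantics into slots. */ |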
| typedef enum |
| { |
| VARYING_SLOT_POS, |
| VARYING_SLOT_COL0, /* COL0 and COL1 must be contiguous */ |
| VARYING_SLOT_COL1, |
| VARYING_SLOT_FOGC, |
| VARYING_SLOT_TEX0, /* TEX0-TEX7 must be contiguous */ |
| VARYING_SLOT_TEX1, |
| VARYING_SLOT_TEX2, |
| VARYING_SLOT_TEX3, |
| VARYING_SLOT_TEX4, |
| VARYING_SLOT_TEX5, |
| VARYING_SLOT_TEX6, |
| VARYING_SLOT_TEX7, |
| VARYING_SLOT_PSIZ, /* Does not appear in FS */ |
| VARYING_SLOT_BFC0, /* Does not appear in FS */ |
| VARYING_SLOT_BFC1, /* Does not appear in FS */ |
| VARYING_SLOT_EDGE, /* Does not appear in FS */ |
| VARYING_SLOT_CLIP_VERTEX, /* Does not appear in FS */ |
| VARYING_SLOT_CLIP_DIST0, |
| VARYING_SLOT_CLIP_DIST1, |
| VARYING_SLOT_CULL_DIST0, |
| VARYING_SLOT_CULL_DIST1, |
| VARYING_SLOT_PRIMITIVE_ID, /* Does not appear in VS */ |
| VARYING_SLOT_LAYER, /* Appears as VS or GS output */ |
| VARYING_SLOT_VIEWPORT, /* Appears as VS or GS output */ |
| VARYING_SLOT_FACE, /* FS only */ |
| VARYING_SLOT_PNTC, /* FS only */ |
| VARYING_SLOT_TESS_LEVEL_OUTER, /* Only appears as TCS output. */ |
| VARYING_SLOT_TESS_LEVEL_INNER, /* Only appears as TCS output. */ |
| VARYING_SLOT_BOUNDING_BOX0, /* Only appears as TCS output. */ |
| VARYING_SLOT_BOUNDING_BOX1, /* Only appears as TCS output. */ |
| VARYING_SLOT_VIEW_INDEX, |
| VARYING_SLOT_VIEWPORT_MASK, /* Does not appear in FS */ |
| VARYING_SLOT_PRIMITIVE_SHADING_RATE = VARYING_SLOT_FACE, /* Does not appear in FS. */ |
| |
| VARYING_SLOT_PRIMITIVE_COUNT = VARYING_SLOT_TESS_LEVEL_OUTER, /* Only appears in MESH. */ |
| VARYING_SLOT_PRIMITIVE_INDICES = VARYING_SLOT_TESS_LEVEL_INNER, /* Only appears in MESH. */ |
| VARYING_SLOT_TASK_COUNT = VARYING_SLOT_BOUNDING_BOX0, /* Only appears in TASK. */ |
| |
| VARYING_SLOT_VAR0 = 32, /* First generic varying slot */ |
| /* the remaining are simply for the benefit of gl_varying_slot_name() |
| * and not to be construed as an upper bound: |
| */ |
| VARYING_SLOT_VAR1, |
| VARYING_SLOT_VAR2, |
| VARYING_SLOT_VAR3, |
| VARYING_SLOT_VAR4, |
| VARYING_SLOT_VAR5, |
| VARYING_SLOT_VAR6, |
| VARYING_SLOT_VAR7, |
| VARYING_SLOT_VAR8, |
| VARYING_SLOT_VAR9, |
| VARYING_SLOT_VAR10, |
| VARYING_SLOT_VAR11, |
| VARYING_SLOT_VAR12, |
| VARYING_SLOT_VAR13, |
| VARYING_SLOT_VAR14, |
| VARYING_SLOT_VAR15, |
| VARYING_SLOT_VAR16, |
| VARYING_SLOT_VAR17, |
| VARYING_SLOT_VAR18, |
| VARYING_SLOT_VAR19, |
| VARYING_SLOT_VAR20, |
| VARYING_SLOT_VAR21, |
| VARYING_SLOT_VAR22, |
| VARYING_SLOT_VAR23, |
| VARYING_SLOT_VAR24, |
| VARYING_SLOT_VAR25, |
| VARYING_SLOT_VAR26, |
| VARYING_SLOT_VAR27, |
| VARYING_SLOT_VAR28, |
| VARYING_SLOT_VAR29, |
| VARYING_SLOT_VAR30, |
| VARYING_SLOT_VAR31, |
|    /* Account for the shift without CAP_TEXCOORD in Mesa */ |
| VARYING_SLOT_PATCH0 = VARYING_SLOT_VAR31 + 9 |
| } gl_varying_slot; |
| |
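| /* Map a TGSI semantic/index pair to the corresponding gl_varying_slot value. */ |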
| static uint32_t |
| varying_bit_from_semantic_and_index(enum tgsi_semantic semantic, int index) |
| { |
| switch (semantic) { |
| case TGSI_SEMANTIC_POSITION: |
| return VARYING_SLOT_POS; |
| case TGSI_SEMANTIC_COLOR: |
| if (index == 0) |
| return VARYING_SLOT_COL0; |
| else |
| return VARYING_SLOT_COL1; |
| case TGSI_SEMANTIC_BCOLOR: |
| if (index == 0) |
| return VARYING_SLOT_BFC0; |
| else |
| return VARYING_SLOT_BFC1; |
| case TGSI_SEMANTIC_FOG: |
| return VARYING_SLOT_FOGC; |
| case TGSI_SEMANTIC_PSIZE: |
| return VARYING_SLOT_PSIZ; |
| case TGSI_SEMANTIC_GENERIC: |
| return VARYING_SLOT_VAR0 + index; |
| case TGSI_SEMANTIC_FACE: |
| return VARYING_SLOT_FACE; |
| case TGSI_SEMANTIC_EDGEFLAG: |
| return VARYING_SLOT_EDGE; |
| case TGSI_SEMANTIC_PRIMID: |
| return VARYING_SLOT_PRIMITIVE_ID; |
| case TGSI_SEMANTIC_CLIPDIST: |
| if (index == 0) |
| return VARYING_SLOT_CLIP_DIST0; |
| else |
| return VARYING_SLOT_CLIP_DIST1; |
| case TGSI_SEMANTIC_CLIPVERTEX: |
| return VARYING_SLOT_CLIP_VERTEX; |
| case TGSI_SEMANTIC_TEXCOORD: |
| assert(index < 8); |
| return (VARYING_SLOT_TEX0 + index); |
| case TGSI_SEMANTIC_PCOORD: |
| return VARYING_SLOT_PNTC; |
| case TGSI_SEMANTIC_VIEWPORT_INDEX: |
| return VARYING_SLOT_VIEWPORT; |
| case TGSI_SEMANTIC_LAYER: |
| return VARYING_SLOT_LAYER; |
| case TGSI_SEMANTIC_TESSINNER: |
| return VARYING_SLOT_TESS_LEVEL_INNER; |
| case TGSI_SEMANTIC_TESSOUTER: |
| return VARYING_SLOT_TESS_LEVEL_OUTER; |
| case TGSI_SEMANTIC_PATCH: |
| return VARYING_SLOT_PATCH0 + index; |
| default: |
| vrend_printf("Warning: Bad TGSI semantic: %d/%d\n", semantic, index); |
| return 0; |
| } |
| } |
| |
| static struct vrend_array *lookup_image_array_ptr(const struct dump_ctx *ctx, int index) |
| { |
| uint32_t i; |
| for (i = 0; i < ctx->num_image_arrays; i++) { |
| if (index >= ctx->image_arrays[i].first && |
| index <= ctx->image_arrays[i].first + ctx->image_arrays[i].array_size - 1) { |
| return &ctx->image_arrays[i]; |
| } |
| } |
| return NULL; |
| } |
| |
| static int lookup_image_array(const struct dump_ctx *ctx, int index) |
| { |
| struct vrend_array *image = lookup_image_array_ptr(ctx, index); |
| return image ? image->first : -1; |
| } |
| |
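| /* Lightweight declaration scan: collect the fog input/output masks, pre-register |
|  * fragment-shader inputs and track the lowest SSBO binding. */ |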
| static boolean |
| iter_decls(struct tgsi_iterate_context *iter, |
| struct tgsi_full_declaration *decl) |
| { |
| struct dump_ctx *ctx = (struct dump_ctx *)iter; |
| switch (decl->Declaration.File) { |
| case TGSI_FILE_INPUT: |
| /* Tag used semantic fog inputs */ |
| if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) { |
| ctx->fog_input_mask |= (1 << decl->Semantic.Index); |
| } |
| |
| if (ctx->prog_type == TGSI_PROCESSOR_FRAGMENT) { |
| for (uint32_t j = 0; j < ctx->num_inputs; j++) { |
| if (ctx->inputs[j].name == decl->Semantic.Name && |
| ctx->inputs[j].sid == decl->Semantic.Index && |
| ctx->inputs[j].first == decl->Range.First) |
| return true; |
| } |
| ctx->inputs[ctx->num_inputs].name = decl->Semantic.Name; |
| ctx->inputs[ctx->num_inputs].first = decl->Range.First; |
| ctx->inputs[ctx->num_inputs].last = decl->Range.Last; |
| ctx->num_inputs++; |
| } |
| break; |
| |
| case TGSI_FILE_OUTPUT: |
| if (decl->Semantic.Name == TGSI_SEMANTIC_FOG) { |
| ctx->fog_output_mask |= (1 << decl->Semantic.Index); |
| } |
| break; |
|    case TGSI_FILE_BUFFER: |
|       if (ctx->ssbo_first_binding > decl->Range.First) |
|          ctx->ssbo_first_binding = decl->Range.First; |
|       break; |
| default: |
| break; |
| } |
| return true; |
| } |
| |
| static bool logiop_require_inout(const struct vrend_shader_key *key) |
| { |
| if (!key->fs.logicop_enabled) |
| return false; |
| |
| switch (key->fs.logicop_func) { |
| case PIPE_LOGICOP_CLEAR: |
| case PIPE_LOGICOP_SET: |
| case PIPE_LOGICOP_COPY: |
| case PIPE_LOGICOP_COPY_INVERTED: |
| return false; |
| default: |
| return true; |
| } |
| } |
| |
| static enum vec_type get_type(uint32_t signed_int_mask, |
| uint32_t unsigned_int_mask, |
| int bit) |
| { |
| if (signed_int_mask & (1 << bit)) |
| return VEC_INT; |
| else if (unsigned_int_mask & (1 << bit)) |
| return VEC_UINT; |
| else |
| return VEC_FLOAT; |
| } |
| |
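| /* IO declarations whose register ranges overlap an already-seen declaration with |
|  * the same semantic and interpolation are folded into a single GLSL array; the |
|  * helpers below find the overlap and link the two entries through |
|  * overlapping_array/array_offset. */ |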
| static struct vrend_shader_io * |
| find_overlapping_io(struct vrend_shader_io io[static 64], |
| uint32_t num_io, |
| const struct tgsi_full_declaration *decl) |
| { |
| for (uint32_t j = 0; j < num_io - 1; j++) { |
| if (io[j].interpolate == decl->Interp.Interpolate && |
| io[j].name == decl->Semantic.Name && |
| ((io[j].first <= decl->Range.First && |
| io[j].last > decl->Range.First) || |
| (io[j].first < decl->Range.Last && |
| io[j].last >= decl->Range.Last))) { |
| return &io[j]; |
| } |
| } |
| return NULL; |
| } |
| |
| static void |
| map_overlapping_io_array(struct vrend_shader_io io[static 64], |
| struct vrend_shader_io *new_io, |
| uint32_t num_io, |
| const struct tgsi_full_declaration *decl) |
| { |
| struct vrend_shader_io *overlap_io = find_overlapping_io(io, num_io, decl); |
| if (overlap_io && !overlap_io->needs_override) { |
| int delta = new_io->first - overlap_io->first; |
| if (delta >= 0) { |
| new_io->array_offset = delta; |
| new_io->overlapping_array = overlap_io; |
| overlap_io->last = MAX2(overlap_io->last, new_io->last); |
|       } else { |
| overlap_io->overlapping_array = new_io; |
| overlap_io->array_offset = -delta; |
| new_io->last = MAX2(overlap_io->last, new_io->last); |
| } |
| overlap_io->usage_mask |= new_io->usage_mask; |
| new_io->usage_mask = overlap_io->usage_mask; |
| } |
| } |
| |
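| /* GLSL builtin name, required SHADER_REQ_* bits and write-mask override for the |
|  * TGSI system-value semantics handled by the translator. */ |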
| struct sysvalue_prop_map { |
| const char *glsl_name; |
| uint64_t required_ext; |
| bool override_no_wm; |
| } sysvalue_map[TGSI_SEMANTIC_COUNT] = { |
| [TGSI_SEMANTIC_INSTANCEID] = {"gl_InstanceID", SHADER_REQ_INSTANCE_ID | SHADER_REQ_INTS, true}, |
| [TGSI_SEMANTIC_VERTEXID] = {"gl_VertexID", SHADER_REQ_INTS, true}, |
| [TGSI_SEMANTIC_HELPER_INVOCATION] = {"gl_HelperInvocation", SHADER_REQ_ES31_COMPAT, true}, |
| [TGSI_SEMANTIC_SAMPLEID] = {"gl_SampleID", SHADER_REQ_SAMPLE_SHADING | SHADER_REQ_INTS, true}, |
| [TGSI_SEMANTIC_SAMPLEPOS] = { "gl_SamplePosition", SHADER_REQ_SAMPLE_SHADING, true}, |
| [TGSI_SEMANTIC_INVOCATIONID] = { "gl_InvocationID", SHADER_REQ_INTS | SHADER_REQ_GPU_SHADER5, true}, |
| [TGSI_SEMANTIC_SAMPLEMASK] = {"gl_SampleMaskIn[0]", SHADER_REQ_INTS | SHADER_REQ_GPU_SHADER5, true}, |
| [TGSI_SEMANTIC_PRIMID] = {"gl_PrimitiveID", SHADER_REQ_INTS | SHADER_REQ_GPU_SHADER5, true}, |
| [TGSI_SEMANTIC_TESSCOORD] = {"gl_TessCoord", SHADER_REQ_NONE, false}, |
| [TGSI_SEMANTIC_VERTICESIN] = { "gl_PatchVerticesIn", SHADER_REQ_INTS, true}, |
| [TGSI_SEMANTIC_TESSOUTER] = {"gl_TessLevelOuter", SHADER_REQ_NONE, true}, |
| [TGSI_SEMANTIC_TESSINNER] = {"gl_TessLevelInner", SHADER_REQ_NONE, true}, |
| [TGSI_SEMANTIC_THREAD_ID] = {"gl_LocalInvocationID", SHADER_REQ_NONE, false}, |
| [TGSI_SEMANTIC_BLOCK_ID] = {"gl_WorkGroupID", SHADER_REQ_NONE, false}, |
|    [TGSI_SEMANTIC_GRID_SIZE] = {"gl_NumWorkGroups", SHADER_REQ_NONE, false}, |
|    [TGSI_SEMANTIC_BASEVERTEX] = {"gl_BaseVertexARB", SHADER_REQ_SHADER_DRAW_PARAMETERS | SHADER_REQ_INTS, true}, |
|    [TGSI_SEMANTIC_BASEINSTANCE] = {"gl_BaseInstanceARB", SHADER_REQ_SHADER_DRAW_PARAMETERS | SHADER_REQ_INTS, true}, |
|    [TGSI_SEMANTIC_DRAWID] = {"gl_DrawIDARB + drawid_base", SHADER_REQ_SHADER_DRAW_PARAMETERS | SHADER_REQ_INTS, true}, |
| }; |
| |
| |
| static boolean |
| iter_declaration(struct tgsi_iterate_context *iter, |
| struct tgsi_full_declaration *decl) |
| { |
| struct dump_ctx *ctx = (struct dump_ctx *)iter; |
| int i; |
| int color_offset = 0; |
| const char *name_prefix; |
| bool add_two_side = false; |
| |
| switch (decl->Declaration.File) { |
| case TGSI_FILE_INPUT: |
| for (uint32_t j = 0; j < ctx->num_inputs; j++) { |
| if (ctx->inputs[j].name == decl->Semantic.Name && |
| ctx->inputs[j].sid == decl->Semantic.Index && |
| ctx->inputs[j].first == decl->Range.First && |
| ((!decl->Declaration.Array && ctx->inputs[j].array_id == 0) || |
| (ctx->inputs[j].array_id == decl->Array.ArrayID))) { |
| return true; |
| } |
| } |
| |
| i = ctx->num_inputs++; |
| if (ctx->num_inputs > ARRAY_SIZE(ctx->inputs)) { |
| vrend_printf( "Number of inputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->inputs)); |
| return false; |
| } |
| |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) { |
| ctx->attrib_input_mask |= (1 << decl->Range.First); |
| ctx->inputs[i].type = get_type(ctx->key->vs.attrib_signed_int_bitmask, |
| ctx->key->vs.attrib_unsigned_int_bitmask, |
| decl->Range.First); |
| } |
| ctx->inputs[i].name = decl->Semantic.Name; |
| ctx->inputs[i].sid = decl->Semantic.Index; |
| ctx->inputs[i].interpolate = decl->Interp.Interpolate; |
| ctx->inputs[i].location = decl->Interp.Location; |
| ctx->inputs[i].first = decl->Range.First; |
| ctx->inputs[i].last = decl->Range.Last; |
| ctx->inputs[i].array_id = decl->Declaration.Array ? decl->Array.ArrayID : 0; |
| ctx->inputs[i].usage_mask = decl->Declaration.UsageMask; |
| ctx->inputs[i].num_components = 4; |
| |
| ctx->inputs[i].glsl_predefined_no_emit = false; |
| ctx->inputs[i].glsl_no_index = false; |
| ctx->inputs[i].override_no_wm = ctx->inputs[i].num_components == 1; |
| ctx->inputs[i].glsl_gl_block = false; |
| ctx->inputs[i].overlapping_array = NULL; |
| |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (decl->Interp.Location == TGSI_INTERPOLATE_LOC_SAMPLE) { |
| ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5; |
| ctx->has_sample_input = true; |
| } |
| if (decl->Interp.Interpolate == TGSI_INTERPOLATE_LINEAR && ctx->cfg->use_gles && |
| ctx->cfg->has_nopersective) { |
| ctx->shader_req_bits |= SHADER_REQ_SHADER_NOPERSPECTIVE_INTERPOLATION; |
| ctx->has_noperspective = true; |
| } |
| } |
| |
| map_overlapping_io_array(ctx->inputs, &ctx->inputs[i], ctx->num_inputs, decl); |
| |
| if (!ctx->inputs[i].glsl_predefined_no_emit) { |
| |
| /* If the output of the previous shader contained arrays we |
| * have to check whether a non-array input here should be part |
| * of an array */ |
| for (uint32_t j = 0; j < ctx->key->in_arrays.num_arrays; j++) { |
| const struct vrend_shader_io_array *array = &ctx->key->in_arrays.layout[j]; |
| |
| if (array->name == decl->Semantic.Name && |
| array->sid <= decl->Semantic.Index && |
| array->sid + array->size >= decl->Semantic.Index) { |
| ctx->inputs[i].sid = array->sid; |
| ctx->inputs[i].last = MAX2(ctx->inputs[i].first + array->size, ctx->inputs[i].last); |
| break; |
| } |
| } |
| } |
| |
| if (ctx->inputs[i].first != ctx->inputs[i].last) |
| ctx->glsl_ver_required = require_glsl_ver(ctx, 150); |
| |
| name_prefix = get_stage_input_name_prefix(ctx, iter->processor.Processor); |
| |
| switch (ctx->inputs[i].name) { |
| case TGSI_SEMANTIC_COLOR: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (ctx->glsl_ver_required < 140) { |
| if (decl->Semantic.Index == 0) |
| name_prefix = "gl_Color"; |
| else if (decl->Semantic.Index == 1) |
| name_prefix = "gl_SecondaryColor"; |
| else |
| vrend_printf( "got illegal color semantic index %d\n", decl->Semantic.Index); |
| ctx->inputs[i].glsl_no_index = true; |
| } else { |
| if (ctx->key->color_two_side) { |
| int j = ctx->num_inputs++; |
| if (ctx->num_inputs > ARRAY_SIZE(ctx->inputs)) { |
| vrend_printf( "Number of inputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->inputs)); |
| return false; |
| } |
| |
| ctx->inputs[j].name = TGSI_SEMANTIC_BCOLOR; |
| ctx->inputs[j].sid = decl->Semantic.Index; |
| ctx->inputs[j].interpolate = decl->Interp.Interpolate; |
| ctx->inputs[j].location = decl->Interp.Location; |
| ctx->inputs[j].first = decl->Range.First; |
| ctx->inputs[j].last = decl->Range.Last; |
| ctx->inputs[j].glsl_predefined_no_emit = false; |
| ctx->inputs[j].glsl_no_index = false; |
| ctx->inputs[j].override_no_wm = false; |
| |
| ctx->color_in_mask |= (1 << decl->Semantic.Index); |
| |
| if (ctx->front_face_emitted == false) { |
| int k = ctx->num_inputs++; |
|                      if (ctx->num_inputs > ARRAY_SIZE(ctx->inputs)) { |
| vrend_printf( "Number of inputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->inputs)); |
| return false; |
| } |
| |
| ctx->inputs[k].name = TGSI_SEMANTIC_FACE; |
| ctx->inputs[k].sid = 0; |
| ctx->inputs[k].interpolate = TGSI_INTERPOLATE_CONSTANT; |
| ctx->inputs[k].location = TGSI_INTERPOLATE_LOC_CENTER; |
| ctx->inputs[k].first = 0; |
| ctx->inputs[k].override_no_wm = false; |
| ctx->inputs[k].glsl_predefined_no_emit = true; |
| ctx->inputs[k].glsl_no_index = true; |
| } |
| add_two_side = true; |
| } |
| } |
| } |
| break; |
| case TGSI_SEMANTIC_PRIMID: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) { |
| name_prefix = "gl_PrimitiveIDIn"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].override_no_wm = true; |
| ctx->shader_req_bits |= SHADER_REQ_INTS; |
| } else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| name_prefix = "gl_PrimitiveID"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->glsl_ver_required = require_glsl_ver(ctx, 150); |
| ctx->shader_req_bits |= SHADER_REQ_GEOMETRY_SHADER; |
| } |
| break; |
| case TGSI_SEMANTIC_VIEWPORT_INDEX: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].is_int = true; |
| ctx->inputs[i].type = VEC_INT; |
| ctx->inputs[i].override_no_wm = true; |
| name_prefix = "gl_ViewportIndex"; |
| if (ctx->glsl_ver_required >= 140) |
| ctx->shader_req_bits |= SHADER_REQ_LAYER; |
| if (ctx->cfg->use_gles) |
| ctx->shader_req_bits |= SHADER_REQ_VIEWPORT_IDX; |
| } |
| break; |
| case TGSI_SEMANTIC_LAYER: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| name_prefix = "gl_Layer"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].is_int = true; |
| ctx->inputs[i].type = VEC_INT; |
| ctx->inputs[i].override_no_wm = true; |
| ctx->shader_req_bits |= SHADER_REQ_LAYER; |
| } |
| break; |
| case TGSI_SEMANTIC_PSIZE: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) { |
| name_prefix = "gl_PointSize"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].override_no_wm = true; |
| ctx->inputs[i].glsl_gl_block = true; |
| ctx->shader_req_bits |= SHADER_REQ_PSIZE; |
| ctx->has_pointsize_input = true; |
| } |
| break; |
| case TGSI_SEMANTIC_CLIPDIST: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) { |
| name_prefix = "gl_ClipDistance"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].glsl_gl_block = true; |
| ctx->num_in_clip_dist += 4 * (ctx->inputs[i].last - ctx->inputs[i].first + 1); |
| ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE; |
| if (ctx->inputs[i].last != ctx->inputs[i].first) |
| ctx->guest_sent_io_arrays = true; |
| } else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| name_prefix = "gl_ClipDistance"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->num_in_clip_dist += 4 * (ctx->inputs[i].last - ctx->inputs[i].first + 1); |
| ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE; |
| if (ctx->inputs[i].last != ctx->inputs[i].first) |
| ctx->guest_sent_io_arrays = true; |
| } |
| break; |
| case TGSI_SEMANTIC_POSITION: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) { |
| name_prefix = "gl_Position"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].glsl_gl_block = true; |
| } else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (ctx->cfg->use_gles && ctx->fs_integer_pixel_center) { |
| name_prefix = "(gl_FragCoord - vec4(0.5, 0.5, 0.0, 0.0))"; |
| } else |
| name_prefix = "gl_FragCoord"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| } |
| break; |
| case TGSI_SEMANTIC_FACE: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (ctx->front_face_emitted) { |
| ctx->num_inputs--; |
| return true; |
| } |
| name_prefix = "gl_FrontFacing"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->front_face_emitted = true; |
| } |
| break; |
| case TGSI_SEMANTIC_PCOORD: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (ctx->cfg->use_gles) { |
| name_prefix = "vec4(gl_PointCoord.x, mix(1.0 - gl_PointCoord.y, gl_PointCoord.y, clamp(winsys_adjust_y, 0.0, 1.0)), 0.0, 1.0)"; |
| ctx->glsl_strbufs.required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y); |
| } else |
| name_prefix = "vec4(gl_PointCoord, 0.0, 1.0)"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].num_components = 4; |
| ctx->inputs[i].usage_mask = 0xf; |
| } |
| break; |
| case TGSI_SEMANTIC_PATCH: |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) |
| name_prefix = "patch"; |
| /* fallthrough */ |
| case TGSI_SEMANTIC_GENERIC: |
| case TGSI_SEMANTIC_TEXCOORD: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| if (ctx->key->fs.coord_replace & (1 << ctx->inputs[i].sid)) { |
| if (ctx->cfg->use_gles) { |
| name_prefix = "vec4(gl_PointCoord.x, mix(1.0 - gl_PointCoord.y, gl_PointCoord.y, clamp(winsys_adjust_y, 0.0, 1.0)), 0.0, 1.0)"; |
| ctx->glsl_strbufs.required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y); |
| } else |
| name_prefix = "vec4(gl_PointCoord, 0.0, 1.0)"; |
| ctx->inputs[i].glsl_predefined_no_emit = true; |
| ctx->inputs[i].glsl_no_index = true; |
| ctx->inputs[i].num_components = 4; |
| ctx->inputs[i].usage_mask = 0xf; |
| break; |
| } |
| } |
| if (ctx->inputs[i].first != ctx->inputs[i].last || |
| ctx->inputs[i].array_id > 0) { |
| ctx->guest_sent_io_arrays = true; |
| if (!ctx->cfg->use_gles && |
| (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY || |
| ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL || |
| ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)) { |
| ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS; |
| } |
| } |
| break; |
| default: |
| vrend_printf("unhandled input semantic: %x\n", ctx->inputs[i].name); |
| break; |
| } |
| |
| if (ctx->inputs[i].glsl_no_index) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s", name_prefix); |
| else { |
| if (ctx->inputs[i].name == TGSI_SEMANTIC_FOG){ |
| ctx->inputs[i].usage_mask = 0xf; |
| ctx->inputs[i].num_components = 4; |
| ctx->inputs[i].override_no_wm = false; |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_f%d", name_prefix, ctx->inputs[i].sid); |
| } else if (ctx->inputs[i].name == TGSI_SEMANTIC_COLOR) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_c%d", name_prefix, ctx->inputs[i].sid); |
| else if (ctx->inputs[i].name == TGSI_SEMANTIC_BCOLOR) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_bc%d", name_prefix, ctx->inputs[i].sid); |
| else if (ctx->inputs[i].name == TGSI_SEMANTIC_GENERIC) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_g%d", name_prefix, ctx->inputs[i].sid); |
| else if (ctx->inputs[i].name == TGSI_SEMANTIC_PATCH) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s%d", name_prefix, ctx->inputs[i].sid); |
| else if (ctx->inputs[i].name == TGSI_SEMANTIC_TEXCOORD) |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_t%d", name_prefix, ctx->inputs[i].sid); |
| else |
| snprintf(ctx->inputs[i].glsl_name, 128, "%s_%d", name_prefix, ctx->inputs[i].first); |
| } |
| if (add_two_side) { |
| snprintf(ctx->inputs[i + 1].glsl_name, 128, "%s_bc%d", name_prefix, ctx->inputs[i + 1].sid); |
| if (!ctx->front_face_emitted) { |
| snprintf(ctx->inputs[i + 2].glsl_name, 128, "%s", "gl_FrontFacing"); |
| ctx->front_face_emitted = true; |
| } |
| } |
| break; |
| case TGSI_FILE_OUTPUT: |
| for (uint32_t j = 0; j < ctx->num_outputs; j++) { |
| if (ctx->outputs[j].name == decl->Semantic.Name && |
| ctx->outputs[j].sid == decl->Semantic.Index && |
| ctx->outputs[j].first == decl->Range.First && |
| ((!decl->Declaration.Array && ctx->outputs[j].array_id == 0) || |
| (ctx->outputs[j].array_id == decl->Array.ArrayID))) |
| return true; |
| } |
| i = ctx->num_outputs++; |
| if (ctx->num_outputs > ARRAY_SIZE(ctx->outputs)) { |
| vrend_printf( "Number of outputs exceeded, max is %lu\n", ARRAY_SIZE(ctx->outputs)); |
| return false; |
| } |
| |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| ctx->outputs[i].name = decl->Semantic.Name; |
| ctx->outputs[i].sid = decl->Semantic.Index; |
| ctx->outputs[i].interpolate = decl->Interp.Interpolate; |
| ctx->outputs[i].invariant = decl->Declaration.Invariant; |
| ctx->outputs[i].precise = false; |
| ctx->outputs[i].first = decl->Range.First; |
| ctx->outputs[i].last = decl->Range.Last; |
| ctx->outputs[i].array_id = decl->Declaration.Array ? decl->Array.ArrayID : 0; |
| ctx->outputs[i].usage_mask = decl->Declaration.UsageMask; |
| ctx->outputs[i].num_components = 4; |
| ctx->outputs[i].glsl_predefined_no_emit = false; |
| ctx->outputs[i].glsl_no_index = false; |
| ctx->outputs[i].override_no_wm = ctx->outputs[i].num_components == 1; |
| ctx->outputs[i].is_int = false; |
| ctx->outputs[i].fbfetch_used = false; |
| ctx->outputs[i].overlapping_array = NULL; |
| |
| map_overlapping_io_array(ctx->outputs, &ctx->outputs[i], ctx->num_outputs, decl); |
| |
| name_prefix = get_stage_output_name_prefix(iter->processor.Processor); |
| |
| switch (ctx->outputs[i].name) { |
| case TGSI_SEMANTIC_POSITION: |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX || |
| iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) { |
| if (ctx->outputs[i].first > 0) |
| vrend_printf("Illegal position input\n"); |
| name_prefix = "gl_Position"; |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) |
| ctx->outputs[i].glsl_gl_block = true; |
| } else if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| name_prefix = "gl_FragDepth"; |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| } |
| break; |
| case TGSI_SEMANTIC_STENCIL: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| name_prefix = "gl_FragStencilRefARB"; |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].is_int = true; |
| ctx->shader_req_bits |= (SHADER_REQ_INTS | SHADER_REQ_STENCIL_EXPORT); |
| } |
| break; |
| case TGSI_SEMANTIC_CLIPDIST: |
| ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE; |
| name_prefix = "gl_ClipDistance"; |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->num_out_clip_dist += 4 * (ctx->outputs[i].last - ctx->outputs[i].first + 1); |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX && |
| (ctx->key->gs_present || ctx->key->tcs_present)) |
| ctx->glsl_ver_required = require_glsl_ver(ctx, 150); |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) |
| ctx->outputs[i].glsl_gl_block = true; |
| if (ctx->outputs[i].last != ctx->outputs[i].first) |
| ctx->guest_sent_io_arrays = true; |
| break; |
| case TGSI_SEMANTIC_CLIPVERTEX: |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].invariant = false; |
| if (ctx->glsl_ver_required >= 140) { |
| ctx->has_clipvertex = true; |
| name_prefix = get_stage_output_name_prefix(iter->processor.Processor); |
| } else { |
| name_prefix = "gl_ClipVertex"; |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| } |
| break; |
| case TGSI_SEMANTIC_SAMPLEMASK: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].is_int = true; |
| ctx->shader_req_bits |= (SHADER_REQ_INTS | SHADER_REQ_SAMPLE_SHADING); |
| name_prefix = "gl_SampleMask"; |
| break; |
| } |
| break; |
| case TGSI_SEMANTIC_COLOR: |
| if (iter->processor.Processor == TGSI_PROCESSOR_FRAGMENT) { |
| ctx->outputs[i].type = get_type(ctx->key->fs.cbufs_signed_int_bitmask, |
| ctx->key->fs.cbufs_unsigned_int_bitmask, |
| ctx->outputs[i].sid); |
| name_prefix = ctx->key->fs.logicop_enabled ? "fsout_tmp" : "fsout"; |
| } else { |
| if (ctx->glsl_ver_required < 140) { |
| ctx->outputs[i].glsl_no_index = true; |
| if (ctx->outputs[i].sid == 0) |
| name_prefix = "gl_FrontColor"; |
| else if (ctx->outputs[i].sid == 1) |
| name_prefix = "gl_FrontSecondaryColor"; |
| } else { |
| ctx->color_out_mask |= (1 << decl->Semantic.Index); |
| } |
| } |
| ctx->outputs[i].override_no_wm = false; |
| break; |
| case TGSI_SEMANTIC_BCOLOR: |
| if (ctx->glsl_ver_required < 140) { |
| ctx->outputs[i].glsl_no_index = true; |
| if (ctx->outputs[i].sid == 0) |
| name_prefix = "gl_BackColor"; |
| else if (ctx->outputs[i].sid == 1) |
| name_prefix = "gl_BackSecondaryColor"; |
| } else { |
| ctx->outputs[i].override_no_wm = false; |
| ctx->color_out_mask |= (1 << decl->Semantic.Index) << 2; |
| } |
| break; |
| case TGSI_SEMANTIC_PSIZE: |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX || |
| iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL || |
| iter->processor.Processor == TGSI_PROCESSOR_TESS_EVAL) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->shader_req_bits |= SHADER_REQ_PSIZE; |
| name_prefix = "gl_PointSize"; |
| ctx->has_pointsize_output = true; |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) |
| ctx->outputs[i].glsl_gl_block = true; |
| } |
| break; |
| case TGSI_SEMANTIC_LAYER: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| (iter->processor.Processor == TGSI_PROCESSOR_VERTEX && |
| ctx->cfg->has_vs_layer)) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].is_int = true; |
| name_prefix = "gl_Layer"; |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) |
| ctx->shader_req_bits |= SHADER_REQ_AMD_VS_LAYER; |
| } |
| break; |
| case TGSI_SEMANTIC_PRIMID: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].is_int = true; |
| name_prefix = "gl_PrimitiveID"; |
| } |
| break; |
| case TGSI_SEMANTIC_VIEWPORT_INDEX: |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY || |
| (iter->processor.Processor == TGSI_PROCESSOR_VERTEX && |
| !ctx->cfg->use_gles && ctx->cfg->has_vs_viewport_index)) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| ctx->outputs[i].is_int = true; |
| name_prefix = "gl_ViewportIndex"; |
| if (ctx->glsl_ver_required >= 140 || ctx->cfg->use_gles) { |
| if (iter->processor.Processor == TGSI_PROCESSOR_GEOMETRY) |
| ctx->shader_req_bits |= SHADER_REQ_VIEWPORT_IDX; |
| else { |
| ctx->shader_req_bits |= SHADER_REQ_AMD_VIEWPORT_IDX; |
| } |
| } |
| } |
| break; |
| case TGSI_SEMANTIC_TESSOUTER: |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| name_prefix = "gl_TessLevelOuter"; |
| } |
| break; |
| case TGSI_SEMANTIC_TESSINNER: |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) { |
| ctx->outputs[i].glsl_predefined_no_emit = true; |
| ctx->outputs[i].glsl_no_index = true; |
| ctx->outputs[i].override_no_wm = true; |
| name_prefix = "gl_TessLevelInner"; |
| } |
| break; |
| case TGSI_SEMANTIC_PATCH: |
| if (iter->processor.Processor == TGSI_PROCESSOR_TESS_CTRL) |
| name_prefix = "patch"; |
| /* fallthrough */ |
| case TGSI_SEMANTIC_GENERIC: |
| case TGSI_SEMANTIC_TEXCOORD: |
| if (iter->processor.Processor == TGSI_PROCESSOR_VERTEX) |
| if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC) |
| color_offset = -1; |
| |
| if (ctx->outputs[i].first != ctx->outputs[i].last || |
| ctx->outputs[i].array_id > 0) { |
| ctx->guest_sent_io_arrays = true; |
| |
| if (!ctx->cfg->use_gles && |
| (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY || |
| ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL || |
| ctx->prog_type == TGSI_PROCESSOR_TESS_EVAL)) { |
| ctx->shader_req_bits |= SHADER_REQ_ARRAYS_OF_ARRAYS; |
| } |
| } |
| break; |
| default: |
| vrend_printf("unhandled output semantic: %x\n", ctx->outputs[i].name); |
| break; |
| } |
| |
| if (ctx->outputs[i].glsl_no_index) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s", name_prefix); |
| else { |
| if (ctx->outputs[i].name == TGSI_SEMANTIC_FOG) { |
| ctx->outputs[i].usage_mask = 0xf; |
| ctx->outputs[i].num_components = 4; |
| ctx->outputs[i].override_no_wm = false; |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_f%d", name_prefix, ctx->outputs[i].sid); |
| } else if (ctx->outputs[i].name == TGSI_SEMANTIC_COLOR) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_c%d", name_prefix, ctx->outputs[i].sid); |
| else if (ctx->outputs[i].name == TGSI_SEMANTIC_BCOLOR) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_bc%d", name_prefix, ctx->outputs[i].sid); |
| else if (ctx->outputs[i].name == TGSI_SEMANTIC_PATCH) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s%d", name_prefix, ctx->outputs[i].sid); |
| else if (ctx->outputs[i].name == TGSI_SEMANTIC_GENERIC) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_g%d", name_prefix, ctx->outputs[i].sid); |
| else if (ctx->outputs[i].name == TGSI_SEMANTIC_TEXCOORD) |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_t%d", name_prefix, ctx->outputs[i].sid); |
| else |
| snprintf(ctx->outputs[i].glsl_name, 64, "%s_%d", name_prefix, ctx->outputs[i].first + color_offset); |
| |
| } |
| break; |
| case TGSI_FILE_TEMPORARY: |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| if (!allocate_temp_range(&ctx->temp_ranges, &ctx->num_temp_ranges, decl->Range.First, decl->Range.Last, |
| decl->Array.ArrayID)) |
| return false; |
| break; |
| case TGSI_FILE_SAMPLER: |
| ctx->samplers_used |= (1 << decl->Range.Last); |
| break; |
| case TGSI_FILE_SAMPLER_VIEW: |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| if (decl->Range.Last >= ARRAY_SIZE(ctx->samplers)) { |
| vrend_printf( "Sampler view exceeded, max is %lu\n", ARRAY_SIZE(ctx->samplers)); |
| return false; |
| } |
| if (!add_samplers(ctx, decl->Range.First, decl->Range.Last, decl->SamplerView.Resource, decl->SamplerView.ReturnTypeX)) |
| return false; |
| break; |
| case TGSI_FILE_IMAGE: |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| ctx->shader_req_bits |= SHADER_REQ_IMAGE_LOAD_STORE; |
| ctx->shader_req_bits |= SHADER_REQ_EXPLICIT_UNIFORM_LOCATION; |
| ctx->shader_req_bits |= SHADER_REQ_EXPLICIT_ATTRIB_LOCATION; |
| if (decl->Range.Last >= ARRAY_SIZE(ctx->images)) { |
| vrend_printf( "Image view exceeded, max is %lu\n", ARRAY_SIZE(ctx->images)); |
| return false; |
| } |
| if (!add_images(ctx, decl->Range.First, decl->Range.Last, &decl->Image)) |
| return false; |
| break; |
| case TGSI_FILE_BUFFER: |
| if (decl->Range.First + ctx->key->ssbo_binding_offset >= VREND_MAX_COMBINED_SSBO_BINDING_POINTS) { |
| vrend_printf( "Buffer view exceeded, max is %d\n", VREND_MAX_COMBINED_SSBO_BINDING_POINTS); |
| return false; |
| } |
| ctx->ssbo_used_mask |= (1 << decl->Range.First); |
| |
| if (decl->Declaration.Atomic) { |
| if (decl->Range.First < ctx->ssbo_atomic_array_base) |
| ctx->ssbo_atomic_array_base = decl->Range.First; |
| ctx->ssbo_atomic_mask |= (1 << decl->Range.First); |
| } else { |
| if (decl->Range.First < ctx->ssbo_array_base) |
| ctx->ssbo_array_base = decl->Range.First; |
| } |
| if (ctx->ssbo_last_binding < decl->Range.Last) |
| ctx->ssbo_last_binding = decl->Range.Last; |
| break; |
| case TGSI_FILE_CONSTANT: |
| if (decl->Declaration.Dimension && decl->Dim.Index2D != 0) { |
| if (decl->Dim.Index2D > 31) { |
| vrend_printf( "Number of uniforms exceeded, max is 32\n"); |
| return false; |
| } |
| if (ctx->ubo_used_mask & (1 << decl->Dim.Index2D)) { |
| vrend_printf( "UBO #%d is already defined\n", decl->Dim.Index2D); |
| return false; |
| } |
| ctx->ubo_used_mask |= (1 << decl->Dim.Index2D); |
| ctx->ubo_sizes[decl->Dim.Index2D] = decl->Range.Last + 1; |
| } else { |
| /* if we have a normal single constant set then the ubo base should be 1 */ |
| ctx->ubo_base = 1; |
| if (decl->Range.Last) { |
| if (decl->Range.Last + 1 > ctx->num_consts) |
| ctx->num_consts = decl->Range.Last + 1; |
| } else |
| ctx->num_consts++; |
| } |
| break; |
| case TGSI_FILE_ADDRESS: |
| ctx->num_address = decl->Range.Last + 1; |
| break; |
| case TGSI_FILE_SYSTEM_VALUE: |
| i = ctx->num_system_values++; |
| if (ctx->num_system_values > ARRAY_SIZE(ctx->system_values)) { |
| vrend_printf( "Number of system values exceeded, max is %lu\n", ARRAY_SIZE(ctx->system_values)); |
| return false; |
| } |
| |
| ctx->system_values[i].name = decl->Semantic.Name; |
| ctx->system_values[i].sid = decl->Semantic.Index; |
| ctx->system_values[i].glsl_predefined_no_emit = true; |
| ctx->system_values[i].glsl_no_index = true; |
| ctx->system_values[i].first = decl->Range.First; |
| |
| if (decl->Semantic.Name < TGSI_SEMANTIC_COUNT) { |
| struct syvalue_prop_map *svmap = &sysvalue_map[decl->Semantic.Name]; |
| name_prefix = svmap->glsl_name; |
| if (!name_prefix) { |
| vrend_printf("Error: unsupported system value %d\n", decl->Semantic.Name); |
| return false; |
| } |
| ctx->shader_req_bits |= svmap->required_ext; |
| ctx->system_values[i].override_no_wm = svmap->override_no_wm; |
| snprintf(ctx->system_values[i].glsl_name, 64, "%s", name_prefix); |
| if (decl->Semantic.Name == TGSI_SEMANTIC_DRAWID) |
| ctx->glsl_strbufs.required_sysval_uniform_decls |= BIT(UNIFORM_DRAWID_BASE); |
| break; |
| } else { |
| vrend_printf("Error: system value %d out of range\n", decl->Semantic.Name); |
| return false; |
| } |
| case TGSI_FILE_MEMORY: |
| ctx->has_file_memory = true; |
| break; |
| case TGSI_FILE_HW_ATOMIC: |
| if (unlikely(decl->Range.First > decl->Range.Last)) { |
| vrend_printf("Wrong range: First (%u) > Last (%u)\n", decl->Range.First, decl->Range.Last); |
| return false; |
| } |
| |
| if (ctx->num_abo >= ARRAY_SIZE(ctx->abo_idx)) { |
| vrend_printf( "Number of atomic counter buffers exceeded, max is %lu\n", ARRAY_SIZE(ctx->abo_idx)); |
| return false; |
| } |
| ctx->abo_idx[ctx->num_abo] = decl->Dim.Index2D; |
| ctx->abo_sizes[ctx->num_abo] = decl->Range.Last - decl->Range.First + 1; |
| ctx->abo_offsets[ctx->num_abo] = decl->Range.First; |
| ctx->num_abo++; |
| break; |
| default: |
| vrend_printf("unsupported file %d declaration\n", decl->Declaration.File); |
| break; |
| } |
| |
| return true; |
| } |
| |
| static boolean |
| iter_property(struct tgsi_iterate_context *iter, |
| struct tgsi_full_property *prop) |
| { |
| struct dump_ctx *ctx = (struct dump_ctx *) iter; |
| |
| switch (prop->Property.PropertyName) { |
| case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS: |
| if (prop->u[0].Data == 1) |
| ctx->write_all_cbufs = true; |
| break; |
| case TGSI_PROPERTY_FS_COORD_ORIGIN: |
| ctx->fs_lower_left_origin = prop->u[0].Data ? true : false; |
| break; |
| case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER: |
| ctx->fs_integer_pixel_center = prop->u[0].Data ? true : false; |
| break; |
| case TGSI_PROPERTY_FS_DEPTH_LAYOUT: |
| /* If the host doesn't support this, then we can safely ignore it; |
| * we only lose an opportunity to optimize. */ |
| if (ctx->cfg->has_conservative_depth) { |
| ctx->shader_req_bits |= SHADER_REQ_CONSERVATIVE_DEPTH; |
| ctx->fs_depth_layout = prop->u[0].Data; |
| } |
| break; |
| case TGSI_PROPERTY_GS_INPUT_PRIM: |
| ctx->gs_in_prim = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_GS_OUTPUT_PRIM: |
| ctx->gs_out_prim = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES: |
| ctx->gs_max_out_verts = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_GS_INVOCATIONS: |
| ctx->gs_num_invocations = prop->u[0].Data; |
| ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5; |
| break; |
| case TGSI_PROPERTY_NUM_CLIPDIST_ENABLED: |
| ctx->shader_req_bits |= SHADER_REQ_CLIP_DISTANCE; |
| ctx->num_clip_dist_prop = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_NUM_CULLDIST_ENABLED: |
| ctx->num_cull_dist_prop = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_TCS_VERTICES_OUT: |
| ctx->tcs_vertices_out = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_TES_PRIM_MODE: |
| ctx->tes_prim_mode = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_TES_SPACING: |
| ctx->tes_spacing = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_TES_VERTEX_ORDER_CW: |
| ctx->tes_vertex_order = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_TES_POINT_MODE: |
| ctx->tes_point_mode = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_FS_EARLY_DEPTH_STENCIL: |
| ctx->early_depth_stencil = prop->u[0].Data > 0; |
| if (ctx->early_depth_stencil) { |
| ctx->glsl_ver_required = require_glsl_ver(ctx, 150); |
| ctx->shader_req_bits |= SHADER_REQ_IMAGE_LOAD_STORE; |
| } |
| break; |
| case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH: |
| ctx->local_cs_block_size[0] = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT: |
| ctx->local_cs_block_size[1] = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH: |
| ctx->local_cs_block_size[2] = prop->u[0].Data; |
| break; |
| case TGSI_PROPERTY_FS_BLEND_EQUATION_ADVANCED: |
| ctx->fs_blend_equation_advanced = prop->u[0].Data; |
| if (!ctx->cfg->use_gles || ctx->cfg->glsl_version < 320) { |
| ctx->glsl_ver_required = require_glsl_ver(ctx, 150); |
| ctx->shader_req_bits |= SHADER_REQ_BLEND_EQUATION_ADVANCED; |
| } |
| break; |
| case TGSI_PROPERTY_SEPARABLE_PROGRAM: |
| /* GLES is very strict about how separable shader interfaces must be matched. |
| * It doesn't allow, for example, inputs without matching outputs. So, we just |
| * disable separable shaders for GLES. */ |
| if (!ctx->cfg->use_gles) { |
| ctx->separable_program = prop->u[0].Data; |
| ctx->shader_req_bits |= SHADER_REQ_SEPERATE_SHADER_OBJECTS; |
| ctx->shader_req_bits |= SHADER_REQ_EXPLICIT_ATTRIB_LOCATION; |
| } |
| break; |
| default: |
| vrend_printf("unhandled property: %x\n", prop->Property.PropertyName); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| static boolean |
| iter_immediate(struct tgsi_iterate_context *iter, |
| struct tgsi_full_immediate *imm) |
| { |
| struct dump_ctx *ctx = (struct dump_ctx *) iter; |
| int i; |
| uint32_t first = ctx->num_imm; |
| |
| if (first >= ARRAY_SIZE(ctx->imm)) { |
| vrend_printf( "Number of immediates exceeded, max is: %lu\n", ARRAY_SIZE(ctx->imm)); |
| return false; |
| } |
| |
| ctx->imm[first].type = imm->Immediate.DataType; |
| for (i = 0; i < 4; i++) { |
| if (imm->Immediate.DataType == TGSI_IMM_FLOAT32) { |
| ctx->imm[first].val[i].f = imm->u[i].Float; |
| } else if (imm->Immediate.DataType == TGSI_IMM_UINT32 || |
| imm->Immediate.DataType == TGSI_IMM_FLOAT64) { |
| ctx->shader_req_bits |= SHADER_REQ_INTS; |
| ctx->imm[first].val[i].ui = imm->u[i].Uint; |
| } else if (imm->Immediate.DataType == TGSI_IMM_INT32) { |
| ctx->shader_req_bits |= SHADER_REQ_INTS; |
| ctx->imm[first].val[i].i = imm->u[i].Int; |
| } |
| } |
| ctx->num_imm++; |
| return true; |
| } |
| |
| static char get_swiz_char(int swiz) |
| { |
| switch(swiz){ |
| case TGSI_SWIZZLE_X: return 'x'; |
| case TGSI_SWIZZLE_Y: return 'y'; |
| case TGSI_SWIZZLE_Z: return 'z'; |
| case TGSI_SWIZZLE_W: return 'w'; |
| default: return 0; |
| } |
| } |
| |
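| /* For illustration: emit_cbuf_writes() below replicates the first color |
| * output into the remaining draw buffers.  Assuming hypothetical values |
| * num_outputs == 1 and max_draw_buffers == 4, the emitted GLSL would be: |
| * |
| *    fsout_c1 = fsout_c0; |
| *    fsout_c2 = fsout_c0; |
| *    fsout_c3 = fsout_c0; |
| */ |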
| static void emit_cbuf_writes(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| int i; |
| |
| for (i = ctx->num_outputs; i < ctx->cfg->max_draw_buffers; i++) { |
| emit_buff(glsl_strbufs, "fsout_c%d = fsout_c0;\n", i); |
| } |
| } |
| |
| static void emit_a8_swizzle(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| emit_buf(glsl_strbufs, "fsout_c0.x = fsout_c0.w;\n"); |
| } |
| |
| static const char *atests[PIPE_FUNC_ALWAYS + 1] = { |
| "false", |
| "<", |
| "==", |
| "<=", |
| ">", |
| "!=", |
| ">=", |
| "true" |
| }; |
| |
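| /* For illustration: with a hypothetical key->alpha_test of PIPE_FUNC_GEQUAL, |
| * emit_alpha_test() below would emit roughly: |
| * |
| *    if (!(fsout_c0.w >= alpha_ref_val)) { |
| *        discard; |
| *    } |
| */ |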
| static void emit_alpha_test(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| char comp_buf[128]; |
| |
| if (!ctx->num_outputs) |
| return; |
| |
| if (!ctx->write_all_cbufs) { |
| /* only emit the alpha-test stanza if the first output has semantic index 0 */ |
| if (ctx->outputs[0].sid != 0) |
| return; |
| } |
| switch (ctx->key->alpha_test) { |
| case PIPE_FUNC_NEVER: |
| case PIPE_FUNC_ALWAYS: |
| snprintf(comp_buf, 128, "%s", atests[ctx->key->alpha_test]); |
| break; |
| case PIPE_FUNC_LESS: |
| case PIPE_FUNC_EQUAL: |
| case PIPE_FUNC_LEQUAL: |
| case PIPE_FUNC_GREATER: |
| case PIPE_FUNC_NOTEQUAL: |
| case PIPE_FUNC_GEQUAL: |
| snprintf(comp_buf, 128, "%s %s alpha_ref_val", "fsout_c0.w", atests[ctx->key->alpha_test]); |
| glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_ALPHA_REF_VAL); |
| break; |
| default: |
| vrend_printf( "invalid alpha-test: %x\n", ctx->key->alpha_test); |
| set_buf_error(glsl_strbufs); |
| return; |
| } |
| |
| emit_buff(glsl_strbufs, "if (!(%s)) {\n\tdiscard;\n}\n", comp_buf); |
| } |
| |
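| /* For illustration: with VREND_POLYGON_STIPPLE_SIZE == 32, the pass below |
| * emits roughly the following GLSL, discarding fragments whose bit in the |
| * 32x32 stipple pattern is zero: |
| * |
| *    { |
| *       int spx = int(gl_FragCoord.x) & 31; |
| *       int spy = int(gl_FragCoord.y) & 31; |
| *       stip_temp = stipple_pattern[spy] & (0x80000000u >> spx); |
| *       if (stip_temp == 0u) { |
| *          discard; |
| *       } |
| *    } |
| */ |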
| static void emit_pstipple_pass(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| static_assert(VREND_POLYGON_STIPPLE_SIZE == 32, |
| "According to the spec, the stipple size must be 32"); |
| |
| const int mask = VREND_POLYGON_STIPPLE_SIZE - 1; |
| |
| emit_buf(glsl_strbufs, "{\n"); |
| emit_buff(glsl_strbufs, " int spx = int(gl_FragCoord.x) & %d;\n", mask); |
| emit_buff(glsl_strbufs, " int spy = int(gl_FragCoord.y) & %d;\n", mask); |
| emit_buf(glsl_strbufs, " stip_temp = stipple_pattern[spy] & (0x80000000u >> spx);\n"); |
| emit_buf(glsl_strbufs, " if (stip_temp == 0u) {\n discard;\n }\n"); |
| emit_buf(glsl_strbufs, "}\n"); |
| glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_PSTIPPLE_SAMPLER); |
| } |
| |
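| /* For illustration: with two-sided color selection enabled and a |
| * hypothetical stage input prefix "vso", emit_color_select() would emit |
| * per enabled color input: |
| * |
| *    realcolor0 = gl_FrontFacing ? vso_c0 : vso_bc0; |
| *    realcolor1 = gl_FrontFacing ? vso_c1 : vso_bc1; |
| */ |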
| static void emit_color_select(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| if (!ctx->key->color_two_side || !(ctx->color_in_mask & 0x3)) |
| return; |
| |
| const char *name_prefix = get_stage_input_name_prefix(ctx, ctx->prog_type); |
| if (ctx->color_in_mask & 1) |
| emit_buff(glsl_strbufs, "realcolor0 = gl_FrontFacing ? %s_c0 : %s_bc0;\n", |
| name_prefix, name_prefix); |
| |
| if (ctx->color_in_mask & 2) |
| emit_buff(glsl_strbufs, "realcolor1 = gl_FrontFacing ? %s_c1 : %s_bc1;\n", |
| name_prefix, name_prefix); |
| } |
| |
| static void emit_prescale(struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| emit_buf(glsl_strbufs, "gl_Position.y = gl_Position.y * winsys_adjust_y;\n"); |
| glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_WINSYS_ADJUST_Y); |
| } |
| |
| // TODO Consider exposing non-const ctx-> members as args to make *ctx const |
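| /* Note on the flags set below: write_so_outputs[i] == true means the |
| * streamout entry needs an explicit "tfoutN" temporary; it is cleared only |
| * for full, unswizzled vec4 writes of regular outputs (not CLIPDIST or |
| * POSITION), which can be captured directly from the output variable. */ |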
| static void prepare_so_movs(struct dump_ctx *ctx) |
| { |
| uint32_t i; |
| for (i = 0; i < ctx->so->num_outputs; i++) { |
| ctx->write_so_outputs[i] = true; |
| if (ctx->so->output[i].start_component != 0) |
| continue; |
| if (ctx->so->output[i].num_components != 4) |
| continue; |
| if (ctx->outputs[ctx->so->output[i].register_index].name == TGSI_SEMANTIC_CLIPDIST) |
| continue; |
| if (ctx->outputs[ctx->so->output[i].register_index].name == TGSI_SEMANTIC_POSITION) |
| continue; |
| |
| ctx->outputs[ctx->so->output[i].register_index].stream = ctx->so->output[i].stream; |
| if (ctx->prog_type == TGSI_PROCESSOR_GEOMETRY && ctx->so->output[i].stream) |
| ctx->shader_req_bits |= SHADER_REQ_GPU_SHADER5; |
| |
| ctx->write_so_outputs[i] = false; |
| } |
| } |
| |
| static const struct vrend_shader_io *get_io_slot(const struct vrend_shader_io *slots, unsigned nslots, int idx) |
| { |
| const struct vrend_shader_io *result = slots; |
| for (unsigned i = 0; i < nslots; ++i, ++result) { |
| if ((result->first <= idx) && (result->last >= idx)) |
| return result; |
| } |
| assert(0 && "Output not found"); |
| return NULL; |
| } |
| |
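| /* For illustration: with a hypothetical stage prefix "vso" and an io slot |
| * with sid == 3 and first == 2, the helpers below yield "block_vsog3" |
| * (get_blockname) and "vsog2" plus the postfix (get_blockvarname). */ |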
| static inline void |
| get_blockname(char outvar[64], const char *stage_prefix, const struct vrend_shader_io *io) |
| { |
| snprintf(outvar, 64, "block_%sg%d", stage_prefix, io->sid); |
| } |
| |
| static inline void |
| get_blockvarname(char outvar[64], const char *stage_prefix, const struct vrend_shader_io *io, const char *postfix) |
| { |
| snprintf(outvar, 64, "%sg%d%s", stage_prefix, io->first, postfix); |
| } |
| |
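| /* For illustration: for an arrayed generic output accessed through an io |
| * block, get_so_name() builds a name of the form |
| * "block_vsog3.vso_g3[1].x" (block name, glsl name, index relative to the |
| * array start, writemask); non-arrayed outputs simply use glsl_name + wm. */ |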
| static void get_so_name(const struct dump_ctx *ctx, bool from_block, const struct vrend_shader_io *output, int index, char out_var[255], char *wm) |
| { |
| if (output->first == output->last || |
| (output->name != TGSI_SEMANTIC_GENERIC && |
| output->name != TGSI_SEMANTIC_TEXCOORD)) |
| snprintf(out_var, 255, "%s%s", output->glsl_name, wm); |
| else { |
| if ((output->name == TGSI_SEMANTIC_GENERIC) && prefer_generic_io_block(ctx, io_out)) { |
| char blockname[64]; |
| const char *stage_prefix = get_stage_output_name_prefix(ctx->prog_type); |
| if (from_block) |
| get_blockname(blockname, stage_prefix, output); |
| else |
| get_blockvarname(blockname, stage_prefix, output, ""); |
| snprintf(out_var, 255, "%s.%s[%d]%s", blockname, output->glsl_name, index - output->first, wm); |
| } else { |
| snprintf(out_var, 255, "%s[%d]%s", output->glsl_name, index - output->first, wm); |
| } |
| } |
| } |
| |
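| /* For illustration: emit_so_movs() fills ctx->so_names[] with either the |
| * direct GLSL name of an output or a "tfoutN" temporary; for the |
| * temporaries it emits copies of the form "tfout0 = vec4(vso_g3[1]);" |
| * (or float/intBitsToFloat for single components), and CLIPDIST outputs |
| * are sourced from clip_dist_temp[]. */ |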
| static void emit_so_movs(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs, |
| bool *has_clipvertex_so) |
| { |
| uint32_t i, j; |
| char outtype[15] = ""; |
| char writemask[6]; |
| |
| if (ctx->so->num_outputs >= PIPE_MAX_SO_OUTPUTS) { |
| vrend_printf( "Num outputs exceeded, max is %u\n", PIPE_MAX_SO_OUTPUTS); |
| set_buf_error(glsl_strbufs); |
| return; |
| } |
| |
| for (i = 0; i < ctx->so->num_outputs; i++) { |
| const struct vrend_shader_io *output = get_io_slot(&ctx->outputs[0], ctx->num_outputs, ctx->so->output[i].register_index); |
| if (ctx->so->output[i].start_component != 0) { |
| int wm_idx = 0; |
| writemask[wm_idx++] = '.'; |
| for (j = 0; j < ctx->so->output[i].num_components; j++) { |
| unsigned idx = ctx->so->output[i].start_component + j; |
| if (idx >= 4) |
| break; |
| if (idx <= 2) |
| writemask[wm_idx++] = 'x' + idx; |
| else |
| writemask[wm_idx++] = 'w'; |
| } |
| writemask[wm_idx] = '\0'; |
| } else |
| writemask[0] = 0; |
| |
| if (!ctx->write_so_outputs[i]) { |
| if (ctx->so_names[i]) |
| free(ctx->so_names[i]); |
| if (ctx->so->output[i].register_index > ctx->num_outputs) |
| ctx->so_names[i] = NULL; |
| else if (output->name == TGSI_SEMANTIC_CLIPVERTEX && ctx->has_clipvertex) { |
| ctx->so_names[i] = strdup("clipv_tmp"); |
| *has_clipvertex_so = true; |
| } else { |
| char out_var[255]; |
| const struct vrend_shader_io *used_output_io = output; |
| if (output->name == TGSI_SEMANTIC_GENERIC && ctx->generic_ios.output_range.used) { |
| used_output_io = &ctx->generic_ios.output_range.io; |
| } else if (output->name == TGSI_SEMANTIC_PATCH && ctx->patch_ios.output_range.used) { |
| used_output_io = &ctx->patch_ios.output_range.io; |
| } |
| get_so_name(ctx, true, used_output_io, ctx->so->output[i].register_index, out_var, ""); |
| ctx->so_names[i] = strdup(out_var); |
| } |
| } else { |
| char ntemp[8]; |
| snprintf(ntemp, 8, "tfout%d", i); |
| ctx->so_names[i] = strdup(ntemp); |
| } |
| if (ctx->so->output[i].num_components == 1) { |
| if (output->is_int) |
| snprintf(outtype, 15, "intBitsToFloat"); |
| else |
| snprintf(outtype, 15, "float"); |
| } else |
| snprintf(outtype, 15, "vec%d", ctx->so->output[i].num_components); |
| |
| if (ctx->so->output[i].register_index >= 255) |
| continue; |
| |
| if (output->name == TGSI_SEMANTIC_CLIPDIST) { |
| if (output->first == output->last) |
| emit_buff(glsl_strbufs, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype, output->sid, |
| writemask); |
| else |
| emit_buff(glsl_strbufs, "tfout%d = %s(clip_dist_temp[%d]%s);\n", i, outtype, |
| output->sid + ctx->so->output[i].register_index - output->first, |
| writemask); |
| } else { |
| if (ctx->write_so_outputs[i]) { |
| char out_var[255]; |
| if (ctx->so->output[i].need_temp || ctx->prog_type == TGSI_PROCESSOR_GEOMETRY || |
| output->glsl_predefined_no_emit) { |
| get_so_name(ctx, false, output, ctx->so->output[i].register_index, out_var, writemask); |
| emit_buff(glsl_strbufs, "tfout%d = %s(%s);\n", i, outtype, out_var); |
| } else { |
| get_so_name(ctx, true, output, ctx->so->output[i].register_index, out_var, writemask); |
| free(ctx->so_names[i]); |
| ctx->so_names[i] = strdup(out_var); |
| } |
| } |
| } |
| } |
| } |
| |
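| /* For illustration: when the guest wrote no clip distances but this is the |
| * last vertex stage, emit_clip_dist_movs() below emits roughly: |
| * |
| *    if (clip_plane_enabled) { |
| *       gl_ClipDistance[0] = dot(gl_Position, clipp[0]); |
| *       ... |
| *       gl_ClipDistance[7] = dot(gl_Position, clipp[7]); |
| *    } |
| * |
| * and each guest-written distance is then copied as, e.g., |
| *    gl_ClipDistance[1] = clip_dist_temp[0].y; |
| */ |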
| static void emit_clip_dist_movs(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| int i; |
| bool has_prop = (ctx->num_clip_dist_prop + ctx->num_cull_dist_prop) > 0; |
| int num_clip = has_prop ? ctx->num_clip_dist_prop : ctx->key->num_out_clip; |
| int num_cull = has_prop ? ctx->num_cull_dist_prop : ctx->key->num_out_cull; |
| |
| |
| int num_clip_cull = num_cull + num_clip; |
| if (ctx->num_out_clip_dist && !num_clip_cull) |
| num_clip = ctx->num_out_clip_dist; |
| |
| int ndists; |
| const char *prefix=""; |
| |
| if (ctx->prog_type == TGSI_PROCESSOR_TESS_CTRL) |
| prefix = "gl_out[gl_InvocationID]."; |
| |
| if (ctx->num_out_clip_dist == 0 && |
| ctx->is_last_vertex_stage && |
| ctx->num_outputs + 2 <= MAX_VARYING) { |
| emit_buff(glsl_strbufs, "if (clip_plane_enabled) {\n"); |
| for (i = 0; i < 8; i++) { |
| emit_buff(glsl_strbufs, " %sgl_ClipDistance[%d] = dot(%s, clipp[%d]);\n", |
| prefix, i, ctx->has_clipvertex ? "clipv_tmp" : "gl_Position", i); |
| } |
| emit_buff(glsl_strbufs, "}\n"); |
| glsl_strbufs->required_sysval_uniform_decls |= BIT(UNIFORM_CLIP_PLANE); |
| } |
| ndists = ctx->num_out_clip_dist; |
| if (has_prop) |
| ndists = num_clip + num_cull; |
| for (i = 0; i < ndists; i++) { |
| int clipidx = i < 4 ? 0 : 1; |
| char swiz = i & 3; |
| char wm = 0; |
| switch (swiz) { |
| default: |
| case 0: wm = 'x'; break; |
| case 1: wm = 'y'; break; |
| case 2: wm = 'z'; break; |
| case 3: wm = 'w'; break; |
| } |
| bool is_cull = false; |
| const char *clip_cull = "Clip"; |
| |
| if (i >= num_clip) { |
| if (i < ndists) { |
| is_cull = true; |
| clip_cull = "Cull"; |
| } else { |
| clip_cull = "ERROR"; |
| } |
| } |
| |
| emit_buff(glsl_strbufs, "%sgl_%sDistance[%d] = clip_dist_temp[%d].%c;\n", prefix, clip_cull, |
| is_cull ? i - num_clip : i, clipidx, wm); |
| } |
| } |
| |
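| /* For illustration: for each bit set in key->vs.fog_fixup_mask (say a |
| * hypothetical semantic index 2 and vertex output prefix "vso"), the two |
| * fixup helpers below emit the declaration "out vec4 vso_f2;" and the |
| * store "vso_f2 = vec4(0.0, 0.0, 0.0, 1.0);" respectively. */ |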
| static void emit_fog_fixup_hdr(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| uint32_t fixup_mask = ctx->key->vs.fog_fixup_mask; |
| int semantic; |
| const char *prefix = get_stage_output_name_prefix(TGSI_PROCESSOR_VERTEX); |
| |
| while (fixup_mask) { |
| semantic = ffs(fixup_mask) - 1; |
| |
| emit_hdrf(glsl_strbufs, "out vec4 %s_f%d;\n", prefix, semantic); |
| fixup_mask &= (~(1 << semantic)); |
| } |
| } |
| |
| static void emit_fog_fixup_write(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| uint32_t fixup_mask = ctx->key->vs.fog_fixup_mask; |
| int semantic; |
| const char *prefix = get_stage_output_name_prefix(TGSI_PROCESSOR_VERTEX); |
| |
| while (fixup_mask) { |
| semantic = ffs(fixup_mask) - 1; |
| |
| /* |
| * Force unwritten fog outputs to 0,0,0,1 |
| */ |
| emit_buff(glsl_strbufs, "%s_f%d = vec4(0.0, 0.0, 0.0, 1.0);\n", |
| prefix, semantic); |
| fixup_mask &= (~(1 << semantic)); |
| } |
| } |
| |
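| /* The helpers below expand to a single emit_buff() call producing one GLSL |
| * assignment; e.g. emit_arit_op2("+") yields a line of the shape |
| *    dst = dstconv(dtypeprefix((srcA + srcB))writemask); |
| * with the conversion wrappers taken from the operand analysis. */ |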
| #define emit_arit_op2(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((%s %s %s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), srcs[0], op, srcs[1], writemask) |
| #define emit_op1(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s(%s(%s))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, srcs[0], writemask) |
| #define emit_compare(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(%s((%s(%s(%s), %s(%s))))%s);\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.dtypeprefix), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask) |
| |
| #define emit_ucompare(op) emit_buff(&ctx->glsl_strbufs, "%s = %s(uintBitsToFloat(%s(%s(%s(%s), %s(%s))%s) * %s(0xffffffff)));\n", dsts[0], get_string(dinfo.dstconv), get_string(dinfo.udstconv), op, get_string(sinfo.svec4), srcs[0], get_string(sinfo.svec4), srcs[1], writemask, get_string(dinfo.udstconv)) |
| |
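| /* Summary: at the end of vertex processing this emits the streamout |
| * captures and clip/cull-distance moves, applies the winsys Y prescale |
| * when no GS/TES follows, and forces unwritten fog outputs to their |
| * default value. */ |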
| static void handle_vertex_proc_exit(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs, |
| bool *has_clipvertex_so) |
| { |
| if (ctx->so && !ctx->key->gs_present && !ctx->key->tes_present) |
| emit_so_movs(ctx, glsl_strbufs, has_clipvertex_so); |
| |
| if (ctx->cfg->has_cull_distance) |
| emit_clip_dist_movs(ctx, glsl_strbufs); |
| |
| if (!ctx->key->gs_present && !ctx->key->tes_present) |
| emit_prescale(glsl_strbufs); |
| |
| if (ctx->key->vs.fog_fixup_mask) |
| emit_fog_fixup_write(ctx, glsl_strbufs); |
| } |
| |
| static void emit_fragment_logicop(const struct dump_ctx *ctx, |
| struct vrend_glsl_strbufs *glsl_strbufs) |
| { |
| char src[PIPE_MAX_COLOR_BUFS][64]; |
| char src_fb[PIPE_MAX_COLOR_BUFS][64]; |
| double scale[PIPE_MAX_COLOR_BUFS]; |
| int mask[PIPE_MAX_COLOR_BUFS]; |
| |
| struct vrend_strbuf full_op_buf[PIPE_MAX_COLOR_BUFS]; |
| for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) { |
| strbuf_alloc(&full_op_buf[i], 134); |
| } |
| |
| |
| for (unsigned i = 0; i < ctx->num_outputs; i++) { |
| mask[i] = (1 << ctx->key->fs.surface_component_bits[i]) - 1; |
| scale[i] = mask[i]; |
| switch (ctx->key->fs.logicop_func) { |
| case PIPE_LOGICOP_INVERT: |
| snprintf(src_fb[i], ARRAY_SIZE(src_fb[i]), |
| "ivec4(%f * fsout_c%d + 0.5)", scale[i], i); |
| break; |
| case PIPE_LOGICOP_NOR: |
| case PIPE_LOGICOP_AND_INVERTED: |
| case PIPE_LOGICOP_AND_REVERSE: |
| case PIPE_LOGICOP_XOR: |
| case PIPE_LOGICOP_NAND: |
| case PIPE_LOGICOP_AND: |
| case PIPE_LOGICOP_EQUIV: |
| case PIPE_LOGICOP_OR_INVERTED: |
| case PIPE_LOGICOP_OR_REVERSE: |
| case PIPE_LOGICOP_OR: |
| snprintf(src_fb[i], ARRAY_SIZE(src_fb[i]), |
| "ivec4(%f * fsout_c%d + 0.5)", scale[i], i); |
| /* fallthrough */ |
| case PIPE_LOGICOP_COPY_INVERTED: |
| snprintf(src[i], ARRAY_SIZE(src[i]), |
| "ivec4(%f * fsout_tmp_c%d + 0.5)", scale[i], i); |
| break; |
| case PIPE_LOGICOP_COPY: |
| case PIPE_LOGICOP_NOOP: |
| case PIPE_LOGICOP_CLEAR: |
| case PIPE_LOGICOP_SET: |
| break; |
| } |
| } |
| |
| for (unsigned i = 0; i < ctx->num_outputs; i++) { |
| switch (ctx->key->fs.logicop_func) { |
| case PIPE_LOGICOP_CLEAR: |
| strbuf_fmt(&full_op_buf[i], "%s", "vec4(0)"); |
| break; |
| case PIPE_LOGICOP_NOOP: |
| strbuf_fmt(&full_op_buf[i], "%s", ""); |
| break; |
| case PIPE_LOGICOP_SET: |
| strbuf_fmt(&full_op_buf[i], "%s", "vec4(1)"); |
| break; |
| case PIPE_LOGICOP_COPY: |
| strbuf_fmt(&full_op_buf[i], "fsout_tmp_c%d", i); |
| break; |
| case PIPE_LOGICOP_COPY_INVERTED: |
| strbuf_fmt(&full_op_buf[i], "~%s", src[i]); |
| break; |
| case PIPE_LOGICOP_INVERT: |
| strbuf_fmt(&full_op_buf[i], "~%s", src_fb[i]); |
| break; |
| case PIPE_LOGICOP_AND: |
| strbuf_fmt(&full_op_buf[i], "%s & %s", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_NAND: |
| strbuf_fmt(&full_op_buf[i], "~( %s & %s )", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_NOR: |
| strbuf_fmt(&full_op_buf[i], "~( %s | %s )", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_AND_INVERTED: |
| strbuf_fmt(&full_op_buf[i], "~%s & %s", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_AND_REVERSE: |
| strbuf_fmt(&full_op_buf[i], "%s & ~%s", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_XOR: |
| strbuf_fmt(&full_op_buf[i], "%s ^ %s", src[i], src_fb[i]); |
| break; |
| case PIPE_LOGICOP_EQUIV: |
| strbuf_fmt(&full_op_buf[i], "~( %s ^ %s )", src[i], src_fb[i]); |
| break; |
|