// Copyright 2017 The Clspv Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef _MSC_VER
#pragma warning(push, 0)
#endif
#include <cassert>
#include <cstring>
#include <iomanip>
#include <list>
#include <memory>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <unordered_set>
#include <utility>
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
// enable spv::HasResultAndType
#define SPV_ENABLE_UTILITY_CODE
#include "spirv/unified1/spirv.hpp"
#include "clspv/AddressSpace.h"
#include "clspv/Option.h"
#include "clspv/PushConstant.h"
#include "clspv/SpecConstant.h"
#include "clspv/spirv_c_strings.hpp"
#include "clspv/spirv_glsl.hpp"
#include "clspv/spirv_reflection.hpp"
#include "ArgKind.h"
#include "Builtins.h"
#include "ComputeStructuredOrder.h"
#include "ConstantEmitter.h"
#include "Constants.h"
#include "DescriptorCounter.h"
#include "Layout.h"
#include "NormalizeGlobalVariable.h"
#include "Passes.h"
#include "SpecConstant.h"
#include "Types.h"
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
using namespace llvm;
using namespace clspv;
using namespace clspv::Builtins;
using namespace clspv::Option;
using namespace mdconst;
namespace {
cl::opt<bool> ShowResourceVars("show-rv", cl::init(false), cl::Hidden,
cl::desc("Show resource variable creation"));
cl::opt<bool>
ShowProducerIR("show-producer-ir", cl::init(false), cl::ReallyHidden,
cl::desc("Dump the IR at the start of SPIRVProducer"));
// These hacks exist to help transition code generation algorithms
// without causing huge churn in detailed test output.
const bool Hack_generate_runtime_array_stride_early = true;
// The value of 1/pi. This value is from MSDN
// https://msdn.microsoft.com/en-us/library/4hwaceh6.aspx
const double kOneOverPi = 0.318309886183790671538;
const glsl::ExtInst kGlslExtInstBad = static_cast<glsl::ExtInst>(0);
// SPIRV module sections (per section 2.4 of the SPIR-V spec)
// These are used to collect SPIRVInstructions by type on-the-fly.
enum SPIRVSection {
kCapabilities,
kExtensions,
kImports,
kMemoryModel,
kEntryPoints,
kExecutionModes,
kDebug,
kAnnotations,
kTypes,
kConstants = kTypes,
kGlobalVariables,
kFunctions,
// This is not a section of the SPIR-V spec and should always immediately
// precede kSectionCount. It is a convenient place for the embedded
// reflection data.
kReflection,
kSectionCount
};
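// Instructions are appended to their section as they are created, and
// WriteSPIRVBinary later emits the sections in this order. kConstants
// aliases kTypes because type and constant declarations can depend on each
// other (e.g. OpTypeArray needs a length constant) and so share one section.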
class SPIRVID {
uint32_t id;
public:
SPIRVID(uint32_t _id = 0) : id(_id) {}
uint32_t get() const { return id; }
bool isValid() const { return id != 0; }
bool operator==(const SPIRVID &that) const { return id == that.id; }
bool operator<(const SPIRVID &that) const { return id < that.id; }
};
enum SPIRVOperandType { NUMBERID, LITERAL_WORD, LITERAL_DWORD, LITERAL_STRING };
struct SPIRVOperand {
explicit SPIRVOperand(SPIRVOperandType Ty, uint32_t Num) : Type(Ty) {
LiteralNum[0] = Num;
}
explicit SPIRVOperand(SPIRVOperandType Ty, const char *Str)
: Type(Ty), LiteralStr(Str) {}
explicit SPIRVOperand(SPIRVOperandType Ty, StringRef Str)
: Type(Ty), LiteralStr(Str) {}
explicit SPIRVOperand(ArrayRef<uint32_t> NumVec) {
auto sz = NumVec.size();
assert(sz >= 1 && sz <= 2);
Type = sz == 1 ? LITERAL_WORD : LITERAL_DWORD;
LiteralNum[0] = NumVec[0];
if (sz == 2) {
LiteralNum[1] = NumVec[1];
}
}
SPIRVOperandType getType() const { return Type; }
uint32_t getNumID() const { return LiteralNum[0]; }
std::string getLiteralStr() const { return LiteralStr; }
const uint32_t *getLiteralNum() const { return LiteralNum; }
uint32_t GetNumWords() const {
switch (Type) {
case NUMBERID:
case LITERAL_WORD:
return 1;
case LITERAL_DWORD:
return 2;
case LITERAL_STRING:
// Account for the terminating null character.
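// For example, "abc" plus its terminator is 4 bytes and packs into
// (3 + 4) / 4 = 1 word, while "GLSL.std.450" (12 chars) needs
// (12 + 4) / 4 = 4 words.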
return uint32_t((LiteralStr.size() + 4) / 4);
}
llvm_unreachable("Unhandled case in SPIRVOperand::GetNumWords()");
}
private:
SPIRVOperandType Type;
std::string LiteralStr;
uint32_t LiteralNum[2];
};
typedef SmallVector<SPIRVOperand, 4> SPIRVOperandVec;
struct SPIRVInstruction {
// Primary constructor: takes the Opcode, initializes WordCount based on ResID.
SPIRVInstruction(spv::Op Opc, SPIRVID ResID = 0)
: Opcode(static_cast<uint16_t>(Opc)) {
setResult(ResID);
}
// Creates an instruction with an opcode and no result ID, and with the
// given operands. This calls the primary constructor to initialize Opcode
// and WordCount. Takes ownership of the operands and clears |Ops|.
SPIRVInstruction(spv::Op Opc, SPIRVOperandVec &Ops) : SPIRVInstruction(Opc) {
setOperands(Ops);
}
// Creates an instruction with an opcode, a result ID, and the given
// operands. This calls the primary constructor to initialize Opcode and
// WordCount. Takes ownership of the operands and clears |Ops|.
SPIRVInstruction(spv::Op Opc, SPIRVID ResID, SPIRVOperandVec &Ops)
: SPIRVInstruction(Opc, ResID) {
setOperands(Ops);
}
uint32_t getWordCount() const { return WordCount; }
uint16_t getOpcode() const { return Opcode; }
SPIRVID getResultID() const { return ResultID; }
const SPIRVOperandVec &getOperands() const { return Operands; }
private:
void setResult(SPIRVID ResID = 0) {
WordCount = 1 + (ResID.isValid() ? 1 : 0);
ResultID = ResID;
}
void setOperands(SPIRVOperandVec &Ops) {
assert(Operands.empty());
Operands = std::move(Ops);
for (auto &opd : Operands) {
WordCount += uint16_t(opd.GetNumWords());
}
}
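// As a word-count sketch: OpTypeInt %N 32 0 is one opcode/word-count word,
// one result ID, and two literal words, so WordCount is 4.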
private:
uint32_t WordCount; // Check the 16-bit bound at code generation time.
uint16_t Opcode;
SPIRVID ResultID;
SPIRVOperandVec Operands;
};
struct SPIRVProducerPass final : public ModulePass {
typedef DenseMap<Type *, SPIRVID> TypeMapType;
typedef UniqueVector<Type *> TypeList;
typedef DenseMap<Value *, SPIRVID> ValueMapType;
typedef std::list<SPIRVID> SPIRVIDListType;
typedef std::vector<std::pair<Value *, SPIRVID>> EntryPointVecType;
typedef std::set<uint32_t> CapabilitySetType;
typedef std::list<SPIRVInstruction> SPIRVInstructionList;
typedef std::map<spv::BuiltIn, SPIRVID> BuiltinConstantMapType;
// A vector of pairs, each of which is:
// - the LLVM instruction that we will later generate SPIR-V code for
// - the SPIR-V instruction placeholder that will be replaced
typedef std::vector<std::pair<Value *, SPIRVInstruction *>>
DeferredInstVecType;
typedef DenseMap<FunctionType *, std::pair<FunctionType *, uint32_t>>
GlobalConstFuncMapType;
explicit SPIRVProducerPass(
raw_pwrite_stream &out,
ArrayRef<std::pair<unsigned, std::string>> samplerMap,
bool outputCInitList)
: ModulePass(ID), module(nullptr), samplerMap(samplerMap), out(out),
binaryTempOut(binaryTempUnderlyingVector), binaryOut(&out),
outputCInitList(outputCInitList), patchBoundOffset(0), nextID(1),
OpExtInstImportID(0), HasVariablePointersStorageBuffer(false),
HasVariablePointers(false), SamplerTy(nullptr), WorkgroupSizeValueID(0),
WorkgroupSizeVarID(0) {
addCapability(spv::CapabilityShader);
Ptr = this;
}
virtual ~SPIRVProducerPass() {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
}
virtual bool runOnModule(Module &module) override;
// output the SPIR-V header block
void outputHeader();
// patch the SPIR-V header block
void patchHeader();
CapabilitySetType &getCapabilitySet() { return CapabilitySet; }
TypeMapType &getImageTypeMap() { return ImageTypeMap; }
TypeList &getTypeList() { return Types; }
ValueMapType &getValueMap() { return ValueMap; }
SPIRVInstructionList &getSPIRVInstList(SPIRVSection Section) {
return SPIRVSections[Section];
};
EntryPointVecType &getEntryPointVec() { return EntryPointVec; }
DeferredInstVecType &getDeferredInstVec() { return DeferredInstVec; }
SPIRVIDListType &getEntryPointInterfacesList() {
return EntryPointInterfacesList;
}
SPIRVID getOpExtInstImportID();
std::vector<SPIRVID> &getBuiltinDimVec() { return BuiltinDimensionVec; }
bool hasVariablePointersStorageBuffer() {
return HasVariablePointersStorageBuffer;
}
void setVariablePointersStorageBuffer() {
if (!HasVariablePointersStorageBuffer) {
addCapability(spv::CapabilityVariablePointersStorageBuffer);
HasVariablePointersStorageBuffer = true;
}
}
bool hasVariablePointers() { return HasVariablePointers; }
void setVariablePointers() {
if (!HasVariablePointers) {
addCapability(spv::CapabilityVariablePointers);
HasVariablePointers = true;
}
}
ArrayRef<std::pair<unsigned, std::string>> &getSamplerMap() {
return samplerMap;
}
GlobalConstFuncMapType &getGlobalConstFuncTypeMap() {
return GlobalConstFuncTypeMap;
}
SmallPtrSet<Value *, 16> &getGlobalConstArgSet() {
return GlobalConstArgumentSet;
}
TypeList &getTypesNeedingArrayStride() { return TypesNeedingArrayStride; }
void GenerateLLVMIRInfo();
// Populate GlobalConstFuncTypeMap. Also, if module-scope __constant will
// *not* be converted to a storage buffer, replace each such global variable
// with one in the storage class expected by SPIR-V.
void FindGlobalConstVars();
// Populate ResourceVarInfoList, FunctionToResourceVarsMap, and
// ModuleOrderedResourceVars.
void FindResourceVars();
void FindTypePerGlobalVar(GlobalVariable &GV);
void FindTypesForSamplerMap();
void FindTypesForResourceVars();
// Inserts |Ty| and relevant sub-types into the |Types| member, indicating
// that |Ty| and its subtypes will need a corresponding SPIR-V type.
void FindType(Type *Ty);
// Returns the canonical type of |type|.
//
// By default, clspv maps both __constant and __global address space pointers
// to StorageBuffer storage class. In order to prevent duplicate types from
// being generated, clspv uses the canonical type as a representative.
Type *CanonicalType(Type *type);
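// For example, under default options a float __constant* and a float
// __global* canonicalize to the same type, so only one OpTypePointer is
// emitted for both.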
// Lookup or create Types, Constants.
// Returns SPIRVID once it has been created.
SPIRVID getSPIRVType(Type *Ty);
SPIRVID getSPIRVConstant(Constant *Cst);
SPIRVID getSPIRVInt32Constant(uint32_t CstVal);
// Lookup SPIRVID of llvm::Value, may create Constant.
SPIRVID getSPIRVValue(Value *V);
SPIRVID getSPIRVBuiltin(spv::BuiltIn BID, spv::Capability Cap);
// Generates instructions for SPIR-V types corresponding to the LLVM types
// saved in the |Types| member. A type follows its subtypes. IDs are
// allocated sequentially starting with the current value of nextID. Also
// updates nextID to just beyond the last generated ID.
void GenerateSPIRVTypes();
void GenerateModuleInfo();
void GenerateGlobalVar(GlobalVariable &GV);
void GenerateWorkgroupVars();
// Generate literal samplers if necessary.
void GenerateSamplers();
// Generate OpVariables for %clspv.resource.var.* calls.
void GenerateResourceVars();
void GenerateFuncPrologue(Function &F);
void GenerateFuncBody(Function &F);
void GenerateEntryPointInitialStores();
spv::Op GetSPIRVCmpOpcode(CmpInst *CmpI);
spv::Op GetSPIRVCastOpcode(Instruction &I);
spv::Op GetSPIRVBinaryOpcode(Instruction &I);
SPIRVID GenerateClspvInstruction(CallInst *Call,
const FunctionInfo &FuncInfo);
SPIRVID GenerateImageInstruction(CallInst *Call,
const FunctionInfo &FuncInfo);
SPIRVID GenerateSubgroupInstruction(CallInst *Call,
const FunctionInfo &FuncInfo);
SPIRVID GenerateInstructionFromCall(CallInst *Call);
void GenerateInstruction(Instruction &I);
void GenerateFuncEpilogue();
void HandleDeferredInstruction();
void HandleDeferredDecorations();
bool is4xi8vec(Type *Ty) const;
spv::StorageClass GetStorageClass(unsigned AddrSpace) const;
spv::StorageClass GetStorageClassForArgKind(clspv::ArgKind arg_kind) const;
spv::BuiltIn GetBuiltin(StringRef globalVarName) const;
// Returns the GLSL extended instruction enum that the given function
// call maps to. If none, then returns the 0 value, i.e. GLSLstd450Bad.
glsl::ExtInst getExtInstEnum(const Builtins::FunctionInfo &func_info);
// Returns the GLSL extended instruction enum indirectly used by the given
// function. That is, to implement the given function, we use an extended
// instruction plus one more instruction. If none, then returns the 0 value,
// i.e. GLSLstd450Bad.
glsl::ExtInst getIndirectExtInstEnum(const Builtins::FunctionInfo &func_info);
// Returns the single GLSL extended instruction used directly or
// indirectly by the given function call.
glsl::ExtInst
getDirectOrIndirectExtInstEnum(const Builtins::FunctionInfo &func_info);
void WriteOneWord(uint32_t Word);
void WriteResultID(const SPIRVInstruction &Inst);
void WriteWordCountAndOpcode(const SPIRVInstruction &Inst);
void WriteOperand(const SPIRVOperand &Op);
void WriteSPIRVBinary();
void WriteSPIRVBinary(SPIRVInstructionList &SPIRVInstList);
// Returns true if |type| is compatible with OpConstantNull.
bool IsTypeNullable(const Type *type) const;
// Populate UBO remapped type maps.
void PopulateUBOTypeMaps();
// Populate the merge and continue block maps.
void PopulateStructuredCFGMaps();
// Wrapped methods of DataLayout accessors. If |type| was remapped for UBOs,
// uses the internal map, otherwise it falls back on the data layout.
uint64_t GetTypeSizeInBits(Type *type, const DataLayout &DL);
uint64_t GetTypeAllocSize(Type *type, const DataLayout &DL);
uint32_t GetExplicitLayoutStructMemberOffset(StructType *type,
unsigned member,
const DataLayout &DL);
// Returns the base pointer of |v|.
Value *GetBasePointer(Value *v);
// Add a Capability if not already added (e.g. CapabilityGroupNonUniformBroadcast)
void addCapability(uint32_t c) { CapabilitySet.emplace(c); }
// Sets |HasVariablePointersStorageBuffer| or |HasVariablePointers| based on
// |address_space|.
void setVariablePointersCapabilities(unsigned address_space);
// Returns true if |lhs| and |rhs| represent the same resource or workgroup
// variable.
bool sameResource(Value *lhs, Value *rhs) const;
// Returns true if |inst| is phi or select that selects from the same
// structure (or null).
bool selectFromSameObject(Instruction *inst);
// Returns true if |Arg| is called with a coherent resource.
bool CalledWithCoherentResource(Argument &Arg);
//
// Primary interface for adding SPIRVInstructions to a SPIRVSection.
template <enum SPIRVSection TSection = kFunctions>
SPIRVID addSPIRVInst(spv::Op Opcode, SPIRVOperandVec &Operands) {
bool has_result, has_result_type;
spv::HasResultAndType(Opcode, &has_result, &has_result_type);
SPIRVID RID = has_result ? incrNextID() : 0;
SPIRVSections[TSection].emplace_back(Opcode, RID, Operands);
return RID;
}
template <enum SPIRVSection TSection = kFunctions>
SPIRVID addSPIRVInst(spv::Op Op) {
SPIRVOperandVec Ops;
return addSPIRVInst<TSection>(Op, Ops);
}
template <enum SPIRVSection TSection = kFunctions>
SPIRVID addSPIRVInst(spv::Op Op, uint32_t V) {
SPIRVOperandVec Ops;
Ops.emplace_back(LITERAL_WORD, V);
return addSPIRVInst<TSection>(Op, Ops);
}
template <enum SPIRVSection TSection = kFunctions>
SPIRVID addSPIRVInst(spv::Op Op, const char *V) {
SPIRVOperandVec Ops;
Ops.emplace_back(LITERAL_STRING, V);
return addSPIRVInst<TSection>(Op, Ops);
}
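// A typical use, as in the type and decoration emitters below:
//   SPIRVOperandVec Ops;
//   Ops << RID << spv::DecorationBlock;
//   addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);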
//
// Add a placeholder for an llvm::Value that references future values.
// It must have a result ID in case the final SPIRVInstruction requires one.
SPIRVID addSPIRVPlaceholder(Value *I) {
SPIRVID RID = incrNextID();
SPIRVOperandVec Ops;
SPIRVSections[kFunctions].emplace_back(spv::OpExtInst, RID, Ops);
DeferredInstVec.push_back({I, &SPIRVSections[kFunctions].back()});
return RID;
}
// Replace placeholder with actual SPIRVInstruction on the final pass
// (HandleDeferredInstruction).
SPIRVID replaceSPIRVInst(SPIRVInstruction *I, spv::Op Opcode,
SPIRVOperandVec &Operands) {
bool has_result, has_result_type;
spv::HasResultAndType(Opcode, &has_result, &has_result_type);
SPIRVID RID = has_result ? I->getResultID() : 0;
*I = SPIRVInstruction(Opcode, RID, Operands);
return RID;
}
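// The deferred flow, roughly: a use of a not-yet-generated value (e.g. a
// phi operand) first reserves an ID via addSPIRVPlaceholder, and
// HandleDeferredInstruction later calls replaceSPIRVInst on the recorded
// placeholder with the real opcode and operands, keeping the reserved ID.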
//
// Add global variable and capture entry point interface
SPIRVID addSPIRVGlobalVariable(const SPIRVID &TypeID, spv::StorageClass SC,
const SPIRVID &InitID = SPIRVID());
SPIRVID getReflectionImport();
void GenerateReflection();
void GenerateKernelReflection();
void GeneratePushConstantReflection();
void GenerateSpecConstantReflection();
void AddArgumentReflection(SPIRVID kernel_decl, const std::string &name,
clspv::ArgKind arg_kind, uint32_t ordinal,
uint32_t descriptor_set, uint32_t binding,
uint32_t offset, uint32_t size, uint32_t spec_id,
uint32_t elem_size);
private:
static char ID;
Module *module;
// Set of Capabilities required
CapabilitySetType CapabilitySet;
// Map from clspv::BuiltinType to SPIRV Global Variable
BuiltinConstantMapType BuiltinConstantMap;
ArrayRef<std::pair<unsigned, std::string>> samplerMap;
raw_pwrite_stream &out;
// TODO(dneto): Wouldn't it be better to always just emit a binary, and then
// convert to other formats on demand?
// When emitting a C initialization list, the WriteSPIRVBinary method
// will actually write its words to this vector via binaryTempOut.
SmallVector<char, 100> binaryTempUnderlyingVector;
raw_svector_ostream binaryTempOut;
// Binary output writes to this stream, which might be |out| or
// |binaryTempOut|. It's the latter when we really want to write a C
// initializer list.
raw_pwrite_stream *binaryOut;
const bool outputCInitList; // If true, output looks like {0x7023, ... , 5}
uint64_t patchBoundOffset;
uint32_t nextID;
SPIRVID incrNextID() { return nextID++; }
// ID for OpTypeInt 32 1.
SPIRVID int32ID;
// ID for OpTypeVector %int 4.
SPIRVID v4int32ID;
// Maps an LLVM Type pointer to the corresponding SPIR-V ID.
TypeMapType TypeMap;
// Maps an LLVM image type to its SPIR-V ID.
TypeMapType ImageTypeMap;
// A unique-vector of LLVM types that map to a SPIR-V type.
TypeList Types;
// Maps an LLVM Value pointer to the corresponding SPIR-V Id.
ValueMapType ValueMap;
SPIRVInstructionList SPIRVSections[kSectionCount];
EntryPointVecType EntryPointVec;
DeferredInstVecType DeferredInstVec;
SPIRVIDListType EntryPointInterfacesList;
SPIRVID OpExtInstImportID;
std::vector<SPIRVID> BuiltinDimensionVec;
bool HasVariablePointersStorageBuffer;
bool HasVariablePointers;
Type *SamplerTy;
DenseMap<unsigned, SPIRVID> SamplerLiteralToIDMap;
// If a function F has a pointer-to-__constant parameter, then this variable
// will map F's type to (G, index of the parameter), where in a first phase
// G is F's type.
// TODO(dneto): This doesn't seem general enough? A function might have
// more than one such parameter.
GlobalConstFuncMapType GlobalConstFuncTypeMap;
SmallPtrSet<Value *, 16> GlobalConstArgumentSet;
// An ordered set of pointer types of Base arguments to OpPtrAccessChain,
// or array types, which point into transparent memory (StorageBuffer
// storage class). These will require an ArrayStride decoration.
// See SPV_KHR_variable_pointers rev 13.
TypeList TypesNeedingArrayStride;
// This is truly ugly, but works around what look like driver bugs.
// For get_local_size, an earlier part of the flow has created a module-scope
// variable in Private address space to hold the value for the workgroup
// size. Its initializer is a uint3 value marked as builtin WorkgroupSize.
// When this is present, save the IDs of the initializer value and variable
// in these two variables. We only ever do a vector load from it, and
// when we see one of those, substitute just the value of the initializer.
// This mimics what Glslang does, and that's what drivers are used to.
// TODO(dneto): Remove this once drivers are fixed.
SPIRVID WorkgroupSizeValueID;
SPIRVID WorkgroupSizeVarID;
// Bookkeeping for mapping kernel arguments to resource variables.
struct ResourceVarInfo {
ResourceVarInfo(int index_arg, unsigned set_arg, unsigned binding_arg,
Function *fn, clspv::ArgKind arg_kind_arg, int coherent_arg)
: index(index_arg), descriptor_set(set_arg), binding(binding_arg),
var_fn(fn), arg_kind(arg_kind_arg), coherent(coherent_arg),
addr_space(fn->getReturnType()->getPointerAddressSpace()) {}
const int index; // Index into ResourceVarInfoList
const unsigned descriptor_set;
const unsigned binding;
Function *const var_fn; // The @clspv.resource.var.* function.
const clspv::ArgKind arg_kind;
const int coherent;
const unsigned addr_space; // The LLVM address space
// The SPIR-V ID of the OpVariable. Not populated at construction time.
SPIRVID var_id;
};
// A list of resource var info. Each one corresponds to a module-scope
// resource variable we will have to create. Resource var indices are
// indices into this vector.
SmallVector<std::unique_ptr<ResourceVarInfo>, 8> ResourceVarInfoList;
// This is a vector of pointers of all the resource vars, but ordered by
// kernel function, and then by argument.
UniqueVector<ResourceVarInfo *> ModuleOrderedResourceVars;
// Map a function to the ordered list of resource variables it uses, one for
// each argument. If an argument does not use a resource variable, it
// will have a null pointer entry.
using FunctionToResourceVarsMapType =
DenseMap<Function *, SmallVector<ResourceVarInfo *, 8>>;
FunctionToResourceVarsMapType FunctionToResourceVarsMap;
// What LLVM types map to SPIR-V types needing layout? These are the
// arrays and structures supporting storage buffers and uniform buffers.
TypeList TypesNeedingLayout;
// What LLVM struct types map to a SPIR-V struct type with Block decoration?
UniqueVector<StructType *> StructTypesNeedingBlock;
// For a call that represents a load from an opaque type (samplers, images),
// map it to the variable id it should load from.
DenseMap<CallInst *, SPIRVID> ResourceVarDeferredLoadCalls;
// An ordered list of the kernel arguments of type pointer-to-local.
using LocalArgList = SmallVector<Argument *, 8>;
LocalArgList LocalArgs;
// Information about a pointer-to-local argument.
struct LocalArgInfo {
// The SPIR-V ID of the array variable.
SPIRVID variable_id;
// The element type of the array.
Type *elem_type;
// The ID of the array size constant.
SPIRVID array_size_id;
// The ID of the array type.
SPIRVID array_type_id;
// The ID of the pointer to the array type.
SPIRVID ptr_array_type_id;
// The specialization constant ID of the array size.
int spec_id;
};
// A mapping from Argument to its assigned SpecId.
DenseMap<const Argument *, int> LocalArgSpecIds;
// A mapping from SpecId to its LocalArgInfo.
DenseMap<int, LocalArgInfo> LocalSpecIdInfoMap;
// A mapping from a remapped type to its real offsets.
DenseMap<Type *, std::vector<uint32_t>> RemappedUBOTypeOffsets;
// A mapping from a remapped type to its real sizes.
DenseMap<Type *, std::tuple<uint64_t, uint64_t, uint64_t>>
RemappedUBOTypeSizes;
// Maps basic block to its merge block.
DenseMap<BasicBlock *, BasicBlock *> MergeBlocks;
// Maps basic block to its continue block.
DenseMap<BasicBlock *, BasicBlock *> ContinueBlocks;
SPIRVID ReflectionID;
DenseMap<Function *, SPIRVID> KernelDeclarations;
public:
static SPIRVProducerPass *Ptr;
};
char SPIRVProducerPass::ID;
SPIRVProducerPass *SPIRVProducerPass::Ptr = nullptr;
} // namespace
namespace clspv {
ModulePass *
createSPIRVProducerPass(raw_pwrite_stream &out,
ArrayRef<std::pair<unsigned, std::string>> samplerMap,
bool outputCInitList) {
return new SPIRVProducerPass(out, samplerMap, outputCInitList);
}
} // namespace clspv
namespace {
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, uint32_t num) {
list.emplace_back(LITERAL_WORD, num);
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, int32_t num) {
list.emplace_back(LITERAL_WORD, static_cast<uint32_t>(num));
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, ArrayRef<uint32_t> num_vec) {
list.emplace_back(num_vec);
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, StringRef str) {
list.emplace_back(LITERAL_STRING, str);
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, Type *t) {
list.emplace_back(NUMBERID, SPIRVProducerPass::Ptr->getSPIRVType(t).get());
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, Value *v) {
list.emplace_back(NUMBERID, SPIRVProducerPass::Ptr->getSPIRVValue(v).get());
return list;
}
SPIRVOperandVec &operator<<(SPIRVOperandVec &list, const SPIRVID &v) {
list.emplace_back(NUMBERID, v.get());
return list;
}
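// These stream helpers let instruction builders write, for example:
//   SPIRVOperandVec Ops;
//   Ops << Type::getInt32Ty(Context) << 1;
//   addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
// as done when generating workgroup variables below.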
} // namespace
bool SPIRVProducerPass::runOnModule(Module &M) {
// TODO(sjw): Need to reset all data members for each Module, or better
// yet create a new SPIRVProducer for every module. For now only
// allow one call.
assert(module == nullptr);
module = &M;
if (ShowProducerIR) {
llvm::outs() << *module << "\n";
}
binaryOut = outputCInitList ? &binaryTempOut : &out;
PopulateUBOTypeMaps();
PopulateStructuredCFGMaps();
// SPIR-V always begins with its header information
outputHeader();
// Gather information from the LLVM IR that we require.
GenerateLLVMIRInfo();
// Collect information on global variables too.
for (GlobalVariable &GV : module->globals()) {
// If the GV is one of our special __spirv_* variables, remove the
// initializer as it was only placed there to force LLVM to not throw the
// value away.
if (GV.getName().startswith("__spirv_") ||
GV.getAddressSpace() == clspv::AddressSpace::PushConstant) {
GV.setInitializer(nullptr);
}
// Collect types' information from global variable.
FindTypePerGlobalVar(GV);
}
// Generate SPIRV instructions for types.
GenerateSPIRVTypes();
// Generate literal samplers if necessary.
GenerateSamplers();
// Generate SPIRV variables.
for (GlobalVariable &GV : module->globals()) {
GenerateGlobalVar(GV);
}
GenerateResourceVars();
GenerateWorkgroupVars();
// Generate SPIRV instructions for each function.
for (Function &F : *module) {
if (F.isDeclaration()) {
continue;
}
// Generate Function Prologue.
GenerateFuncPrologue(F);
// Generate SPIRV instructions for function body.
GenerateFuncBody(F);
// Generate Function Epilogue.
GenerateFuncEpilogue();
}
HandleDeferredInstruction();
HandleDeferredDecorations();
// Generate SPIRV module information.
GenerateModuleInfo();
// Generate embedded reflection information.
GenerateReflection();
WriteSPIRVBinary();
// We need to patch the SPIR-V header to set bound correctly.
patchHeader();
if (outputCInitList) {
bool first = true;
std::ostringstream os;
auto emit_word = [&os, &first](uint32_t word) {
if (!first)
os << ",\n";
os << word;
first = false;
};
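// Repack the byte stream into little-endian words. For example, on a
// little-endian host the magic-number bytes 0x03 0x02 0x23 0x07 come back
// together as the word 0x07230203.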
os << "{";
const std::string str(binaryTempOut.str());
for (unsigned i = 0; i < str.size(); i += 4) {
const uint32_t a = static_cast<unsigned char>(str[i]);
const uint32_t b = static_cast<unsigned char>(str[i + 1]);
const uint32_t c = static_cast<unsigned char>(str[i + 2]);
const uint32_t d = static_cast<unsigned char>(str[i + 3]);
emit_word(a | (b << 8) | (c << 16) | (d << 24));
}
os << "}\n";
out << os.str();
}
return false;
}
void SPIRVProducerPass::outputHeader() {
binaryOut->write(reinterpret_cast<const char *>(&spv::MagicNumber),
sizeof(spv::MagicNumber));
uint32_t minor = 0;
if (SpvVersion() == SPIRVVersion::SPIRV_1_3) {
minor = 3;
}
uint32_t version = (1 << 16) | (minor << 8);
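// For example, SPIR-V 1.3 encodes as 0x00010300 (major in byte 2, minor in
// byte 1).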
binaryOut->write(reinterpret_cast<const char *>(&version), sizeof(version));
// use Google's vendor ID
const uint32_t vendor = 21 << 16;
binaryOut->write(reinterpret_cast<const char *>(&vendor), sizeof(vendor));
// we record where we need to come back to and patch in the bound value
patchBoundOffset = binaryOut->tell();
// output a bad bound for now
binaryOut->write(reinterpret_cast<const char *>(&nextID), sizeof(nextID));
// output the schema (reserved for use and must be 0)
const uint32_t schema = 0;
binaryOut->write(reinterpret_cast<const char *>(&schema), sizeof(schema));
}
void SPIRVProducerPass::patchHeader() {
// for a binary we just write the value of nextID over bound
binaryOut->pwrite(reinterpret_cast<char *>(&nextID), sizeof(nextID),
patchBoundOffset);
}
void SPIRVProducerPass::GenerateLLVMIRInfo() {
// This function generates LLVM IR for things such as global variables for
// arguments, constants, and pointer types for argument access. This
// information is artificial because we need Vulkan SPIR-V output. It is
// executed ahead of FindType and FindConstant.
FindGlobalConstVars();
FindResourceVars();
FindTypesForSamplerMap();
FindTypesForResourceVars();
}
void SPIRVProducerPass::FindGlobalConstVars() {
clspv::NormalizeGlobalVariables(*module);
const DataLayout &DL = module->getDataLayout();
SmallVector<GlobalVariable *, 8> GVList;
SmallVector<GlobalVariable *, 8> DeadGVList;
for (GlobalVariable &GV : module->globals()) {
if (GV.getType()->getAddressSpace() == AddressSpace::Constant) {
if (GV.use_empty()) {
DeadGVList.push_back(&GV);
} else {
GVList.push_back(&GV);
}
}
}
// Remove dead global __constant variables.
for (auto GV : DeadGVList) {
GV->eraseFromParent();
}
DeadGVList.clear();
if (clspv::Option::ModuleConstantsInStorageBuffer()) {
// For now, we only support a single storage buffer.
if (!GVList.empty()) {
assert(GVList.size() == 1);
const auto *GV = GVList[0];
const auto constants_byte_size =
(GetTypeSizeInBits(GV->getInitializer()->getType(), DL)) / 8;
const size_t kConstantMaxSize = 65536;
if (constants_byte_size > kConstantMaxSize) {
outs() << "Max __constant capacity of " << kConstantMaxSize
<< " bytes exceeded: " << constants_byte_size << " bytes used\n";
llvm_unreachable("Max __constant capacity exceeded");
}
}
} else {
// Change global constant variable's address space to ModuleScopePrivate.
auto &GlobalConstFuncTyMap = getGlobalConstFuncTypeMap();
for (auto GV : GVList) {
// Create new gv with ModuleScopePrivate address space.
Type *NewGVTy = GV->getType()->getPointerElementType();
GlobalVariable *NewGV = new GlobalVariable(
*module, NewGVTy, false, GV->getLinkage(), GV->getInitializer(), "",
nullptr, GV->getThreadLocalMode(), AddressSpace::ModuleScopePrivate);
NewGV->takeName(GV);
const SmallVector<User *, 8> GVUsers(GV->user_begin(), GV->user_end());
SmallVector<User *, 8> CandidateUsers;
auto record_called_function_type_as_user =
[&GlobalConstFuncTyMap](Value *gv, CallInst *call) {
// Find argument index.
unsigned index = 0;
for (unsigned i = 0; i < call->getNumArgOperands(); i++) {
if (gv == call->getOperand(i)) {
// TODO(dneto): Should we break here?
index = i;
}
}
// Record function type with global constant.
GlobalConstFuncTyMap[call->getFunctionType()] =
std::make_pair(call->getFunctionType(), index);
};
for (User *GVU : GVUsers) {
if (CallInst *Call = dyn_cast<CallInst>(GVU)) {
record_called_function_type_as_user(GV, Call);
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(GVU)) {
// Check GEP users.
for (User *GEPU : GEP->users()) {
if (CallInst *GEPCall = dyn_cast<CallInst>(GEPU)) {
record_called_function_type_as_user(GEP, GEPCall);
}
}
}
CandidateUsers.push_back(GVU);
}
for (User *U : CandidateUsers) {
// Update users of gv with new gv.
if (!isa<Constant>(U)) {
// #254: Can't change operands of a constant, but this shouldn't be
// something that sticks around in the module.
U->replaceUsesOfWith(GV, NewGV);
}
}
// Delete original gv.
GV->eraseFromParent();
}
}
}
void SPIRVProducerPass::FindResourceVars() {
ResourceVarInfoList.clear();
FunctionToResourceVarsMap.clear();
ModuleOrderedResourceVars.reset();
// Normally, there is one resource variable per clspv.resource.var.*
// function, since that is unique'd by arg type and index. By design,
// we can share these resource variables across kernels because all
// kernels use the same descriptor set.
//
// But if the user requested distinct descriptor sets per kernel, then
// the descriptor allocator has made different (set,binding) pairs for
// the same (type,arg_index) pair. Since we can decorate a resource
// variable with only exactly one DescriptorSet and Binding, we are
// forced in this case to make distinct resource variables whenever
// the same clspv.resource.var.X function is seen with distinct
// (set,binding) values.
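// For example, if DistinctKernelDescriptorSets() is in effect and two
// kernels use the same clspv.resource.var.N function at (set 0, binding 0)
// and (set 1, binding 0), we create two ResourceVarInfos; by default they
// would share one.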
const bool always_distinct_sets =
clspv::Option::DistinctKernelDescriptorSets();
for (Function &F : *module) {
// Rely on the fact that the resource var functions have a stable ordering
// in the module.
if (Builtins::Lookup(&F) == Builtins::kClspvResource) {
// Find all calls to this function with distinct set and binding pairs.
// Save them in ResourceVarInfoList.
// Determine uniqueness of the (set,binding) pairs only within this
// one resource-var builtin function.
using SetAndBinding = std::pair<unsigned, unsigned>;
// Maps set and binding to the resource var info.
DenseMap<SetAndBinding, ResourceVarInfo *> set_and_binding_map;
bool first_use = true;
for (auto &U : F.uses()) {
if (auto *call = dyn_cast<CallInst>(U.getUser())) {
const auto set = unsigned(
dyn_cast<ConstantInt>(call->getArgOperand(0))->getZExtValue());
const auto binding = unsigned(
dyn_cast<ConstantInt>(call->getArgOperand(1))->getZExtValue());
const auto arg_kind = clspv::ArgKind(
dyn_cast<ConstantInt>(call->getArgOperand(2))->getZExtValue());
const auto arg_index = unsigned(
dyn_cast<ConstantInt>(call->getArgOperand(3))->getZExtValue());
const auto coherent = unsigned(
dyn_cast<ConstantInt>(call->getArgOperand(5))->getZExtValue());
// Find or make the resource var info for this combination.
ResourceVarInfo *rv = nullptr;
if (always_distinct_sets) {
// Make a new resource var any time we see a different
// (set,binding) pair.
SetAndBinding key{set, binding};
auto where = set_and_binding_map.find(key);
if (where == set_and_binding_map.end()) {
rv = new ResourceVarInfo(
static_cast<int>(ResourceVarInfoList.size()), set, binding,
&F, arg_kind, coherent);
ResourceVarInfoList.emplace_back(rv);
set_and_binding_map[key] = rv;
} else {
rv = where->second;
}
} else {
// The default is to make exactly one resource for each
// clspv.resource.var.* function.
if (first_use) {
first_use = false;
rv = new ResourceVarInfo(
static_cast<int>(ResourceVarInfoList.size()), set, binding,
&F, arg_kind, coherent);
ResourceVarInfoList.emplace_back(rv);
} else {
rv = ResourceVarInfoList.back().get();
}
}
// Now populate FunctionToResourceVarsMap.
auto &mapping =
FunctionToResourceVarsMap[call->getParent()->getParent()];
while (mapping.size() <= arg_index) {
mapping.push_back(nullptr);
}
mapping[arg_index] = rv;
}
}
}
}
// Populate ModuleOrderedResourceVars.
for (Function &F : *module) {
auto where = FunctionToResourceVarsMap.find(&F);
if (where != FunctionToResourceVarsMap.end()) {
for (auto &rv : where->second) {
if (rv != nullptr) {
ModuleOrderedResourceVars.insert(rv);
}
}
}
}
if (ShowResourceVars) {
for (auto *info : ModuleOrderedResourceVars) {
outs() << "MORV index " << info->index << " (" << info->descriptor_set
<< "," << info->binding << ") " << *(info->var_fn->getReturnType())
<< "\n";
}
}
}
void SPIRVProducerPass::FindTypePerGlobalVar(GlobalVariable &GV) {
// Investigate global variable's type.
FindType(GV.getType());
}
void SPIRVProducerPass::FindTypesForSamplerMap() {
// If we are using a sampler map, find the type of the sampler.
if (module->getFunction(clspv::LiteralSamplerFunction()) ||
!getSamplerMap().empty()) {
auto SamplerStructTy =
StructType::getTypeByName(module->getContext(), "opencl.sampler_t");
if (!SamplerStructTy) {
SamplerStructTy =
StructType::create(module->getContext(), "opencl.sampler_t");
}
SamplerTy = SamplerStructTy->getPointerTo(AddressSpace::UniformConstant);
FindType(SamplerTy);
}
}
void SPIRVProducerPass::FindTypesForResourceVars() {
// Record types so they are generated.
TypesNeedingLayout.reset();
StructTypesNeedingBlock.reset();
// To match older clspv codegen, generate the float type first if required
// for images.
for (const auto *info : ModuleOrderedResourceVars) {
if (info->arg_kind == clspv::ArgKind::SampledImage ||
info->arg_kind == clspv::ArgKind::StorageImage) {
if (IsIntImageType(info->var_fn->getReturnType())) {
// Nothing for now...
} else if (IsUintImageType(info->var_fn->getReturnType())) {
FindType(Type::getInt32Ty(module->getContext()));
}
// We need "float" either for the sampled type or for the Lod operand.
FindType(Type::getFloatTy(module->getContext()));
}
}
for (const auto *info : ModuleOrderedResourceVars) {
Type *type = info->var_fn->getReturnType();
switch (info->arg_kind) {
case clspv::ArgKind::Buffer:
case clspv::ArgKind::BufferUBO:
if (auto *sty = dyn_cast<StructType>(type->getPointerElementType())) {
StructTypesNeedingBlock.insert(sty);
} else {
errs() << *type << "\n";
llvm_unreachable("Buffer arguments must map to structures!");
}
break;
case clspv::ArgKind::Pod:
case clspv::ArgKind::PodUBO:
case clspv::ArgKind::PodPushConstant:
if (auto *sty = dyn_cast<StructType>(type->getPointerElementType())) {
StructTypesNeedingBlock.insert(sty);
} else {
errs() << *type << "\n";
llvm_unreachable("POD arguments must map to structures!");
}
break;
case clspv::ArgKind::SampledImage:
case clspv::ArgKind::StorageImage:
case clspv::ArgKind::Sampler:
// Sampler and image types map to the pointee type but
// in the uniform constant address space.
type = PointerType::get(type->getPointerElementType(),
clspv::AddressSpace::UniformConstant);
break;
default:
break;
}
// The converted type is the type of the OpVariable we will generate.
// If the pointee type is an array of size zero, FindType will convert it
// to a runtime array.
FindType(type);
}
// If module constants are clustered in a storage buffer then that struct
// needs layout decorations.
if (clspv::Option::ModuleConstantsInStorageBuffer()) {
for (GlobalVariable &GV : module->globals()) {
PointerType *PTy = cast<PointerType>(GV.getType());
const auto AS = PTy->getAddressSpace();
const bool module_scope_constant_external_init =
(AS == AddressSpace::Constant) && GV.hasInitializer();
const spv::BuiltIn BuiltinType = GetBuiltin(GV.getName());
if (module_scope_constant_external_init &&
spv::BuiltInMax == BuiltinType) {
StructTypesNeedingBlock.insert(
cast<StructType>(PTy->getPointerElementType()));
}
}
}
for (const GlobalVariable &GV : module->globals()) {
if (GV.getAddressSpace() == clspv::AddressSpace::PushConstant) {
auto Ty = cast<PointerType>(GV.getType())->getPointerElementType();
assert(Ty->isStructTy() && "Push constants have to be structures.");
auto STy = cast<StructType>(Ty);
StructTypesNeedingBlock.insert(STy);
}
}
// Traverse the arrays and structures underneath each Block, and
// mark them as needing layout.
std::vector<Type *> work_list(StructTypesNeedingBlock.begin(),
StructTypesNeedingBlock.end());
while (!work_list.empty()) {
Type *type = work_list.back();
work_list.pop_back();
TypesNeedingLayout.insert(type);
switch (type->getTypeID()) {
case Type::ArrayTyID:
work_list.push_back(type->getArrayElementType());
if (!Hack_generate_runtime_array_stride_early) {
// Remember this array type for deferred decoration.
TypesNeedingArrayStride.insert(type);
}
break;
case Type::StructTyID:
for (auto *elem_ty : cast<StructType>(type)->elements()) {
work_list.push_back(elem_ty);
}
default:
// This type and its contained types don't get layout.
break;
}
}
}
void SPIRVProducerPass::GenerateWorkgroupVars() {
// The SpecId assignment for pointer-to-local arguments is recorded in
// module-level metadata. Translate that information into local argument
// information.
LLVMContext &Context = module->getContext();
NamedMDNode *nmd = module->getNamedMetadata(clspv::LocalSpecIdMetadataName());
if (!nmd)
return;
for (auto operand : nmd->operands()) {
MDTuple *tuple = cast<MDTuple>(operand);
ValueAsMetadata *fn_md = cast<ValueAsMetadata>(tuple->getOperand(0));
Function *func = cast<Function>(fn_md->getValue());
ConstantAsMetadata *arg_index_md =
cast<ConstantAsMetadata>(tuple->getOperand(1));
int arg_index = static_cast<int>(
cast<ConstantInt>(arg_index_md->getValue())->getSExtValue());
Argument *arg = &*(func->arg_begin() + arg_index);
ConstantAsMetadata *spec_id_md =
cast<ConstantAsMetadata>(tuple->getOperand(2));
int spec_id = static_cast<int>(
cast<ConstantInt>(spec_id_md->getValue())->getSExtValue());
LocalArgSpecIds[arg] = spec_id;
if (LocalSpecIdInfoMap.count(spec_id))
continue;
// Generate the spec constant.
SPIRVOperandVec Ops;
Ops << Type::getInt32Ty(Context) << 1;
SPIRVID ArraySizeID = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
// Generate the array type.
Type *ElemTy = arg->getType()->getPointerElementType();
Ops.clear();
// The element type must have been created.
Ops << ElemTy << ArraySizeID;
SPIRVID ArrayTypeID = addSPIRVInst<kTypes>(spv::OpTypeArray, Ops);
Ops.clear();
Ops << spv::StorageClassWorkgroup << ArrayTypeID;
SPIRVID PtrArrayTypeID = addSPIRVInst<kTypes>(spv::OpTypePointer, Ops);
// Generate OpVariable.
//
// Ops[0] : Result Type ID
// Ops[1] : Storage Class
SPIRVID VariableID =
addSPIRVGlobalVariable(PtrArrayTypeID, spv::StorageClassWorkgroup);
Ops.clear();
Ops << ArraySizeID << spv::DecorationSpecId << spec_id;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
LocalArgInfo info{VariableID, ElemTy, ArraySizeID,
ArrayTypeID, PtrArrayTypeID, spec_id};
LocalSpecIdInfoMap[spec_id] = info;
}
}
void SPIRVProducerPass::FindType(Type *Ty) {
TypeList &TyList = getTypeList();
if (0 != TyList.idFor(Ty)) {
return;
}
if (Ty->isPointerTy()) {
auto AddrSpace = Ty->getPointerAddressSpace();
if ((AddressSpace::Constant == AddrSpace) ||
(AddressSpace::Global == AddrSpace)) {
auto PointeeTy = Ty->getPointerElementType();
if (PointeeTy->isStructTy() &&
dyn_cast<StructType>(PointeeTy)->isOpaque()) {
FindType(PointeeTy);
auto ActualPointerTy =
PointeeTy->getPointerTo(AddressSpace::UniformConstant);
FindType(ActualPointerTy);
return;
}
}
}
// By convention, an LLVM array type with 0 elements will map to
// OpTypeRuntimeArray. Otherwise, it will map to OpTypeArray, which
// has a constant number of elements. We need to support the type of
// that length constant.
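// For example, [0 x float] maps to OpTypeRuntimeArray %float, while
// [4 x float] maps to OpTypeArray %float with an i32 length constant of 4,
// which is why i32 must be findable here.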
if (auto *arrayTy = dyn_cast<ArrayType>(Ty)) {
if (arrayTy->getNumElements() > 0) {
LLVMContext &Context = Ty->getContext();
FindType(Type::getInt32Ty(Context));
}
}
for (Type *SubTy : Ty->subtypes()) {
FindType(SubTy);
}
TyList.insert(Ty);
}
spv::StorageClass SPIRVProducerPass::GetStorageClass(unsigned AddrSpace) const {
switch (AddrSpace) {
default:
llvm_unreachable("Unsupported OpenCL address space");
case AddressSpace::Private:
return spv::StorageClassFunction;
case AddressSpace::Global:
return spv::StorageClassStorageBuffer;
case AddressSpace::Constant:
return clspv::Option::ConstantArgsInUniformBuffer()
? spv::StorageClassUniform
: spv::StorageClassStorageBuffer;
case AddressSpace::Input:
return spv::StorageClassInput;
case AddressSpace::Local:
return spv::StorageClassWorkgroup;
case AddressSpace::UniformConstant:
return spv::StorageClassUniformConstant;
case AddressSpace::Uniform:
return spv::StorageClassUniform;
case AddressSpace::ModuleScopePrivate:
return spv::StorageClassPrivate;
case AddressSpace::PushConstant:
return spv::StorageClassPushConstant;
}
}
spv::StorageClass
SPIRVProducerPass::GetStorageClassForArgKind(clspv::ArgKind arg_kind) const {
switch (arg_kind) {
case clspv::ArgKind::Buffer:
return spv::StorageClassStorageBuffer;
case clspv::ArgKind::BufferUBO:
return spv::StorageClassUniform;
case clspv::ArgKind::Pod:
return spv::StorageClassStorageBuffer;
case clspv::ArgKind::PodUBO:
return spv::StorageClassUniform;
case clspv::ArgKind::PodPushConstant:
return spv::StorageClassPushConstant;
case clspv::ArgKind::Local:
return spv::StorageClassWorkgroup;
case clspv::ArgKind::SampledImage:
case clspv::ArgKind::StorageImage:
case clspv::ArgKind::Sampler:
return spv::StorageClassUniformConstant;
default:
llvm_unreachable("Unsupported storage class for argument kind");
}
}
spv::BuiltIn SPIRVProducerPass::GetBuiltin(StringRef Name) const {
return StringSwitch<spv::BuiltIn>(Name)
.Case("__spirv_GlobalInvocationId", spv::BuiltInGlobalInvocationId)
.Case("__spirv_LocalInvocationId", spv::BuiltInLocalInvocationId)
.Case("__spirv_WorkgroupSize", spv::BuiltInWorkgroupSize)
.Case("__spirv_NumWorkgroups", spv::BuiltInNumWorkgroups)
.Case("__spirv_WorkgroupId", spv::BuiltInWorkgroupId)
.Case("__spirv_WorkDim", spv::BuiltInWorkDim)
.Case("__spirv_GlobalOffset", spv::BuiltInGlobalOffset)
.Default(spv::BuiltInMax);
}
SPIRVID SPIRVProducerPass::getOpExtInstImportID() {
if (OpExtInstImportID == 0) {
//
// Generate OpExtInstImport.
//
// Ops[0] ... Ops[n] = Name (Literal String)
OpExtInstImportID =
addSPIRVInst<kImports>(spv::OpExtInstImport, "GLSL.std.450");
}
return OpExtInstImportID;
}
SPIRVID SPIRVProducerPass::addSPIRVGlobalVariable(const SPIRVID &TypeID,
spv::StorageClass SC,
const SPIRVID &InitID) {
// Generate OpVariable.
//
// Ops[0] : Result Type ID
// Ops[1] : Storage Class
// Ops[2] : Initialization Value ID (optional)
SPIRVOperandVec Ops;
Ops << TypeID << SC;
if (InitID.isValid()) {
Ops << InitID;
}
SPIRVID VID = addSPIRVInst<kGlobalVariables>(spv::OpVariable, Ops);
if (SC == spv::StorageClassInput) {
getEntryPointInterfacesList().push_back(VID);
}
return VID;
}
Type *SPIRVProducerPass::CanonicalType(Type *type) {
if (type->getNumContainedTypes() != 0) {
switch (type->getTypeID()) {
case Type::PointerTyID: {
// For the purposes of our Vulkan SPIR-V type system, constant and global
// are conflated.
auto *ptr_ty = cast<PointerType>(type);
unsigned AddrSpace = ptr_ty->getAddressSpace();
if (AddressSpace::Constant == AddrSpace) {
if (!clspv::Option::ConstantArgsInUniformBuffer()) {
AddrSpace = AddressSpace::Global;
// The canonical type of __constant is __global unless constants are
// passed in uniform buffers.
auto *GlobalTy =
ptr_ty->getPointerElementType()->getPointerTo(AddrSpace);
return GlobalTy;
}
}
break;
}
case Type::StructTyID: {
SmallVector<Type *, 8> subtypes;
bool changed = false;
for (auto *subtype : type->subtypes()) {
auto canonical = CanonicalType(subtype);
subtypes.push_back(canonical);
if (canonical != subtype) {
changed = true;
}
}
if (changed) {
return StructType::get(type->getContext(), subtypes,
cast<StructType>(type)->isPacked());
}
break;
}
case Type::ArrayTyID: {
auto *elem_ty = type->getArrayElementType();
auto *equiv_elem_ty = CanonicalType(elem_ty);
if (equiv_elem_ty != elem_ty) {
return ArrayType::get(equiv_elem_ty,
cast<ArrayType>(type)->getNumElements());
}
break;
}
case Type::FunctionTyID: {
auto *func_ty = cast<FunctionType>(type);
auto *return_ty = CanonicalType(func_ty->getReturnType());
SmallVector<Type *, 8> params;
for (unsigned i = 0; i < func_ty->getNumParams(); ++i) {
params.push_back(CanonicalType(func_ty->getParamType(i)));
}
return FunctionType::get(return_ty, params, func_ty->isVarArg());
}
default:
break;
}
}
return type;
}
SPIRVID SPIRVProducerPass::getSPIRVType(Type *Ty) {
auto TI = TypeMap.find(Ty);
if (TI != TypeMap.end()) {
assert(TI->second.isValid());
return TI->second;
}
auto Canonical = CanonicalType(Ty);
if (Canonical != Ty) {
auto CanonicalTI = TypeMap.find(Canonical);
if (CanonicalTI != TypeMap.end()) {
assert(CanonicalTI->second.isValid());
return CanonicalTI->second;
}
}
// Perform the mapping with the canonical type.
const auto &DL = module->getDataLayout();
SPIRVID RID;
switch (Canonical->getTypeID()) {
default: {
Canonical->print(errs());
llvm_unreachable("Unsupported type???");
break;
}
case Type::MetadataTyID:
case Type::LabelTyID: {
// Ignore these types.
break;
}
case Type::PointerTyID: {
PointerType *PTy = cast<PointerType>(Canonical);
unsigned AddrSpace = PTy->getAddressSpace();
if (AddrSpace != AddressSpace::UniformConstant) {
auto PointeeTy = PTy->getElementType();
if (PointeeTy->isStructTy() &&
dyn_cast<StructType>(PointeeTy)->isOpaque()) {
// TODO(sjw): assert always an image?
RID = getSPIRVType(PointeeTy);
break;
}
}
//
// Generate OpTypePointer.
//
// OpTypePointer
// Ops[0] = Storage Class
// Ops[1] = Element Type ID
SPIRVOperandVec Ops;
Ops << GetStorageClass(AddrSpace) << PTy->getElementType();
RID = addSPIRVInst<kTypes>(spv::OpTypePointer, Ops);
break;
}
case Type::StructTyID: {
StructType *STy = cast<StructType>(Canonical);
// Handle sampler type.
if (STy->isOpaque()) {
if (STy->getName().equals("opencl.sampler_t")) {
//
// Generate OpTypeSampler
//
// Empty Ops.
RID = addSPIRVInst<kTypes>(spv::OpTypeSampler);
break;
} else if (STy->getName().startswith("opencl.image1d_ro_t") ||
STy->getName().startswith("opencl.image1d_rw_t") ||
STy->getName().startswith("opencl.image1d_wo_t") ||
STy->getName().startswith("opencl.image1d_array_ro_t") ||
STy->getName().startswith("opencl.image1d_array_rw_t") ||
STy->getName().startswith("opencl.image1d_array_wo_t") ||
STy->getName().startswith("opencl.image2d_ro_t") ||
STy->getName().startswith("opencl.image2d_rw_t") ||
STy->getName().startswith("opencl.image2d_wo_t") ||
STy->getName().startswith("opencl.image2d_array_ro_t") ||
STy->getName().startswith("opencl.image2d_array_rw_t") ||
STy->getName().startswith("opencl.image2d_array_wo_t") ||
STy->getName().startswith("opencl.image3d_ro_t") ||
STy->getName().startswith("opencl.image3d_rw_t") ||
STy->getName().startswith("opencl.image3d_wo_t")) {
if (STy->getName().startswith("opencl.image1d_")) {
if (STy->getName().contains(".sampled"))
addCapability(spv::CapabilitySampled1D);
else
addCapability(spv::CapabilityImage1D);
}
//
// Generate OpTypeImage
//
// Ops[0] = Sampled Type ID
// Ops[1] = Dim ID
// Ops[2] = Depth (Literal Number)
// Ops[3] = Arrayed (Literal Number)
// Ops[4] = MS (Literal Number)
// Ops[5] = Sampled (Literal Number)
// Ops[6] = Image Format ID
//
SPIRVOperandVec Ops;
SPIRVID SampledTyID;
if (STy->getName().contains(".float")) {
SampledTyID = getSPIRVType(Type::getFloatTy(Canonical->getContext()));
} else if (STy->getName().contains(".uint")) {
SampledTyID = getSPIRVType(Type::getInt32Ty(Canonical->getContext()));
} else if (STy->getName().contains(".int")) {
// Generate a signed 32-bit integer if necessary.
if (int32ID == 0) {
SPIRVOperandVec intOps;
intOps << 32 << 1;
int32ID = addSPIRVInst<kTypes>(spv::OpTypeInt, intOps);
}
SampledTyID = int32ID;
// Generate a vec4 of the signed int if necessary.
if (v4int32ID == 0) {
SPIRVOperandVec vecOps;
vecOps << int32ID << 4;
v4int32ID = addSPIRVInst<kTypes>(spv::OpTypeVector, vecOps);
}
} else {
// This was likely an UndefValue.
SampledTyID = getSPIRVType(Type::getFloatTy(Canonical->getContext()));
}
Ops << SampledTyID;
spv::Dim DimID = spv::Dim2D;
if (STy->getName().startswith("opencl.image1d_ro_t") ||
STy->getName().startswith("opencl.image1d_rw_t") ||
STy->getName().startswith("opencl.image1d_wo_t") ||
STy->getName().startswith("opencl.image1d_array_ro_t") ||
STy->getName().startswith("opencl.image1d_array_rw_t") ||
STy->getName().startswith("opencl.image1d_array_wo_t")) {
DimID = spv::Dim1D;
} else if (STy->getName().startswith("opencl.image3d_ro_t") ||
STy->getName().startswith("opencl.image3d_rw_t") ||
STy->getName().startswith("opencl.image3d_wo_t")) {
DimID = spv::Dim3D;
}
Ops << DimID;
// TODO: Set up Depth.
Ops << 0;
uint32_t arrayed = STy->getName().contains("_array_") ? 1 : 0;
Ops << arrayed;
// TODO: Set up MS.
Ops << 0;
// Set up Sampled.
//
// From Spec
//
// 0 indicates this is only known at run time, not at compile time
// 1 indicates will be used with sampler
// 2 indicates will be used without a sampler (a storage image)
uint32_t Sampled = 1;
if (!STy->getName().contains(".sampled")) {
Sampled = 2;
}
Ops << Sampled;
// TODO: Set up Image Format.
Ops << spv::ImageFormatUnknown;
RID = addSPIRVInst<kTypes>(spv::OpTypeImage, Ops);
// Only need a sampled version of the type if it is used with a sampler.
if (Sampled == 1) {
Ops.clear();
Ops << RID;
getImageTypeMap()[Canonical] =
addSPIRVInst<kTypes>(spv::OpTypeSampledImage, Ops);
}
break;
}
}
//
// Generate OpTypeStruct
//
// Ops[0] ... Ops[n] = Member IDs
SPIRVOperandVec Ops;
for (auto *EleTy : STy->elements()) {
Ops << EleTy;
}
RID = addSPIRVInst<kTypes>(spv::OpTypeStruct, Ops);
// Generate OpMemberDecorate unless we are generating it for the canonical
// type.
StructType *canonical = cast<StructType>(CanonicalType(STy));
if (TypesNeedingLayout.idFor(STy) &&
(canonical == STy || !TypesNeedingLayout.idFor(canonical))) {
for (unsigned MemberIdx = 0; MemberIdx < STy->getNumElements();
MemberIdx++) {
// Ops[0] = Structure Type ID
// Ops[1] = Member Index(Literal Number)
// Ops[2] = Decoration (Offset)
// Ops[3] = Byte Offset (Literal Number)
const auto ByteOffset =
GetExplicitLayoutStructMemberOffset(STy, MemberIdx, DL);
Ops.clear();
Ops << RID << MemberIdx << spv::DecorationOffset << ByteOffset;
addSPIRVInst<kAnnotations>(spv::OpMemberDecorate, Ops);
}
}
// Generate OpDecorate unless we are generating it for the canonical type.
if (StructTypesNeedingBlock.idFor(STy) &&
(canonical == STy || !StructTypesNeedingBlock.idFor(canonical))) {
Ops.clear();
// Use Block decorations with StorageBuffer storage class.
Ops << RID << spv::DecorationBlock;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
break;
}
case Type::IntegerTyID: {
uint32_t bit_width =
static_cast<uint32_t>(Canonical->getPrimitiveSizeInBits());
if (clspv::Option::Int8Support() && bit_width == 8) {
addCapability(spv::CapabilityInt8);
} else if (bit_width == 16) {
addCapability(spv::CapabilityInt16);
} else if (bit_width == 64) {
addCapability(spv::CapabilityInt64);
}
if (bit_width == 1) {
RID = addSPIRVInst<kTypes>(spv::OpTypeBool);
} else {
if (!clspv::Option::Int8Support() && bit_width == 8) {
// i8 is added to TypeMap as i32.
RID = getSPIRVType(Type::getIntNTy(Canonical->getContext(), 32));
} else {
SPIRVOperandVec Ops;
Ops << bit_width << 0 /* not signed */;
RID = addSPIRVInst<kTypes>(spv::OpTypeInt, Ops);
}
}
break;
}
case Type::HalfTyID:
case Type::FloatTyID:
case Type::DoubleTyID: {
uint32_t bit_width =
static_cast<uint32_t>(Canonical->getPrimitiveSizeInBits());
if (bit_width == 16) {
addCapability(spv::CapabilityFloat16);
} else if (bit_width == 64) {
addCapability(spv::CapabilityFloat64);
}
SPIRVOperandVec Ops;
Ops << bit_width;
RID = addSPIRVInst<kTypes>(spv::OpTypeFloat, Ops);
break;
}
case Type::ArrayTyID: {
ArrayType *ArrTy = cast<ArrayType>(Canonical);
const uint64_t Length = ArrTy->getArrayNumElements();
if (Length == 0) {
// By convention, map it to a RuntimeArray.
Type *EleTy = ArrTy->getArrayElementType();
//
// Generate OpTypeRuntimeArray.
//
// OpTypeRuntimeArray
// Ops[0] = Element Type ID
SPIRVOperandVec Ops;
Ops << EleTy;
RID = addSPIRVInst<kTypes>(spv::OpTypeRuntimeArray, Ops);
if (Hack_generate_runtime_array_stride_early) {
// Generate OpDecorate.
// Ops[0] = Target ID
// Ops[1] = Decoration (ArrayStride)
// Ops[2] = Stride Number(Literal Number)
Ops.clear();
Ops << RID << spv::DecorationArrayStride
<< static_cast<uint32_t>(GetTypeAllocSize(EleTy, DL));
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
} else {
//
// Generate OpConstant and OpTypeArray.
//
//
// Generate OpConstant for array length.
//
// Add constant for length to constant list.
Constant *CstLength =
ConstantInt::get(Type::getInt32Ty(module->getContext()), Length);
// Remember to generate ArrayStride later
getTypesNeedingArrayStride().insert(Canonical);
//
// Generate OpTypeArray.
//
// Ops[0] = Element Type ID
// Ops[1] = Array Length Constant ID
SPIRVOperandVec Ops;
Ops << ArrTy->getElementType() << CstLength;
RID = addSPIRVInst<kTypes>(spv::OpTypeArray, Ops);
}
break;
}
case Type::FixedVectorTyID: {
auto VecTy = cast<VectorType>(Canonical);
// <4 x i8> is changed to i32 if i8 is not generally supported.
if (!clspv::Option::Int8Support() &&
VecTy->getElementType() == Type::getInt8Ty(module->getContext())) {
if (VecTy->getElementCount().getKnownMinValue() == 4) {
RID = getSPIRVType(VecTy->getElementType());
break;
} else {
Canonical->print(errs());
llvm_unreachable("Support above i8 vector type");
}
}
// Ops[0] = Component Type ID
// Ops[1] = Component Count (Literal Number)
SPIRVOperandVec Ops;
Ops << VecTy->getElementType()
<< VecTy->getElementCount().getKnownMinValue();
RID = addSPIRVInst<kTypes>(spv::OpTypeVector, Ops);
break;
}
case Type::VoidTyID: {
RID = addSPIRVInst<kTypes>(spv::OpTypeVoid);
break;
}
case Type::FunctionTyID: {
// Generate SPIRV instruction for function type.
FunctionType *FTy = cast<FunctionType>(Canonical);
// Ops[0] = Return Type ID
// Ops[1] ... Ops[n] = Parameter Type IDs
SPIRVOperandVec Ops;
// Find SPIRV instruction for return type
Ops << FTy->getReturnType();
// Find SPIRV instructions for parameter types
for (unsigned k = 0; k < FTy->getNumParams(); k++) {
// Find SPIRV instruction for parameter type.
auto ParamTy = FTy->getParamType(k);
if (ParamTy->isPointerTy()) {
auto PointeeTy = ParamTy->getPointerElementType();
if (PointeeTy->isStructTy() &&
cast<StructType>(PointeeTy)->isOpaque()) {
ParamTy = PointeeTy;
}
}
Ops << ParamTy;
}
RID = addSPIRVInst<kTypes>(spv::OpTypeFunction, Ops);
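// For illustration, an 'i32 (i32)' function type lowers (roughly) to
//   OpTypeFunction %uint %uint
// i.e. the return type followed by each parameter type.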
break;
}
}
if (RID.isValid()) {
TypeMap[Canonical] = RID;
if (Ty != Canonical) {
// Speed up future lookups of this type by also caching the non-canonical
// type.
TypeMap[Ty] = RID;
}
}
return RID;
}
void SPIRVProducerPass::GenerateSPIRVTypes() {
for (Type *Ty : getTypeList()) {
getSPIRVType(Ty);
}
}
SPIRVID SPIRVProducerPass::getSPIRVInt32Constant(uint32_t CstVal) {
Type *i32 = Type::getInt32Ty(module->getContext());
Constant *Cst = ConstantInt::get(i32, CstVal);
return getSPIRVValue(Cst);
}
SPIRVID SPIRVProducerPass::getSPIRVConstant(Constant *Cst) {
ValueMapType &VMap = getValueMap();
const bool hack_undef = clspv::Option::HackUndef();
SPIRVID RID;
//
// Generate OpConstant.
//
// Ops[0] = Result Type ID
// Ops[1] .. Ops[n] = Values LiteralNumber
SPIRVOperandVec Ops;
Ops << Cst->getType();
std::vector<uint32_t> LiteralNum;
spv::Op Opcode = spv::OpNop;
if (isa<UndefValue>(Cst)) {
// Ops[0] = Result Type ID
Opcode = spv::OpUndef;
if (hack_undef && IsTypeNullable(Cst->getType())) {
Opcode = spv::OpConstantNull;
}
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(Cst)) {
unsigned bit_width = CI->getBitWidth();
if (bit_width == 1) {
// If the bitwidth of constant is 1, generate OpConstantTrue or
// OpConstantFalse.
if (CI->getZExtValue()) {
// Ops[0] = Result Type ID
Opcode = spv::OpConstantTrue;
} else {
// Ops[0] = Result Type ID
Opcode = spv::OpConstantFalse;
}
} else {
auto V = CI->getZExtValue();
LiteralNum.push_back(V & 0xFFFFFFFF);
if (bit_width > 32) {
LiteralNum.push_back(V >> 32);
}
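// Multi-word literals are emitted low-order word first, per the SPIR-V
// spec: e.g. the i64 value 0x0000000100000002 becomes the two words
// 0x00000002, 0x00000001.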
Opcode = spv::OpConstant;
Ops << LiteralNum;
}
} else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Cst)) {
uint64_t FPVal = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
Type *CFPTy = CFP->getType();
if (CFPTy->isFloatTy()) {
LiteralNum.push_back(FPVal & 0xFFFFFFFF);
} else if (CFPTy->isDoubleTy()) {
LiteralNum.push_back(FPVal & 0xFFFFFFFF);
LiteralNum.push_back(FPVal >> 32);
} else if (CFPTy->isHalfTy()) {
LiteralNum.push_back(FPVal & 0xFFFF);
} else {
CFPTy->print(errs());
llvm_unreachable("Implement this ConstantFP Type");
}
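// e.g. 1.0f is emitted as the single word 0x3F800000, while a half 1.0
// occupies the low 16 bits of one word (0x00003C00).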
Opcode = spv::OpConstant;
Ops << LiteralNum;
} else if (isa<ConstantDataSequential>(Cst) &&
cast<ConstantDataSequential>(Cst)->isString()) {
Cst->print(errs());
llvm_unreachable("Implement this Constant");
} else if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(Cst)) {
// Convert a <4 x i8> constant into a packed i32 constant. This case
// occurs when all the element values are specified as constant ints.
Type *CstTy = Cst->getType();
if (is4xi8vec(CstTy)) {
//
// Generate OpConstant with OpTypeInt 32 0.
//
uint32_t IntValue = 0;
for (unsigned k = 0; k < 4; k++) {
const uint64_t Val = CDS->getElementAsInteger(k);
IntValue = (IntValue << 8) | (Val & 0xffu);
}
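// e.g. <i8 1, i8 2, i8 3, i8 4> packs to 0x01020304: element 0 lands in
// the most significant byte.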
RID = getSPIRVInt32Constant(IntValue);
} else {
// A normal constant-data-sequential case.
for (unsigned k = 0; k < CDS->getNumElements(); k++) {
Ops << CDS->getElementAsConstant(k);
}
Opcode = spv::OpConstantComposite;
}
} else if (const ConstantAggregate *CA = dyn_cast<ConstantAggregate>(Cst)) {
// Convert a <4 x i8> constant into a packed i32 constant. This case
// occurs when at least one of the element values is an undef.
Type *CstTy = Cst->getType();
if (is4xi8vec(CstTy)) {
//
// Generate OpConstant with OpTypeInt 32 0.
//
uint32_t IntValue = 0;
for (User::const_op_iterator I = Cst->op_begin(), E = Cst->op_end();
I != E; ++I) {
uint64_t Val = 0;
const Value *CV = *I;
if (auto *CI2 = dyn_cast<ConstantInt>(CV)) {
Val = CI2->getZExtValue();
}
IntValue = (IntValue << 8) | (Val & 0xffu);
}
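// Same packing as the ConstantDataSequential case above; an undef
// element contributes a zero byte.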
RID = getSPIRVInt32Constant(IntValue);
} else {
// We use a constant composite in SPIR-V for our constant aggregate in
// LLVM.
Opcode = spv::OpConstantComposite;
for (unsigned k = 0; k < CA->getNumOperands(); k++) {
// And add an operand to the composite we are constructing
Ops << CA->getAggregateElement(k);
}
}
} else if (Cst->isNullValue()) {
Opcode = spv::OpConstantNull;
} else {
Cst->print(errs());
llvm_unreachable("Unsupported Constant???");
}
if (Opcode == spv::OpConstantNull && Cst->getType()->isPointerTy()) {
// Null pointer requires variable pointers.
setVariablePointersCapabilities(Cst->getType()->getPointerAddressSpace());
}
if (!RID.isValid()) {
RID = addSPIRVInst<kConstants>(Opcode, Ops);
}
VMap[Cst] = RID;
return RID;
}
SPIRVID SPIRVProducerPass::getSPIRVValue(Value *V) {
auto II = ValueMap.find(V);
if (II != ValueMap.end()) {
assert(II->second.isValid());
return II->second;
}
if (Constant *Cst = dyn_cast<Constant>(V)) {
return getSPIRVConstant(Cst);
} else {
llvm_unreachable("Variable not found");
}
}
void SPIRVProducerPass::GenerateSamplers() {
auto &sampler_map = getSamplerMap();
SamplerLiteralToIDMap.clear();
DenseMap<unsigned, unsigned> SamplerLiteralToDescriptorSetMap;
DenseMap<unsigned, unsigned> SamplerLiteralToBindingMap;
// The sampler map may contain samplers that are never used in the
// translation unit; we still need to allocate variables and bindings
// for them.
DenseSet<unsigned> used_bindings;
auto *var_fn = module->getFunction(clspv::LiteralSamplerFunction());
// Return if there are no literal samplers.
if (!var_fn)
return;
for (auto user : var_fn->users()) {
// Populate SamplerLiteralToDescriptorSetMap and
// SamplerLiteralToBindingMap.
//
// Look for calls like
// call %opencl.sampler_t addrspace(2)*
// @clspv.sampler.var.literal(
// i32 descriptor,
// i32 binding,
// i32 (index-into-sampler-map|sampler_mask))
if (auto *call = dyn_cast<CallInst>(user)) {
const auto third_param = static_cast<unsigned>(
cast<ConstantInt>(call->getArgOperand(2))->getZExtValue());
auto sampler_value = third_param;
if (clspv::Option::UseSamplerMap()) {
if (third_param >= sampler_map.size()) {
errs() << "Out of bounds index to sampler map: " << third_param;
llvm_unreachable("bad sampler init: out of bounds");
}
sampler_value = sampler_map[third_param].first;
}
const auto descriptor_set = static_cast<unsigned>(
cast<ConstantInt>(call->getArgOperand(0))->getZExtValue());
const auto binding = static_cast<unsigned>(
cast<ConstantInt>(call->getArgOperand(1))->getZExtValue());
SamplerLiteralToDescriptorSetMap[sampler_value] = descriptor_set;
SamplerLiteralToBindingMap[sampler_value] = binding;
used_bindings.insert(binding);
}
}
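// Second pass: allocate one module-scope sampler variable per distinct
// sampler literal, reusing the bindings recorded above and picking
// fresh unused bindings for map entries that were never referenced.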
DenseSet<size_t> seen;
for (auto user : var_fn->users()) {
if (!isa<CallInst>(user))
continue;
auto call = cast<CallInst>(user);
const unsigned third_param = static_cast<unsigned>(
cast<ConstantInt>(call->getArgOperand(2))->getZExtValue());
// Already allocated a variable for this value.
if (!seen.insert(third_param).second)
continue;
auto sampler_value = third_param;
if (clspv::Option::UseSamplerMap()) {
sampler_value = sampler_map[third_param].first;
}
auto sampler_var_id = addSPIRVGlobalVariable(
getSPIRVType(SamplerTy), spv::StorageClassUniformConstant);
SamplerLiteralToIDMap[sampler_value] = sampler_var_id;
unsigned descriptor_set;
unsigned binding;
if (SamplerLiteralToBindingMap.find(sampler_value) ==
SamplerLiteralToBindingMap.end()) {
// This sampler is not actually used. Assign it the next unused binding.
for (binding = 0; used_bindings.count(binding); binding++) {
}
descriptor_set = 0; // Literal samplers always use descriptor set 0.
used_bindings.insert(binding);
} else {
descriptor_set = SamplerLiteralToDescriptorSetMap[sampler_value];
binding = SamplerLiteralToBindingMap[sampler_value];
auto import_id = getReflectionImport();
SPIRVOperandVec Ops;
Ops << getSPIRVType(Type::getVoidTy(module->getContext())) << import_id
<< reflection::ExtInstLiteralSampler
<< getSPIRVInt32Constant(descriptor_set)
<< getSPIRVInt32Constant(binding)
<< getSPIRVInt32Constant(sampler_value);
addSPIRVInst<kReflection>(spv::OpExtInst, Ops);
}
// Ops[0] = Target ID
// Ops[1] = Decoration (DescriptorSet)
// Ops[2] = LiteralNumber according to Decoration
SPIRVOperandVec Ops;
Ops << sampler_var_id << spv::DecorationDescriptorSet << descriptor_set;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
// Ops[0] = Target ID
// Ops[1] = Decoration (Binding)
// Ops[2] = LiteralNumber according to Decoration
Ops.clear();
Ops << sampler_var_id << spv::DecorationBinding << binding;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
}
void SPIRVProducerPass::GenerateResourceVars() {
ValueMapType &VMap = getValueMap();
// Generate variables: one for each resource variable info object.
for (auto *info : ModuleOrderedResourceVars) {
Type *type = info->var_fn->getReturnType();
// Remap the address space for opaque types.
switch (info->arg_kind) {
case clspv::ArgKind::Sampler:
case clspv::ArgKind::SampledImage:
case clspv::ArgKind::StorageImage:
type = PointerType::get(type->getPointerElementType(),
clspv::AddressSpace::UniformConstant);
break;
default:
break;
}
const auto sc = GetStorageClassForArgKind(info->arg_kind);
info->var_id = addSPIRVGlobalVariable(getSPIRVType(type), sc);
// Map calls to the variable-builtin-function.
for (auto &U : info->var_fn->uses()) {
if (auto *call = dyn_cast<CallInst>(U.getUser())) {
const auto set = unsigned(
cast<ConstantInt>(call->getOperand(0))->getZExtValue());
const auto binding = unsigned(
cast<ConstantInt>(call->getOperand(1))->getZExtValue());
if (set == info->descriptor_set && binding == info->binding) {
switch (info->arg_kind) {
case clspv::ArgKind::Buffer:
case clspv::ArgKind::BufferUBO:
case clspv::ArgKind::Pod:
case clspv::ArgKind::PodUBO:
case clspv::ArgKind::PodPushConstant:
// The call maps to the variable directly.
VMap[call] = info->var_id;
break;
case clspv::ArgKind::Sampler:
case clspv::ArgKind::SampledImage:
case clspv::ArgKind::StorageImage:
// The call maps to a load we generate later.
ResourceVarDeferredLoadCalls[call] = info->var_id;
break;
default:
llvm_unreachable("Unhandled arg kind");
}
}
}
}
}
// Generate associated decorations.
SPIRVOperandVec Ops;
for (auto *info : ModuleOrderedResourceVars) {
// Push constants don't need descriptor set or binding decorations.
if (info->arg_kind == clspv::ArgKind::PodPushConstant)
continue;
// Decorate with DescriptorSet and Binding.
Ops.clear();
Ops << info->var_id << spv::DecorationDescriptorSet << info->descriptor_set;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
Ops.clear();
Ops << info->var_id << spv::DecorationBinding << info->binding;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
if (info->coherent) {
// Decorate with Coherent if required for the variable.
Ops.clear();
Ops << info->var_id << spv::DecorationCoherent;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
// Generate NonWritable and NonReadable
switch (info->arg_kind) {
case clspv::ArgKind::Buffer:
case clspv::ArgKind::BufferUBO:
if (info->var_fn->getReturnType()->getPointerAddressSpace() ==
clspv::AddressSpace::Constant) {
Ops.clear();
Ops << info->var_id << spv::DecorationNonWritable;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
break;
case clspv::ArgKind::StorageImage: {
auto *type = info->var_fn->getReturnType();
auto *struct_ty = cast<StructType>(type->getPointerElementType());
// TODO(alan-baker): This is conservative. If compiling for OpenCL 2.0 or
// above, the compiler treats all write_only images as read_write images.
if (struct_ty->getName().contains("_wo_t")) {
Ops.clear();
Ops << info->var_id << spv::DecorationNonReadable;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
break;
}
default:
break;
}
}
}
void SPIRVProducerPass::GenerateGlobalVar(GlobalVariable &GV) {
ValueMapType &VMap = getValueMap();
std::vector<SPIRVID> &BuiltinDimVec = getBuiltinDimVec();
const DataLayout &DL = GV.getParent()->getDataLayout();
const spv::BuiltIn BuiltinType = GetBuiltin(GV.getName());
Type *Ty = GV.getType();
PointerType *PTy = cast<PointerType>(Ty);
SPIRVID InitializerID;
// Workgroup size is handled differently (it goes into a constant)
if (spv::BuiltInWorkgroupSize == BuiltinType) {
uint32_t PrevXDimCst = 0xFFFFFFFF;
uint32_t PrevYDimCst = 0xFFFFFFFF;
uint32_t PrevZDimCst = 0xFFFFFFFF;
bool HasMD = true;
for (Function &Func : *GV.getParent()) {
if (Func.isDeclaration()) {
continue;
}
// We only need to check kernels.
if (Func.getCallingConv() != CallingConv::SPIR_KERNEL) {
continue;
}
if (const MDNode *MD = Func.getMetadata("reqd_work_group_size")) {
uint32_t CurXDimCst = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue());
uint32_t CurYDimCst = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue());
uint32_t CurZDimCst = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(2))->getZExtValue());
if (PrevXDimCst == 0xFFFFFFFF && PrevYDimCst == 0xFFFFFFFF &&
PrevZDimCst == 0xFFFFFFFF) {
PrevXDimCst = CurXDimCst;
PrevYDimCst = CurYDimCst;
PrevZDimCst = CurZDimCst;
} else if (CurXDimCst != PrevXDimCst || CurYDimCst != PrevYDimCst ||
CurZDimCst != PrevZDimCst) {
HasMD = false;
continue;
} else {
continue;
}
//
// Generate OpConstantComposite.
//
// Ops[0] : Result Type ID
// Ops[1] : Constant size for x dimension.
// Ops[2] : Constant size for y dimension.
// Ops[3] : Constant size for z dimension.
SPIRVOperandVec Ops;
SPIRVID XDimCstID =
getSPIRVValue(mdconst::extract<ConstantInt>(MD->getOperand(0)));
SPIRVID YDimCstID =
getSPIRVValue(mdconst::extract<ConstantInt>(MD->getOperand(1)));
SPIRVID ZDimCstID =
getSPIRVValue(mdconst::extract<ConstantInt>(MD->getOperand(2)));
Ops << Ty->getPointerElementType() << XDimCstID << YDimCstID
<< ZDimCstID;
InitializerID =
addSPIRVInst<kGlobalVariables>(spv::OpConstantComposite, Ops);
} else {
HasMD = false;
}
}
// If the kernels do not all carry consistent reqd_work_group_size
// metadata (or any kernel lacks it), generate OpSpecConstants for the
// x/y/z dimensions instead.
if (!HasMD || clspv::Option::NonUniformNDRangeSupported()) {
//
// Generate OpSpecConstants for x/y/z dimension.
//
// Ops[0] : Result Type ID
// Ops[1] : Constant size for x/y/z dimension (Literal Number).
// Allocate spec constants for workgroup size.
clspv::AddWorkgroupSpecConstants(module);
SPIRVOperandVec Ops;
SPIRVID result_type_id = getSPIRVType(
dyn_cast<VectorType>(Ty->getPointerElementType())->getElementType());
// X Dimension
Ops << result_type_id << 1;
SPIRVID XDimCstID = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
// Y Dimension
Ops.clear();
Ops << result_type_id << 1;
SPIRVID YDimCstID = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
// Z Dimension
Ops.clear();
Ops << result_type_id << 1;
SPIRVID ZDimCstID = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
BuiltinDimVec.push_back(XDimCstID);
BuiltinDimVec.push_back(YDimCstID);
BuiltinDimVec.push_back(ZDimCstID);
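// These three IDs are decorated with SpecIds 0, 1 and 2 later, in
// GenerateModuleInfo().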
//
// Generate OpSpecConstantComposite.
//
// Ops[0] : Result Type ID
// Ops[1] : Constant size for x dimension.
// Ops[2] : Constant size for y dimension.
// Ops[3] : Constant size for z dimension.
Ops.clear();
Ops << Ty->getPointerElementType() << XDimCstID << YDimCstID << ZDimCstID;
InitializerID =
addSPIRVInst<kConstants>(spv::OpSpecConstantComposite, Ops);
}
} else if (BuiltinType == spv::BuiltInWorkDim) {
// 1. Generate a specialization constant with a default of 3.
// 2. Allocate and annotate a SpecId for the constant.
// 3. Use the spec constant as the initializer for the variable.
SPIRVOperandVec Ops;
//
// Generate OpSpecConstant.
//
// Ops[0] : Result Type ID
// Ops[1] : Default literal value
Ops << IntegerType::get(GV.getContext(), 32) << 3;
InitializerID = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
//
// Generate SpecId decoration.
//
// Ops[0] : target
// Ops[1] : decoration
// Ops[2] : SpecId
auto spec_id = AllocateSpecConstant(module, SpecConstant::kWorkDim);
Ops.clear();
Ops << InitializerID << spv::DecorationSpecId << spec_id;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
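// Net effect (sketch):
//   %wd = OpSpecConstant %uint 3
//   OpDecorate %wd SpecId <spec_id>
// and %wd becomes the variable's initializer below.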
} else if (BuiltinType == spv::BuiltInGlobalOffset) {
// 1. Generate a spec constant with a default of {0, 0, 0}.
// 2. Allocate and annotate SpecIds for the constants.
// 3. Use the spec constant as the initializer for the variable.
SPIRVOperandVec Ops;
//
// Generate OpSpecConstant for each dimension.
//
// Ops[0] : Result Type ID
// Ops[1] : Default literal value
//
Ops << IntegerType::get(GV.getContext(), 32) << 0;
SPIRVID x_id = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
Ops.clear();
Ops << IntegerType::get(GV.getContext(), 32) << 0;
SPIRVID y_id = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
Ops.clear();
Ops << IntegerType::get(GV.getContext(), 32) << 0;
SPIRVID z_id = addSPIRVInst<kConstants>(spv::OpSpecConstant, Ops);
//
// Generate SpecId decoration for each dimension.
//
// Ops[0] : target
// Ops[1] : decoration
// Ops[2] : SpecId
//
auto spec_id = AllocateSpecConstant(module, SpecConstant::kGlobalOffsetX);
Ops.clear();
Ops << x_id << spv::DecorationSpecId << spec_id;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
spec_id = AllocateSpecConstant(module, SpecConstant::kGlobalOffsetY);
Ops.clear();
Ops << y_id << spv::DecorationSpecId << spec_id;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
spec_id = AllocateSpecConstant(module, SpecConstant::kGlobalOffsetZ);
Ops.clear();
Ops << z_id << spv::DecorationSpecId << spec_id;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
//
// Generate OpSpecConstantComposite.
//
// Ops[0] : type id
// Ops[1..n-1] : elements
//
Ops.clear();
Ops << GV.getType()->getPointerElementType() << x_id << y_id << z_id;
InitializerID = addSPIRVInst<kConstants>(spv::OpSpecConstantComposite, Ops);
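// Net effect (sketch): a three-component spec-constant vector {0, 0, 0}
// whose scalar components carry the GlobalOffsetX/Y/Z SpecIds.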
}
const auto AS = PTy->getAddressSpace();
const auto spvSC = GetStorageClass(AS);
const bool module_scope_constant_external_init =
(AS == AddressSpace::Constant) && GV.hasInitializer() &&
clspv::Option::ModuleConstantsInStorageBuffer();
if (GV.hasInitializer()) {
auto GVInit = GV.getInitializer();
if (!isa<UndefValue>(GVInit) && !module_scope_constant_external_init) {
InitializerID = getSPIRVValue(GVInit);
}
}
SPIRVID var_id =
addSPIRVGlobalVariable(getSPIRVType(Ty), spvSC, InitializerID);
VMap[&GV] = var_id;
auto IsOpenCLBuiltin = [](spv::BuiltIn builtin) {
return builtin == spv::BuiltInWorkDim ||
builtin == spv::BuiltInGlobalOffset;
};
// If we have a builtin (and it is not one of the OpenCL builtins handled
// above).
if (spv::BuiltInMax != BuiltinType && !IsOpenCLBuiltin(BuiltinType)) {
//
// Generate OpDecorate.
//
// DOps[0] = Target ID
// DOps[1] = Decoration (Builtin)
// DOps[2] = BuiltIn ID
SPIRVID ResultID;
// WorkgroupSize is different: we decorate the constant composite that
// holds its value, rather than the variable used to access the value.
if (spv::BuiltInWorkgroupSize == BuiltinType) {
ResultID = InitializerID;
// Save both the value and variable IDs for later.
WorkgroupSizeValueID = InitializerID;
WorkgroupSizeVarID = getSPIRVValue(&GV);
} else {
ResultID = getSPIRVValue(&GV);
}
SPIRVOperandVec Ops;
Ops << ResultID << spv::DecorationBuiltIn << BuiltinType;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
} else if (module_scope_constant_external_init) {
// This module scope constant is initialized from a storage buffer with data
// provided by the host at binding 0 of the next descriptor set.
const uint32_t descriptor_set = TakeDescriptorIndex(module);
// Emit the initializer as a reflection instruction.
// Use "kind,buffer" to indicate storage buffer. We might want to expand
// that later to other types, like uniform buffer.
std::string hexbytes;
llvm::raw_string_ostream str(hexbytes);
clspv::ConstantEmitter(DL, str).Emit(GV.getInitializer());
// Reflection instruction for constant data.
SPIRVOperandVec Ops;
auto data_id = addSPIRVInst<kDebug>(spv::OpString, str.str().c_str());
Ops << getSPIRVType(Type::getVoidTy(module->getContext()))
<< getReflectionImport() << reflection::ExtInstConstantDataStorageBuffer
<< getSPIRVInt32Constant(descriptor_set) << getSPIRVInt32Constant(0)
<< data_id;
addSPIRVInst<kReflection>(spv::OpExtInst, Ops);
// OpDecorate %var DescriptorSet <descriptor_set>
Ops.clear();
Ops << var_id << spv::DecorationDescriptorSet << descriptor_set;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
// OpDecorate %var Binding <binding>
Ops.clear();
Ops << var_id << spv::DecorationBinding << 0;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
}
void SPIRVProducerPass::GenerateFuncPrologue(Function &F) {
ValueMapType &VMap = getValueMap();
EntryPointVecType &EntryPoints = getEntryPointVec();
auto &GlobalConstFuncTyMap = getGlobalConstFuncTypeMap();
auto &GlobalConstArgSet = getGlobalConstArgSet();
FunctionType *FTy = F.getFunctionType();
//
// Generate OpFunction.
//
// FOps[0] : Result Type ID
// FOps[1] : Function Control
// FOps[2] : Function Type ID
SPIRVOperandVec FOps;
// Find SPIRV instruction for return type.
FOps << FTy->getReturnType();
// Check function attributes for SPIRV Function Control.
uint32_t FuncControl = spv::FunctionControlMaskNone;
if (F.hasFnAttribute(Attribute::AlwaysInline)) {
FuncControl |= spv::FunctionControlInlineMask;
}
if (F.hasFnAttribute(Attribute::NoInline)) {
FuncControl |= spv::FunctionControlDontInlineMask;
}
// TODO: Check llvm attribute for Function Control Pure.
if (F.hasFnAttribute(Attribute::ReadOnly)) {
FuncControl |= spv::FunctionControlPureMask;
}
// TODO: Check llvm attribute for Function Control Const.
if (F.hasFnAttribute(Attribute::ReadNone)) {
FuncControl |= spv::FunctionControlConstMask;
}
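// e.g. a function marked both AlwaysInline and ReadNone is emitted with
// 'FunctionControl Inline|Const'.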
FOps << FuncControl;
SPIRVID FTyID;
if (F.getCallingConv() == CallingConv::SPIR_KERNEL) {
SmallVector<Type *, 4> NewFuncParamTys;
FunctionType *NewFTy =
FunctionType::get(FTy->getReturnType(), NewFuncParamTys, false);
FTyID = getSPIRVType(NewFTy);
} else {
// Handle regular function with global constant parameters.
if (GlobalConstFuncTyMap.count(FTy)) {
FTyID = getSPIRVType(GlobalConstFuncTyMap[FTy].first);
} else {
FTyID = getSPIRVType(FTy);
}
}
FOps << FTyID;
// Generate SPIRV instruction for function.
SPIRVID FID = addSPIRVInst(spv::OpFunction, FOps);
VMap[&F] = FID;
if (F.getCallingConv() == CallingConv::SPIR_KERNEL) {
EntryPoints.push_back(std::make_pair(&F, FID));
}
if (clspv::Option::ShowIDs()) {
errs() << "Function " << F.getName() << " is " << FID.get() << "\n";
}
//
// Generate OpFunctionParameter for Normal function.
//
if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
// Iterate over the function's Arguments rather than the FunctionType's
// parameter types, so each parameter Value is available for mapping.
unsigned ArgIdx = 0;
for (Argument &Arg : F.args()) {
// ParamOps[0] : Result Type ID
SPIRVOperandVec Ops;
// Find SPIRV instruction for parameter type.
SPIRVID ParamTyID = getSPIRVType(Arg.getType());
if (PointerType *PTy = dyn_cast<PointerType>(Arg.getType())) {
if (GlobalConstFuncTyMap.count(FTy)) {
if (ArgIdx == GlobalConstFuncTyMap[FTy].second) {
Type *EleTy = PTy->getPointerElementType();
Type *ArgTy =
PointerType::get(EleTy, AddressSpace::ModuleScopePrivate);
ParamTyID = getSPIRVType(ArgTy);
GlobalConstArgSet.insert(&Arg);
}
}
}
Ops << ParamTyID;
// Generate SPIRV instruction for parameter.
SPIRVID param_id = addSPIRVInst(spv::OpFunctionParameter, Ops);
VMap[&Arg] = param_id;
if (CalledWithCoherentResource(Arg)) {
// If the arg is passed a coherent resource ever, then decorate this
// parameter with Coherent too.
Ops.clear();
Ops << param_id << spv::DecorationCoherent;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
ArgIdx++;
}
}
}
void SPIRVProducerPass::GenerateModuleInfo() {
EntryPointVecType &EntryPoints = getEntryPointVec();
auto &EntryPointInterfaces = getEntryPointInterfacesList();
std::vector<SPIRVID> &BuiltinDimVec = getBuiltinDimVec();
SPIRVOperandVec Ops;
for (auto Capability : CapabilitySet) {
//
// Generate OpCapability
//
// Ops[0] = Capability
addSPIRVInst<kCapabilities>(spv::OpCapability, Capability);
}
// Always add the storage buffer extension
{
//
// Generate OpExtension.
//
// Ops[0] = Name (Literal String)
//
addSPIRVInst<kExtensions>(spv::OpExtension,
"SPV_KHR_storage_buffer_storage_class");
}
if (hasVariablePointers() || hasVariablePointersStorageBuffer()) {
//
// Generate OpExtension.
//
// Ops[0] = Name (Literal String)
//
addSPIRVInst<kExtensions>(spv::OpExtension, "SPV_KHR_variable_pointers");
}
//
// Generate OpMemoryModel
//
// Memory model for Vulkan will always be GLSL450.
// Ops[0] = Addressing Model
// Ops[1] = Memory Model
Ops.clear();
Ops << spv::AddressingModelLogical << spv::MemoryModelGLSL450;
addSPIRVInst<kMemoryModel>(spv::OpMemoryModel, Ops);
//
// Generate OpEntryPoint
//
for (auto EntryPoint : EntryPoints) {
// Ops[0] = Execution Model
// Ops[1] = EntryPoint ID
// Ops[2] = Name (Literal String)
// ...
//
// TODO: Do we need to consider Interface ID for forward references???
Ops.clear();
const StringRef &name = EntryPoint.first->getName();
Ops << spv::ExecutionModelGLCompute << EntryPoint.second << name;
for (auto &Interface : EntryPointInterfaces) {
Ops << Interface;
}
addSPIRVInst<kEntryPoints>(spv::OpEntryPoint, Ops);
}
if (BuiltinDimVec.empty()) {
for (auto EntryPoint : EntryPoints) {
const MDNode *MD =
EntryPoint.first->getMetadata("reqd_work_group_size");
if ((MD != nullptr) && !clspv::Option::NonUniformNDRangeSupported()) {
//
// Generate OpExecutionMode
//
// Ops[0] = Entry Point ID
// Ops[1] = Execution Mode
// Ops[2] ... Ops[n] = Optional literals according to Execution Mode
Ops.clear();
Ops << EntryPoint.second << spv::ExecutionModeLocalSize;
uint32_t XDim = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(0))->getZExtValue());
uint32_t YDim = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue());
uint32_t ZDim = static_cast<uint32_t>(
mdconst::extract<ConstantInt>(MD->getOperand(2))->getZExtValue());
Ops << XDim << YDim << ZDim;
addSPIRVInst<kExecutionModes>(spv::OpExecutionMode, Ops);
}
}
}
//
// Generate OpSource.
//
// Ops[0] = SourceLanguage ID
// Ops[1] = Version (LiteralNum)
//
uint32_t LangID = spv::SourceLanguageUnknown;
uint32_t LangVer = 0;
switch (clspv::Option::Language()) {
case clspv::Option::SourceLanguage::OpenCL_C_10:
LangID = spv::SourceLanguageOpenCL_C;
LangVer = 100;
break;
case clspv::Option::SourceLanguage::OpenCL_C_11:
LangID = spv::SourceLanguageOpenCL_C;
LangVer = 110;
break;
case clspv::Option::SourceLanguage::OpenCL_C_12:
LangID = spv::SourceLanguageOpenCL_C;
LangVer = 120;
break;
case clspv::Option::SourceLanguage::OpenCL_C_20:
LangID = spv::SourceLanguageOpenCL_C;
LangVer = 200;
break;
case clspv::Option::SourceLanguage::OpenCL_C_30:
LangID = spv::SourceLanguageOpenCL_C;
LangVer = 300;
break;
case clspv::Option::SourceLanguage::OpenCL_CPP:
LangID = spv::SourceLanguageOpenCL_CPP;
LangVer = 100;
break;
default:
break;
}
Ops.clear();
Ops << LangID << LangVer;
addSPIRVInst<kDebug>(spv::OpSource, Ops);
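// e.g. an OpenCL C 2.0 input module yields 'OpSource OpenCL_C 200'.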
if (!BuiltinDimVec.empty()) {
//
// Generate OpDecorates for x/y/z dimension.
//
// Ops[0] = Target ID
// Ops[1] = Decoration (SpecId)
// Ops[2] = Specialization Constant ID (Literal Number)
// X Dimension
Ops.clear();
Ops << BuiltinDimVec[0] << spv::DecorationSpecId << 0;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
// Y Dimension
Ops.clear();
Ops << BuiltinDimVec[1] << spv::DecorationSpecId << 1;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
// Z Dimension
Ops.clear();
Ops << BuiltinDimVec[2] << spv::DecorationSpecId << 2;
addSPIRVInst<kAnnotations>(spv::OpDecorate, Ops);
}
}
void SPIRVProducerPass::GenerateEntryPointInitialStores() {
// Work around a driver bug. Initializers on Private variables might not
// work. So the start of the kernel should store the initializer value to the
// variables. Yes, *every* entry point pays this cost if *any* entry point