//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the parent TargetLowering class for hardware code gen targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDILIntrinsicInfo.h"
#include "AMDGPUUtil.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  AMDILTargetLowering(TM)
{
  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
}
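
/// Dispatch the operations marked Custom in the constructor; anything not
/// handled here falls back to the AMDIL lowering.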
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
    SelectionDAG &DAG) const
{
  switch (Op.getOpcode()) {
  default: return AMDILTargetLowering::LowerOperation(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}
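
/// Lower intrinsics that have no side effects into target-specific DAG nodes.
/// Operand 0 of an INTRINSIC_WO_CHAIN node holds the intrinsic ID; the
/// remaining operands are the intrinsic's arguments.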
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const
{
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  DebugLoc DL = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDIL_abs:
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDIL_mad:
    return DAG.getNode(AMDILISD::MAD, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3));
  case AMDGPUIntrinsic::AMDIL_max:
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
    SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  // Neg = 0 - a
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));
  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
    SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();
  EVT VT = Op.getValueType();
  // OneSubAC = (1 - a) * c
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, VT),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  // MAD(a, b, OneSubAC) = a * b + (1 - a) * c
  return DAG.getNode(AMDILISD::MAD, DL, VT, Op.getOperand(1),
                     Op.getOperand(2), OneSubAC);
}
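
/// Register \p reg as a live-in physical register of the function and copy
/// its value into the register defined by \p MI's first operand
/// (see AMDGPU::utilAddLiveIn).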
void AMDGPUTargetLowering::addLiveIn(MachineInstr *MI,
    MachineFunction *MF, MachineRegisterInfo &MRI,
    const TargetInstrInfo *TII, unsigned reg) const
{
  AMDGPU::utilAddLiveIn(MF, MRI, TII, reg, MI->getOperand(0).getReg());
}
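
/// Names for AMDGPU-specific DAG nodes, used by SelectionDAG debug output;
/// unknown opcodes are passed on to the AMDIL implementation.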
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char * AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode) {
  default: return AMDILTargetLowering::getTargetNodeName(Opcode);
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  }
}