path: root/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
author:    dim <dim@FreeBSD.org>    2016-12-26 20:36:37 +0000
committer: dim <dim@FreeBSD.org>    2016-12-26 20:36:37 +0000
commit:    06210ae42d418d50d8d9365d5c9419308ae9e7ee
tree:      ab60b4cdd6e430dda1f292a46a77ddb744723f31
parent:    2dd166267f53df1c3748b4325d294b9b839de74b
MFC r309124:

Upgrade our copies of clang, llvm, lldb, compiler-rt and libc++ to 3.9.0
release, and add lld 3.9.0. Also completely revamp the build system for
clang, llvm, lldb and their related tools.

Please note that from 3.5.0 onwards, clang, llvm and lldb require C++11
support to build; see UPDATING for more information.

Release notes for llvm, clang and lld are available here:
<http://llvm.org/releases/3.9.0/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.9.0/tools/clang/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.9.0/tools/lld/docs/ReleaseNotes.html>

Thanks to Ed Maste, Bryan Drewery, Andrew Turner, Antoine Brodin and
Jan Beich for their help.

Relnotes:	yes

MFC r309147:

Pull in r282174 from upstream llvm trunk (by Krzysztof Parzyszek):

  [PPC] Set SP after loading data from stack frame, if no red zone is present

  Follow-up to r280705: Make sure that the SP is only restored after all
  data is loaded from the stack frame, if there is no red zone.

  This completes the fix for https://llvm.org/bugs/show_bug.cgi?id=26519.

  Differential Revision: https://reviews.llvm.org/D24466

Reported by:	Mark Millard
PR:		214433

MFC r309149:

Pull in r283060 from upstream llvm trunk (by Hal Finkel):

  [PowerPC] Refactor soft-float support, and enable PPC64 soft float

  This change enables soft-float for PowerPC64, and also makes soft-float
  disable all vector instruction sets for both 32-bit and 64-bit modes.
  This latter part is necessary because the PPC backend canonicalizes many
  Altivec vector types to floating-point types, and so soft-float breaks
  scalarization support for many operations. Both for embedded targets and
  for operating-system kernels desiring soft-float support, it seems
  reasonable that disabling hardware floating-point also disables vector
  instructions (embedded targets without hardware floating point support
  are unlikely to have Altivec, etc. and operating system kernels desiring
  not to use floating-point registers to lower syscall cost are unlikely
  to want to use vector registers either). If someone needs this to work,
  we'll need to change the fact that we promote many Altivec operations to
  act on v4f32.

  To make it possible to disable Altivec when soft-float is enabled,
  hardware floating-point support needs to be expressed as a positive
  feature, like the others, and not a negative feature, because target
  features cannot have dependencies on the disabling of some other
  feature. So +soft-float has now become -hard-float.

  Fixes PR26970.

Pull in r283061 from upstream clang trunk (by Hal Finkel):

  [PowerPC] Enable soft-float for PPC64, and +soft-float -> -hard-float

  Enable soft-float support on PPC64, as the backend now supports it.
  Also, the backend now uses -hard-float instead of +soft-float, so set
  the target features accordingly.

  Fixes PR26970.

Reported by:	Mark Millard
PR:		214433

MFC r309212:

Add a few missed clang 3.9.0 files to OptionalObsoleteFiles.

MFC r309262:

Fix packaging for clang, lldb and lld 3.9.0

During the upgrade of clang/llvm etc to 3.9.0 in r309124, the PACKAGE
directive in the usr.bin/clang/*.mk files got dropped accidentally.

Restore it, with a few minor changes and additions:
* Correct license in clang.ucl to NCSA
* Add PACKAGE=clang for clang and most of the "ll" tools
* Put lldb in its own package
* Put lld in its own package

Reviewed by:	gjb, jmallett
Differential Revision:	https://reviews.freebsd.org/D8666

MFC r309656:

During the bootstrap phase, when building the minimal llvm library on
PowerPC, add lib/Support/Atomic.cpp. This is needed because upstream llvm
revision r271821 disabled the use of std::call_once, which causes some
fallback functions from Atomic.cpp to be used instead.

Reported by:	Mark Millard
PR:		214902

MFC r309835:

Tentatively apply https://reviews.llvm.org/D18730 to work around gcc PR
70528 (bogus error: constructor required before non-static data member).
This should fix buildworld with the external gcc package.

Reported by:	https://jenkins.freebsd.org/job/FreeBSD_HEAD_amd64_gcc/

MFC r310194:

Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to
3.9.1 release.

Please note that from 3.5.0 onwards, clang, llvm and lldb require C++11
support to build; see UPDATING for more information.

Release notes for llvm, clang and lld will be available here:
<http://releases.llvm.org/3.9.1/docs/ReleaseNotes.html>
<http://releases.llvm.org/3.9.1/tools/clang/docs/ReleaseNotes.html>
<http://releases.llvm.org/3.9.1/tools/lld/docs/ReleaseNotes.html>

Relnotes:	yes
Diffstat (limited to 'contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp')
-rw-r--r--  contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp  1108
1 file changed, 1105 insertions, 3 deletions
diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 1b761b1..7bfa407 100644
--- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -12,10 +12,13 @@
/// class. Some of the target-specific code is generated by tablegen in the file
/// WebAssemblyGenFastISel.inc, which is #included here.
///
+/// TODO: kill flags
+///
//===----------------------------------------------------------------------===//
#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
+#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
@@ -41,13 +44,122 @@ using namespace llvm;
namespace {
class WebAssemblyFastISel final : public FastISel {
+ // All possible address modes.
+ class Address {
+ public:
+ typedef enum { RegBase, FrameIndexBase } BaseKind;
+
+ private:
+ BaseKind Kind;
+ union {
+ unsigned Reg;
+ int FI;
+ } Base;
+
+ int64_t Offset;
+
+ const GlobalValue *GV;
+
+ public:
+ // Innocuous defaults for our address.
+ Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; }
+ void setKind(BaseKind K) { Kind = K; }
+ BaseKind getKind() const { return Kind; }
+ bool isRegBase() const { return Kind == RegBase; }
+ bool isFIBase() const { return Kind == FrameIndexBase; }
+ void setReg(unsigned Reg) {
+ assert(isRegBase() && "Invalid base register access!");
+ Base.Reg = Reg;
+ }
+ unsigned getReg() const {
+ assert(isRegBase() && "Invalid base register access!");
+ return Base.Reg;
+ }
+ void setFI(unsigned FI) {
+ assert(isFIBase() && "Invalid base frame index access!");
+ Base.FI = FI;
+ }
+ unsigned getFI() const {
+ assert(isFIBase() && "Invalid base frame index access!");
+ return Base.FI;
+ }
+
+ void setOffset(int64_t Offset_) { Offset = Offset_; }
+ int64_t getOffset() const { return Offset; }
+ void setGlobalValue(const GlobalValue *G) { GV = G; }
+ const GlobalValue *getGlobalValue() const { return GV; }
+ };
+
/// Keep a pointer to the WebAssemblySubtarget around so that we can make the
/// right decision when generating code for different targets.
const WebAssemblySubtarget *Subtarget;
LLVMContext *Context;
- // Call handling routines.
private:
+ // Utility helper routines
+ MVT::SimpleValueType getSimpleType(Type *Ty) {
+ EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
+ return VT.isSimple() ? VT.getSimpleVT().SimpleTy :
+ MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
+ switch (VT) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ return MVT::i32;
+ case MVT::i32:
+ case MVT::i64:
+ case MVT::f32:
+ case MVT::f64:
+ return VT;
+ default:
+ break;
+ }
+ return MVT::INVALID_SIMPLE_VALUE_TYPE;
+ }
+ bool computeAddress(const Value *Obj, Address &Addr);
+ void materializeLoadStoreOperands(Address &Addr);
+ void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO);
+ unsigned maskI1Value(unsigned Reg, const Value *V);
+ unsigned getRegForI1Value(const Value *V, bool &Not);
+ unsigned zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From);
+ unsigned zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To);
+ unsigned getRegForUnsignedValue(const Value *V);
+ unsigned getRegForSignedValue(const Value *V);
+ unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
+ unsigned notValue(unsigned Reg);
+ unsigned copyValue(unsigned Reg);
+
+ // Backend specific FastISel code.
+ unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
+ unsigned fastMaterializeConstant(const Constant *C) override;
+ bool fastLowerArguments() override;
+
+ // Selection routines.
+ bool selectCall(const Instruction *I);
+ bool selectSelect(const Instruction *I);
+ bool selectTrunc(const Instruction *I);
+ bool selectZExt(const Instruction *I);
+ bool selectSExt(const Instruction *I);
+ bool selectICmp(const Instruction *I);
+ bool selectFCmp(const Instruction *I);
+ bool selectBitCast(const Instruction *I);
+ bool selectLoad(const Instruction *I);
+ bool selectStore(const Instruction *I);
+ bool selectBr(const Instruction *I);
+ bool selectRet(const Instruction *I);
+ bool selectUnreachable(const Instruction *I);
+
public:
// Backend specific FastISel code.
WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
@@ -64,11 +176,1001 @@ public:
} // end anonymous namespace
+bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
+
+ const User *U = nullptr;
+ unsigned Opcode = Instruction::UserOp1;
+ if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+ // Don't walk into other basic blocks unless the object is an alloca from
+ // another block, otherwise it may not have a virtual register assigned.
+ if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
+ FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
+ Opcode = I->getOpcode();
+ U = I;
+ }
+ } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+ Opcode = C->getOpcode();
+ U = C;
+ }
+
+ if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
+ if (Ty->getAddressSpace() > 255)
+ // Fast instruction selection doesn't support the special
+ // address spaces.
+ return false;
+
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
+ if (Addr.getGlobalValue())
+ return false;
+ Addr.setGlobalValue(GV);
+ return true;
+ }
+
+ switch (Opcode) {
+ default:
+ break;
+ case Instruction::BitCast: {
+ // Look through bitcasts.
+ return computeAddress(U->getOperand(0), Addr);
+ }
+ case Instruction::IntToPtr: {
+ // Look past no-op inttoptrs.
+ if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
+ TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::PtrToInt: {
+ // Look past no-op ptrtoints.
+ if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
+ return computeAddress(U->getOperand(0), Addr);
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Address SavedAddr = Addr;
+ uint64_t TmpOffset = Addr.getOffset();
+ // Iterate through the GEP folding the constants into offsets where
+ // we can.
+ for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
+ GTI != E; ++GTI) {
+ const Value *Op = GTI.getOperand();
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ const StructLayout *SL = DL.getStructLayout(STy);
+ unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
+ TmpOffset += SL->getElementOffset(Idx);
+ } else {
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
+ for (;;) {
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+ // Constant-offset addressing.
+ TmpOffset += CI->getSExtValue() * S;
+ break;
+ }
+ if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
+ // An unscaled add of a register. Set it as the new base.
+ Addr.setReg(getRegForValue(Op));
+ break;
+ }
+ if (canFoldAddIntoGEP(U, Op)) {
+ // A compatible add with a constant operand. Fold the constant.
+ ConstantInt *CI =
+ cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
+ TmpOffset += CI->getSExtValue() * S;
+ // Iterate on the other operand.
+ Op = cast<AddOperator>(Op)->getOperand(0);
+ continue;
+ }
+ // Unsupported
+ goto unsupported_gep;
+ }
+ }
+ }
+ // Try to grab the base operand now.
+ Addr.setOffset(TmpOffset);
+ if (computeAddress(U->getOperand(0), Addr))
+ return true;
+ // We failed, restore everything and try the other options.
+ Addr = SavedAddr;
+ unsupported_gep:
+ break;
+ }
+ case Instruction::Alloca: {
+ const AllocaInst *AI = cast<AllocaInst>(Obj);
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ Addr.setKind(Address::FrameIndexBase);
+ Addr.setFI(SI->second);
+ return true;
+ }
+ break;
+ }
+ case Instruction::Add: {
+ // Adds of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (isa<ConstantInt>(LHS))
+ std::swap(LHS, RHS);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
+ return computeAddress(LHS, Addr);
+ }
+
+ Address Backup = Addr;
+ if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
+ return true;
+ Addr = Backup;
+
+ break;
+ }
+ case Instruction::Sub: {
+ // Subs of constants are common and easy enough.
+ const Value *LHS = U->getOperand(0);
+ const Value *RHS = U->getOperand(1);
+
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+ Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
+ return computeAddress(LHS, Addr);
+ }
+ break;
+ }
+ }
+ Addr.setReg(getRegForValue(Obj));
+ return Addr.getReg() != 0;
+}
+
+void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
+ if (Addr.isRegBase()) {
+ unsigned Reg = Addr.getReg();
+ if (Reg == 0) {
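+ // No base register was computed for this address; materialize a
+ // constant 0 so the load/store still has a base address operand.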
+ Reg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg)
+ .addImm(0);
+ Addr.setReg(Reg);
+ }
+ }
+}
+
+void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
+ const MachineInstrBuilder &MIB,
+ MachineMemOperand *MMO) {
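+ // The offset (or a global address plus offset) is added first, followed
+ // by the base register or frame index, then the alignment operand.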
+ if (const GlobalValue *GV = Addr.getGlobalValue())
+ MIB.addGlobalAddress(GV, Addr.getOffset());
+ else
+ MIB.addImm(Addr.getOffset());
+
+ if (Addr.isRegBase())
+ MIB.addReg(Addr.getReg());
+ else
+ MIB.addFrameIndex(Addr.getFI());
+
+ // Set the alignment operand (this is rewritten in SetP2AlignOperands).
+ // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
+ MIB.addImm(0);
+
+ MIB.addMemOperand(MMO);
+}
+
+unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
+ return zeroExtendToI32(Reg, V, MVT::i1);
+}
+
+unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V, bool &Not) {
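+ // Fold equality comparisons against zero and 'not' expressions into the
+ // condition itself, reporting the inverted sense through 'Not' instead
+ // of emitting an explicit negation.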
+ if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(V))
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(ICmp->getOperand(1)))
+ if (ICmp->isEquality() && C->isZero() && C->getType()->isIntegerTy(32)) {
+ Not = ICmp->isTrueWhenEqual();
+ return getRegForValue(ICmp->getOperand(0));
+ }
+
+ if (BinaryOperator::isNot(V)) {
+ Not = true;
+ return getRegForValue(BinaryOperator::getNotArgument(V));
+ }
+
+ Not = false;
+ return maskI1Value(getRegForValue(V), V);
+}
+
+unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ switch (From) {
+ case MVT::i1:
+ // If the value is naturally an i1, we don't need to mask it.
+ // TODO: Recursively examine selects, phis, and, or, xor, constants.
+ if (From == MVT::i1 && V != nullptr) {
+ if (isa<CmpInst>(V) ||
+ (isa<Argument>(V) && cast<Argument>(V)->hasZExtAttr()))
+ return copyValue(Reg);
+ }
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
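+ // Otherwise AND the value with a mask that keeps only the low bits of
+ // the source type; for an i8 source, ~(~0 << 8) is 0xff.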
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));
+
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::AND_I32), Result)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ return Result;
+}
+
+unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From) {
+ switch (From) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ break;
+ case MVT::i32:
+ return copyValue(Reg);
+ default:
+ return 0;
+ }
+
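+ // Sign-extend by shifting the value left so its top bit lands in bit 31,
+ // then shifting it back down with an arithmetic (sign-propagating) shift.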
+ unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::CONST_I32), Imm)
+ .addImm(32 - MVT(From).getSizeInBits());
+
+ unsigned Left = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHL_I32), Left)
+ .addReg(Reg)
+ .addReg(Imm);
+
+ unsigned Right = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::SHR_S_I32), Right)
+ .addReg(Left)
+ .addReg(Imm);
+
+ return Right;
+}
+
+unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = zeroExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return zeroExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
+ MVT::SimpleValueType From,
+ MVT::SimpleValueType To) {
+ if (To == MVT::i64) {
+ if (From == MVT::i64)
+ return copyValue(Reg);
+
+ Reg = signExtendToI32(Reg, V, From);
+
+ unsigned Result = createResultReg(&WebAssembly::I64RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
+ .addReg(Reg);
+ return Result;
+ }
+
+ return signExtendToI32(Reg, V, From);
+}
+
+unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ return zeroExtend(getRegForValue(V), V, From, To);
+}
+
+unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
+ MVT::SimpleValueType From = getSimpleType(V->getType());
+ MVT::SimpleValueType To = getLegalType(From);
+ return signExtend(getRegForValue(V), V, From, To);
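+ // Signed values are promoted with a sign extension, unlike the zero
+ // extension used for unsigned values above.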
+}
+
+unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
+ bool IsSigned) {
+ return IsSigned ? getRegForSignedValue(V) :
+ getRegForUnsignedValue(V);
+}
+
+unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
+ assert(MRI.getRegClass(Reg) == &WebAssembly::I32RegClass);
+
+ unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::EQZ_I32), NotReg)
+ .addReg(Reg);
+ return NotReg;
+}
+
+unsigned WebAssemblyFastISel::copyValue(unsigned Reg) {
+ unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::COPY), ResultReg)
+ .addReg(Reg);
+ return ResultReg;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
+ DenseMap<const AllocaInst *, int>::iterator SI =
+ FuncInfo.StaticAllocaMap.find(AI);
+
+ if (SI != FuncInfo.StaticAllocaMap.end()) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::COPY_LOCAL_I64 :
+ WebAssembly::COPY_LOCAL_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addFrameIndex(SI->second);
+ return ResultReg;
+ }
+
+ return 0;
+}
+
+unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
+ unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
+ &WebAssembly::I64RegClass :
+ &WebAssembly::I32RegClass);
+ unsigned Opc = Subtarget->hasAddr64() ?
+ WebAssembly::CONST_I64 :
+ WebAssembly::CONST_I32;
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addGlobalAddress(GV);
+ return ResultReg;
+ }
+
+ // Let target-independent code handle it.
+ return 0;
+}
+
+bool WebAssemblyFastISel::fastLowerArguments() {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const Function *F = FuncInfo.Fn;
+ if (F->isVarArg())
+ return false;
+
+ unsigned i = 0;
+ for (auto const &Arg : F->args()) {
+ const AttributeSet &Attrs = F->getAttributes();
+ if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
+ Attrs.hasAttribute(i+1, Attribute::InAlloca) ||
+ Attrs.hasAttribute(i+1, Attribute::Nest))
+ return false;
+
+ Type *ArgTy = Arg.getType();
+ if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(ArgTy)) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::ARGUMENT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::ARGUMENT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::ARGUMENT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::ARGUMENT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
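+ // The ARGUMENT_* pseudo-instructions take the argument's index as an
+ // immediate operand.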
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addImm(i);
+ updateValueMap(&Arg, ResultReg);
+
+ ++i;
+ }
+
+ MRI.addLiveIn(WebAssembly::ARGUMENTS);
+
+ auto *MFI = MF->getInfo<WebAssemblyFunctionInfo>();
+ for (auto const &Arg : F->args())
+ MFI->addParam(getLegalType(getSimpleType(Arg.getType())));
+
+ return true;
+}
+
+bool WebAssemblyFastISel::selectCall(const Instruction *I) {
+ const CallInst *Call = cast<CallInst>(I);
+
+ if (Call->isMustTailCall() || Call->isInlineAsm() ||
+ Call->getFunctionType()->isVarArg())
+ return false;
+
+ Function *Func = Call->getCalledFunction();
+ if (Func && Func->isIntrinsic())
+ return false;
+
+ FunctionType *FuncTy = Call->getFunctionType();
+ unsigned Opc;
+ bool IsDirect = Func != nullptr;
+ bool IsVoid = FuncTy->getReturnType()->isVoidTy();
+ unsigned ResultReg;
+ if (IsVoid) {
+ Opc = IsDirect ? WebAssembly::CALL_VOID : WebAssembly::CALL_INDIRECT_VOID;
+ } else {
+ MVT::SimpleValueType RetTy = getSimpleType(Call->getType());
+ switch (RetTy) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = IsDirect ? WebAssembly::CALL_I32 : WebAssembly::CALL_INDIRECT_I32;
+ ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ break;
+ case MVT::i64:
+ Opc = IsDirect ? WebAssembly::CALL_I64 : WebAssembly::CALL_INDIRECT_I64;
+ ResultReg = createResultReg(&WebAssembly::I64RegClass);
+ break;
+ case MVT::f32:
+ Opc = IsDirect ? WebAssembly::CALL_F32 : WebAssembly::CALL_INDIRECT_F32;
+ ResultReg = createResultReg(&WebAssembly::F32RegClass);
+ break;
+ case MVT::f64:
+ Opc = IsDirect ? WebAssembly::CALL_F64 : WebAssembly::CALL_INDIRECT_F64;
+ ResultReg = createResultReg(&WebAssembly::F64RegClass);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ SmallVector<unsigned, 8> Args;
+ for (unsigned i = 0, e = Call->getNumArgOperands(); i < e; ++i) {
+ Value *V = Call->getArgOperand(i);
+ MVT::SimpleValueType ArgTy = getSimpleType(V->getType());
+ if (ArgTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
+ return false;
+
+ const AttributeSet &Attrs = Call->getAttributes();
+ if (Attrs.hasAttribute(i+1, Attribute::ByVal) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftSelf) ||
+ Attrs.hasAttribute(i+1, Attribute::SwiftError) ||
+ Attrs.hasAttribute(i+1, Attribute::InAlloca) ||
+ Attrs.hasAttribute(i+1, Attribute::Nest))
+ return false;
+
+ unsigned Reg;
+
+ if (Attrs.hasAttribute(i+1, Attribute::SExt))
+ Reg = getRegForSignedValue(V);
+ else if (Attrs.hasAttribute(i+1, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(V);
+ else
+ Reg = getRegForValue(V);
+
+ if (Reg == 0)
+ return false;
+
+ Args.push_back(Reg);
+ }
+
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+
+ if (!IsVoid)
+ MIB.addReg(ResultReg, RegState::Define);
+
+ if (IsDirect)
+ MIB.addGlobalAddress(Func);
+ else
+ MIB.addReg(getRegForValue(Call->getCalledValue()));
+
+ for (unsigned ArgReg : Args)
+ MIB.addReg(ArgReg);
+
+ if (!IsVoid)
+ updateValueMap(Call, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSelect(const Instruction *I) {
+ const SelectInst *Select = cast<SelectInst>(I);
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Select->getCondition(), Not);
+ if (CondReg == 0)
+ return false;
+
+ unsigned TrueReg = getRegForValue(Select->getTrueValue());
+ if (TrueReg == 0)
+ return false;
+
+ unsigned FalseReg = getRegForValue(Select->getFalseValue());
+ if (FalseReg == 0)
+ return false;
+
+ if (Not)
+ std::swap(TrueReg, FalseReg);
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Select->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ case MVT::i16:
+ case MVT::i32:
+ Opc = WebAssembly::SELECT_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::SELECT_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::SELECT_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::SELECT_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(RC);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(TrueReg)
+ .addReg(FalseReg)
+ .addReg(CondReg);
+
+ updateValueMap(Select, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectTrunc(const Instruction *I) {
+ const TruncInst *Trunc = cast<TruncInst>(I);
+
+ unsigned Reg = getRegForValue(Trunc->getOperand(0));
+ if (Reg == 0)
+ return false;
+
+ if (Trunc->getOperand(0)->getType()->isIntegerTy(64)) {
+ unsigned Result = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::I32_WRAP_I64), Result)
+ .addReg(Reg);
+ Reg = Result;
+ }
+
+ updateValueMap(Trunc, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
+ const ZExtInst *ZExt = cast<ZExtInst>(I);
+
+ const Value *Op = ZExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(ZExt->getType()));
+ unsigned Reg = zeroExtend(getRegForValue(Op), Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(ZExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
+ const SExtInst *SExt = cast<SExtInst>(I);
+
+ const Value *Op = SExt->getOperand(0);
+ MVT::SimpleValueType From = getSimpleType(Op->getType());
+ MVT::SimpleValueType To = getLegalType(getSimpleType(SExt->getType()));
+ unsigned Reg = signExtend(getRegForValue(Op), Op, From, To);
+ if (Reg == 0)
+ return false;
+
+ updateValueMap(SExt, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
+ const ICmpInst *ICmp = cast<ICmpInst>(I);
+
+ bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
+ unsigned Opc;
+ bool isSigned = false;
+ switch (ICmp->getPredicate()) {
+ case ICmpInst::ICMP_EQ:
+ Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
+ break;
+ case ICmpInst::ICMP_NE:
+ Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
+ break;
+ case ICmpInst::ICMP_UGT:
+ Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
+ break;
+ case ICmpInst::ICMP_UGE:
+ Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
+ break;
+ case ICmpInst::ICMP_ULT:
+ Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
+ break;
+ case ICmpInst::ICMP_ULE:
+ Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
+ break;
+ case ICmpInst::ICMP_SGT:
+ Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SGE:
+ Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLT:
+ Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
+ isSigned = true;
+ break;
+ case ICmpInst::ICMP_SLE:
+ Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
+ isSigned = true;
+ break;
+ default: return false;
+ }
+
+ unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned);
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned);
+ if (RHS == 0)
+ return false;
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+ updateValueMap(ICmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
+ const FCmpInst *FCmp = cast<FCmpInst>(I);
+
+ unsigned LHS = getRegForValue(FCmp->getOperand(0));
+ if (LHS == 0)
+ return false;
+
+ unsigned RHS = getRegForValue(FCmp->getOperand(1));
+ if (RHS == 0)
+ return false;
+
+ bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
+ unsigned Opc;
+ bool Not = false;
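+ // une maps directly to ne; the other supported unordered predicates are
+ // lowered as the complementary ordered comparison plus a negation.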
+ switch (FCmp->getPredicate()) {
+ case FCmpInst::FCMP_OEQ:
+ Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
+ break;
+ case FCmpInst::FCMP_UNE:
+ Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
+ break;
+ case FCmpInst::FCMP_OGT:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ break;
+ case FCmpInst::FCMP_OGE:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ break;
+ case FCmpInst::FCMP_OLT:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ break;
+ case FCmpInst::FCMP_OLE:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ break;
+ case FCmpInst::FCMP_UGT:
+ Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_UGE:
+ Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULT:
+ Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
+ Not = true;
+ break;
+ case FCmpInst::FCMP_ULE:
+ Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
+ Not = true;
+ break;
+ default:
+ return false;
+ }
+
+ unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
+ .addReg(LHS)
+ .addReg(RHS);
+
+ if (Not)
+ ResultReg = notValue(ResultReg);
+
+ updateValueMap(FCmp, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
+ // Target-independent code can handle this, except it doesn't set the dead
+ // flag on the ARGUMENTS clobber, so we have to do that manually in order
+ // to satisfy code that expects this of isBitcast() instructions.
+ EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
+ EVT RetVT = TLI.getValueType(DL, I->getType());
+ if (!VT.isSimple() || !RetVT.isSimple())
+ return false;
+
+ if (VT == RetVT) {
+ // No-op bitcast.
+ updateValueMap(I, getRegForValue(I->getOperand(0)));
+ return true;
+ }
+
+ unsigned Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
+ getRegForValue(I->getOperand(0)),
+ I->getOperand(0)->hasOneUse());
+ if (!Reg)
+ return false;
+ MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
+ --Iter;
+ assert(Iter->isBitcast());
+ Iter->setPhysRegsDeadExcept(ArrayRef<unsigned>(), TRI);
+ updateValueMap(I, Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
+ const LoadInst *Load = cast<LoadInst>(I);
+ if (Load->isAtomic())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Load->getPointerOperand(), Addr))
+ return false;
+
+ // TODO: Fold a following sign-/zero-extend into the load instruction.
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ switch (getSimpleType(Load->getType())) {
+ case MVT::i1:
+ case MVT::i8:
+ Opc = WebAssembly::LOAD8_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::LOAD16_U_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::LOAD_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::LOAD_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::LOAD_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::LOAD_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default:
+ return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));
+
+ updateValueMap(Load, ResultReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectStore(const Instruction *I) {
+ const StoreInst *Store = cast<StoreInst>(I);
+ if (Store->isAtomic())
+ return false;
+
+ Address Addr;
+ if (!computeAddress(Store->getPointerOperand(), Addr))
+ return false;
+
+ unsigned Opc;
+ const TargetRegisterClass *RC;
+ bool VTIsi1 = false;
+ switch (getSimpleType(Store->getValueOperand()->getType())) {
+ case MVT::i1:
+ VTIsi1 = true;
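+ // An i1 value is masked down to a single bit and stored with the 8-bit
+ // store below (fall through).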
+ case MVT::i8:
+ Opc = WebAssembly::STORE8_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i16:
+ Opc = WebAssembly::STORE16_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i32:
+ Opc = WebAssembly::STORE_I32;
+ RC = &WebAssembly::I32RegClass;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::STORE_I64;
+ RC = &WebAssembly::I64RegClass;
+ break;
+ case MVT::f32:
+ Opc = WebAssembly::STORE_F32;
+ RC = &WebAssembly::F32RegClass;
+ break;
+ case MVT::f64:
+ Opc = WebAssembly::STORE_F64;
+ RC = &WebAssembly::F64RegClass;
+ break;
+ default: return false;
+ }
+
+ materializeLoadStoreOperands(Addr);
+
+ unsigned ValueReg = getRegForValue(Store->getValueOperand());
+ if (VTIsi1)
+ ValueReg = maskI1Value(ValueReg, Store->getValueOperand());
+
+ unsigned ResultReg = createResultReg(RC);
+ auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg);
+
+ addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));
+
+ MIB.addReg(ValueReg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectBr(const Instruction *I) {
+ const BranchInst *Br = cast<BranchInst>(I);
+ if (Br->isUnconditional()) {
+ MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ fastEmitBranch(MSucc, Br->getDebugLoc());
+ return true;
+ }
+
+ MachineBasicBlock *TBB = FuncInfo.MBBMap[Br->getSuccessor(0)];
+ MachineBasicBlock *FBB = FuncInfo.MBBMap[Br->getSuccessor(1)];
+
+ bool Not;
+ unsigned CondReg = getRegForI1Value(Br->getCondition(), Not);
+
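+ // When the condition is negated, branch with BR_UNLESS instead of
+ // emitting an explicit inversion of the condition register.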
+ unsigned Opc = WebAssembly::BR_IF;
+ if (Not)
+ Opc = WebAssembly::BR_UNLESS;
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addMBB(TBB)
+ .addReg(CondReg);
+
+ finishCondBranch(Br->getParent(), TBB, FBB);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectRet(const Instruction *I) {
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+
+ if (Ret->getNumOperands() == 0) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::RETURN_VOID));
+ return true;
+ }
+
+ Value *RV = Ret->getOperand(0);
+ unsigned Opc;
+ switch (getSimpleType(RV->getType())) {
+ case MVT::i1: case MVT::i8:
+ case MVT::i16: case MVT::i32:
+ Opc = WebAssembly::RETURN_I32;
+ break;
+ case MVT::i64:
+ Opc = WebAssembly::RETURN_I64;
+ break;
+ case MVT::f32: Opc = WebAssembly::RETURN_F32; break;
+ case MVT::f64: Opc = WebAssembly::RETURN_F64; break;
+ default: return false;
+ }
+
+ unsigned Reg;
+ if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::SExt))
+ Reg = getRegForSignedValue(RV);
+ else if (FuncInfo.Fn->getAttributes().hasAttribute(0, Attribute::ZExt))
+ Reg = getRegForUnsignedValue(RV);
+ else
+ Reg = getRegForValue(RV);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(Reg);
+ return true;
+}
+
+bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(WebAssembly::UNREACHABLE));
+ return true;
+}
+
bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
- default:
+ case Instruction::Call:
+ if (selectCall(I))
+ return true;
break;
- // TODO: add fast-isel selection cases here...
+ case Instruction::Select: return selectSelect(I);
+ case Instruction::Trunc: return selectTrunc(I);
+ case Instruction::ZExt: return selectZExt(I);
+ case Instruction::SExt: return selectSExt(I);
+ case Instruction::ICmp: return selectICmp(I);
+ case Instruction::FCmp: return selectFCmp(I);
+ case Instruction::BitCast: return selectBitCast(I);
+ case Instruction::Load: return selectLoad(I);
+ case Instruction::Store: return selectStore(I);
+ case Instruction::Br: return selectBr(I);
+ case Instruction::Ret: return selectRet(I);
+ case Instruction::Unreachable: return selectUnreachable(I);
+ default: break;
}
// Fall back to target-independent instruction selection.