Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp')
-rw-r--r--  contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 521
1 file changed, 460 insertions(+), 61 deletions(-)
diff --git a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
index 69bb80a..fbbce11 100644
--- a/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/contrib/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -54,8 +54,9 @@ public:
#include "AArch64GenAsmMatcher.inc"
};
- AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
+ AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
+ const MCInstrInfo &MII)
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
// Initialize the set of available features.
@@ -126,6 +127,11 @@ public:
OperandMatchResultTy
ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+ bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
+ SMLoc &LayoutLoc);
+
+ OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
+
bool validateInstruction(MCInst &Inst,
const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
@@ -153,6 +159,7 @@ private:
k_Immediate, // Including expressions referencing symbols
k_Register,
k_ShiftExtend,
+ k_VectorList, // A sequential list of 1 to 4 registers.
k_SysReg, // The register operand of MRS and MSR instructions
k_Token, // The mnemonic; other raw tokens the auto-generated
k_WrappedRegister // Load/store exclusive permit a wrapped register.
@@ -188,6 +195,13 @@ private:
bool ImplicitAmount;
};
+ // A vector register list is a sequential list of 1 to 4 registers.
+ struct VectorListOp {
+ unsigned RegNum;
+ unsigned Count;
+ A64Layout::VectorLayout Layout;
+ };
+
struct SysRegOp {
const char *Data;
unsigned Length;
@@ -205,6 +219,7 @@ private:
struct ImmOp Imm;
struct RegOp Reg;
struct ShiftExtendOp ShiftExtend;
+ struct VectorListOp VectorList;
struct SysRegOp SysReg;
struct TokOp Tok;
};
@@ -454,7 +469,7 @@ public:
}
bool isMOVN32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_SABS_G0,
AArch64MCExpr::VK_AARCH64_SABS_G1,
AArch64MCExpr::VK_AARCH64_DTPREL_G1,
@@ -463,13 +478,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVN64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_SABS_G0,
AArch64MCExpr::VK_AARCH64_SABS_G1,
AArch64MCExpr::VK_AARCH64_SABS_G2,
@@ -481,14 +496,14 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMOVZ32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0,
AArch64MCExpr::VK_AARCH64_ABS_G1,
AArch64MCExpr::VK_AARCH64_SABS_G0,
@@ -499,13 +514,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVZ64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0,
AArch64MCExpr::VK_AARCH64_ABS_G1,
AArch64MCExpr::VK_AARCH64_ABS_G2,
@@ -521,13 +536,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1,
AArch64MCExpr::VK_AARCH64_TPREL_G0,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMOVK32Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
@@ -536,13 +551,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(32, PermittedModifiers, NumModifiers);
}
bool isMOVK64Imm() const {
- static AArch64MCExpr::VariantKind PermittedModifiers[] = {
+ static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
@@ -553,13 +568,13 @@ public:
AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
};
- unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
+ const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
return isMoveWideImm(64, PermittedModifiers, NumModifiers);
}
bool isMoveWideImm(unsigned RegWidth,
- AArch64MCExpr::VariantKind *PermittedModifiers,
+ const AArch64MCExpr::VariantKind *PermittedModifiers,
unsigned NumModifiers) const {
if (!isImmWithLSL()) return false;
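The move-wide predicates above accept an immediate only when its relocation modifier appears in the instruction's permitted list (MOVZ/MOVN allow the checking group modifiers, while MOVK takes only the _nc variants). A minimal sketch of the accepted syntax, assuming the standard AArch64 ELF modifier spellings and a placeholder symbol sym:

    movz x0, #:abs_g1:sym      // MOVZ with an absolute group-1 modifier
    movk x0, #:abs_g0_nc:sym   // MOVK with a non-checking group-0 modifier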
@@ -664,8 +679,86 @@ public:
return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
}
- template<int MemSize> bool isSImm7Scaled() const {
- if (!isImm()) return false;
+ // Return true if 0 < value <= w.
+ bool isShrFixedWidth(int w) const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+ int64_t Value = CE->getValue();
+ return Value > 0 && Value <= w;
+ }
+
+ bool isShrImm8() const { return isShrFixedWidth(8); }
+
+ bool isShrImm16() const { return isShrFixedWidth(16); }
+
+ bool isShrImm32() const { return isShrFixedWidth(32); }
+
+ bool isShrImm64() const { return isShrFixedWidth(64); }
+
+ // Return true if 0 <= value < w.
+ bool isShlFixedWidth(int w) const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+ int64_t Value = CE->getValue();
+ return Value >= 0 && Value < w;
+ }
+
+ bool isShlImm8() const { return isShlFixedWidth(8); }
+
+ bool isShlImm16() const { return isShlFixedWidth(16); }
+
+ bool isShlImm32() const { return isShlFixedWidth(32); }
+
+ bool isShlImm64() const { return isShlFixedWidth(64); }
+
+ bool isNeonMovImmShiftLSL() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::LSL)
+ return false;
+
+ // Valid shift amounts are 0, 8, 16 and 24.
+ return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
+ }
+
+ bool isNeonMovImmShiftLSLH() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::LSL)
+ return false;
+
+ // Valid shift amounts are 0 and 8.
+ return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
+ }
+
+ bool isNeonMovImmShiftMSL() const {
+ if (!isShiftOrExtend())
+ return false;
+
+ if (ShiftExtend.ShiftType != A64SE::MSL)
+ return false;
+
+ // Valid shift amounts are 8 and 16.
+ return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
+ }
+
+ template <A64Layout::VectorLayout Layout, unsigned Count>
+ bool isVectorList() const {
+ return Kind == k_VectorList && VectorList.Layout == Layout &&
+ VectorList.Count == Count;
+ }
+
+ template <int MemSize> bool isSImm7Scaled() const {
+ if (!isImm())
+ return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
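The new isShr*/isShl* predicates capture the asymmetry of element shifts: a right shift of a w-bit element by w is meaningful, a left shift by w is not, hence the ranges [1, w] and [0, w-1]. The isNeonMovImmShift* predicates gate the shifted forms of the vector move-immediate instructions. A sketch of inputs these are meant to accept, assuming standard AArch64 syntax rather than anything from this patch's tests:

    sshr v0.8b, v1.8b, #8        // isShrImm8: shift in [1, 8]
    shl  v0.8b, v1.8b, #7        // isShlImm8: shift in [0, 7]
    movi v0.4s, #0xab, lsl #16   // isNeonMovImmShiftLSL: amount 0, 8, 16 or 24
    movi v0.8h, #0xab, lsl #8    // isNeonMovImmShiftLSLH: amount 0 or 8
    movi v0.2s, #0xab, msl #8    // isNeonMovImmShiftMSL: amount 8 or 16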
@@ -705,10 +798,38 @@ public:
return isa<MCConstantExpr>(getImm());
}
+ bool isNeonUImm64Mask() const {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE)
+ return false;
+
+ uint64_t Value = CE->getValue();
+
+ // i64 value with each byte being either 0x00 or 0xff.
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8)
+ if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
+ return false;
+ return true;
+ }
+
+ // Return true if value == N.
+ template<int N>
+ bool isExactImm() const {
+ if (!isImm()) return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+
+ return CE->getValue() == N;
+ }
+
static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
unsigned ShiftAmount,
bool ImplicitAmount,
- SMLoc S, SMLoc E) {
+ SMLoc S,SMLoc E) {
AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
Op->ImmWithLSL.Val = Val;
Op->ImmWithLSL.ShiftAmount = ShiftAmount;
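isNeonUImm64Mask matches exactly the 64-bit constants that the per-byte mask form of MOVI can represent: every byte is either all-zeros or all-ones. For example (illustrative):

    movi v0.2d, #0xff00ff00ff00ff00   // every byte 0x00 or 0xff: accepted
    // movi v0.2d, #0x1234            // bytes 0x12 and 0x34: rejected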
@@ -766,6 +887,18 @@ public:
return Op;
}
+ static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
+ A64Layout::VectorLayout Layout,
+ SMLoc S, SMLoc E) {
+ AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
+ Op->VectorList.RegNum = RegNum;
+ Op->VectorList.Count = Count;
+ Op->VectorList.Layout = Layout;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
Op->Tok.Data = Str.data();
@@ -1026,6 +1159,40 @@ public:
Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
}
+ // For the shifted immediate operands of vector move-immediate instructions.
+ void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
+ int64_t Imm = ShiftExtend.Amount / 8;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode LSLH shift amount 0, 8 as 0, 1.
+ int64_t Imm = ShiftExtend.Amount / 8;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
+ llvm_unreachable("Invalid shift amount for vector immediate inst.");
+
+ // Encode MSL shift amount 8, 16 as 0, 1.
+ int64_t Imm = ShiftExtend.Amount / 8 - 1;
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
// For the extend in load-store (register offset) instructions.
template<unsigned MemSize>
void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
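The addNeonMovImmShift* methods map the textual shift amount onto the small encoded field: LSL 0/8/16/24 becomes 0-3, the halfword-only LSL 0/8 becomes 0-1, and MSL 8/16 becomes 0-1 via amount/8 - 1. For instance (illustrative):

    movi v0.4s, #0xab, lsl #24   // encoded shift field = 24/8     = 3
    movi v0.8h, #0xab, lsl #8    // encoded shift field = 8/8      = 1
    movi v0.2s, #0xab, msl #16   // encoded shift field = 16/8 - 1 = 1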
@@ -1065,6 +1232,25 @@ public:
Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
}
+
+ void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ // Each byte is 0x00 or 0xff, so bit 0 of each byte forms one bit of the
+ // encoded immediate.
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+
+ unsigned Imm = 0;
+ for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
+ Imm |= (Value & 1) << i;
+ }
+ Inst.addOperand(MCOperand::CreateImm(Imm));
+ }
+
+ void addVectorListOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
+ }
};
} // end anonymous namespace.
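addNeonUImm64MaskOperands compresses the 64-bit byte mask into the 8-bit immediate by keeping bit 0 of each byte (the predicate already guarantees each byte is 0x00 or 0xff). A worked example (illustrative):

    movi v0.2d, #0xff00ff00ff00ff00
    // byte low bits, least significant byte first: 0,1,0,1,0,1,0,1
    // imm8 = 0b10101010 = 0xaa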
@@ -1104,7 +1290,6 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
else
return MatchOperand_Success;
}
-
// ... or it might be a symbolish thing
}
// Fall through
@@ -1148,7 +1333,7 @@ AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
return ParseOperand(Operands, Mnemonic);
}
// The following will likely be useful later, but not in very early cases
- case AsmToken::LCurly: // Weird SIMD lists
+ case AsmToken::LCurly: // SIMD vector list is not parsed here
llvm_unreachable("Don't know how to deal with '{' in operand");
return MatchOperand_ParseFail;
}
@@ -1306,7 +1491,7 @@ AArch64AsmParser::ParseImmWithLSLOperand(
// The optional operand must be "lsl #N" where N is non-negative.
if (Parser.getTok().is(AsmToken::Identifier)
- && Parser.getTok().getIdentifier().lower() == "lsl") {
+ && Parser.getTok().getIdentifier().equals_lower("lsl")) {
Parser.Lex();
if (Parser.getTok().is(AsmToken::Hash)) {
@@ -1363,9 +1548,8 @@ AArch64AsmParser::ParseCRxOperand(
return MatchOperand_ParseFail;
}
- std::string LowerTok = Parser.getTok().getIdentifier().lower();
- StringRef Tok(LowerTok);
- if (Tok[0] != 'c') {
+ StringRef Tok = Parser.getTok().getIdentifier();
+ if (Tok[0] != 'c' && Tok[0] != 'C') {
Error(S, "Expected cN operand where 0 <= N <= 15");
return MatchOperand_ParseFail;
}
@@ -1437,22 +1621,11 @@ AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
std::string LowerReg = Tok.getString().lower();
size_t DotPos = LowerReg.find('.');
- RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
- if (RegNum == AArch64::NoRegister) {
- RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
- .Case("ip0", AArch64::X16)
- .Case("ip1", AArch64::X17)
- .Case("fp", AArch64::X29)
- .Case("lr", AArch64::X30)
- .Default(AArch64::NoRegister);
- }
- if (RegNum == AArch64::NoRegister)
- return false;
-
+ bool IsVec128 = false;
SMLoc S = Tok.getLoc();
RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
- if (DotPos == StringRef::npos) {
+ if (DotPos == std::string::npos) {
Layout = StringRef();
} else {
// Everything afterwards needs to be a literal token, expected to be
@@ -1462,20 +1635,78 @@ AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
// gives us a permanent string to use in the token (a pointer into LowerReg
// would go out of scope when we return).
LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
- std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
+ StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
+
+ // See if it's a 128-bit layout first.
Layout = StringSwitch<const char *>(LayoutText)
- .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
- .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
- .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
- .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
+ .Case(".q", ".q").Case(".1q", ".1q")
+ .Case(".d", ".d").Case(".2d", ".2d")
+ .Case(".s", ".s").Case(".4s", ".4s")
+ .Case(".h", ".h").Case(".8h", ".8h")
+ .Case(".b", ".b").Case(".16b", ".16b")
.Default("");
+ if (Layout.size() != 0)
+ IsVec128 = true;
+ else {
+ Layout = StringSwitch<const char *>(LayoutText)
+ .Case(".1d", ".1d")
+ .Case(".2s", ".2s")
+ .Case(".4h", ".4h")
+ .Case(".8b", ".8b")
+ .Default("");
+ }
+
if (Layout.size() == 0) {
- // Malformed register
+ // If we've still not pinned it down the register is malformed.
return false;
}
}
+ RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
+ if (RegNum == AArch64::NoRegister) {
+ RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
+ .Case("ip0", AArch64::X16)
+ .Case("ip1", AArch64::X17)
+ .Case("fp", AArch64::X29)
+ .Case("lr", AArch64::X30)
+ .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
+ .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
+ .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
+ .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
+ .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
+ .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
+ .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
+ .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
+ .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
+ .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
+ .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
+ .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
+ .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
+ .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
+ .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
+ .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
+ .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
+ .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
+ .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
+ .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
+ .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
+ .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
+ .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
+ .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
+ .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
+ .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
+ .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
+ .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
+ .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
+ .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
+ .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
+ .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
+ .Default(AArch64::NoRegister);
+ }
+ if (RegNum == AArch64::NoRegister)
+ return false;
+
return true;
}
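Classifying the layout suffix before matching the register name lets a 'v' register resolve to either register file: a 64-bit layout yields a D register, a 128-bit layout a Q register. Sketch of the resulting mapping (illustrative):

    add v3.8b,  v1.8b,  v2.8b    // 64-bit layout:  v3 -> AArch64::D3
    add v3.16b, v1.16b, v2.16b   // 128-bit layout: v3 -> AArch64::Q3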
@@ -1507,6 +1738,7 @@ AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
case 'h': NumLanes = 8; break;
case 's': NumLanes = 4; break;
case 'd': NumLanes = 2; break;
+ case 'q': NumLanes = 1; break;
}
}
@@ -1660,20 +1892,21 @@ AArch64AsmParser::ParseShiftExtend(
std::string LowerID = IDVal.lower();
A64SE::ShiftExtSpecifiers Spec =
- StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
- .Case("lsl", A64SE::LSL)
- .Case("lsr", A64SE::LSR)
- .Case("asr", A64SE::ASR)
- .Case("ror", A64SE::ROR)
- .Case("uxtb", A64SE::UXTB)
- .Case("uxth", A64SE::UXTH)
- .Case("uxtw", A64SE::UXTW)
- .Case("uxtx", A64SE::UXTX)
- .Case("sxtb", A64SE::SXTB)
- .Case("sxth", A64SE::SXTH)
- .Case("sxtw", A64SE::SXTW)
- .Case("sxtx", A64SE::SXTX)
- .Default(A64SE::Invalid);
+ StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
+ .Case("lsl", A64SE::LSL)
+ .Case("msl", A64SE::MSL)
+ .Case("lsr", A64SE::LSR)
+ .Case("asr", A64SE::ASR)
+ .Case("ror", A64SE::ROR)
+ .Case("uxtb", A64SE::UXTB)
+ .Case("uxth", A64SE::UXTH)
+ .Case("uxtw", A64SE::UXTW)
+ .Case("uxtx", A64SE::UXTX)
+ .Case("sxtb", A64SE::SXTB)
+ .Case("sxth", A64SE::SXTH)
+ .Case("sxtw", A64SE::SXTW)
+ .Case("sxtx", A64SE::SXTX)
+ .Default(A64SE::Invalid);
if (Spec == A64SE::Invalid)
return MatchOperand_NoMatch;
@@ -1683,8 +1916,8 @@ AArch64AsmParser::ParseShiftExtend(
S = Parser.getTok().getLoc();
Parser.Lex();
- if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
- Spec != A64SE::ASR && Spec != A64SE::ROR) {
+ if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
+ Spec != A64SE::ROR && Spec != A64SE::MSL) {
// The shift amount can be omitted for the extending versions, but not real
// shifts:
// add x0, x0, x0, uxtb
@@ -1724,6 +1957,148 @@ AArch64AsmParser::ParseShiftExtend(
return MatchOperand_Success;
}
+/// Try to parse a vector register token. If it is a vector register,
+/// the token is eaten and true is returned; otherwise false is returned.
+bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
+ StringRef &Layout, SMLoc &LayoutLoc) {
+ bool IsVector = true;
+
+ if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
+ IsVector = false;
+ else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
+ .contains(RegNum) &&
+ !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
+ .contains(RegNum))
+ IsVector = false;
+ else if (Layout.size() == 0)
+ IsVector = false;
+
+ if (!IsVector)
+ Error(Parser.getTok().getLoc(), "expected vector type register");
+
+ Parser.Lex(); // Eat this token.
+ return IsVector;
+}
+
+
+// A vector list contains 1-4 consecutive registers.
+// When the list holds more than one vector, it takes one of two forms:
+// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
+// (2) {Vn.layout - Vm.layout}
+// If the layout is one of .b/.h/.s/.d, also parse the lane index.
+AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
+ SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+ if (Parser.getTok().isNot(AsmToken::LCurly)) {
+ Error(Parser.getTok().getLoc(), "'{' expected");
+ return MatchOperand_ParseFail;
+ }
+ SMLoc SLoc = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat '{' token.
+
+ unsigned Reg, Count = 1;
+ StringRef LayoutStr;
+ SMLoc RegEndLoc, LayoutLoc;
+ if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
+ return MatchOperand_ParseFail;
+
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ Parser.Lex(); // Eat the minus.
+
+ unsigned Reg2;
+ StringRef LayoutStr2;
+ SMLoc RegEndLoc2, LayoutLoc2;
+ SMLoc RegLoc2 = Parser.getTok().getLoc();
+
+ if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+ return MatchOperand_ParseFail;
+ unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
+
+ if (LayoutStr != LayoutStr2) {
+ Error(LayoutLoc2, "expected the same vector layout");
+ return MatchOperand_ParseFail;
+ }
+ if (Space == 0 || Space > 3) {
+ Error(RegLoc2, "invalid number of vectors");
+ return MatchOperand_ParseFail;
+ }
+
+ Count += Space;
+ } else {
+ unsigned LastReg = Reg;
+ while (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat the comma.
+ unsigned Reg2;
+ StringRef LayoutStr2;
+ SMLoc RegEndLoc2, LayoutLoc2;
+ SMLoc RegLoc2 = Parser.getTok().getLoc();
+
+ if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+ return MatchOperand_ParseFail;
+ unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
+ : (Reg2 + 32 - LastReg);
+ Count++;
+
+ // The space between two vectors should be 1, and they should have the
+ // same layout. The total count must not be greater than 4.
+ if (Space != 1) {
+ Error(RegLoc2, "invalid space between two vectors");
+ return MatchOperand_ParseFail;
+ }
+ if (LayoutStr != LayoutStr2) {
+ Error(LayoutLoc2, "expected the same vector layout");
+ return MatchOperand_ParseFail;
+ }
+ if (Count > 4) {
+ Error(RegLoc2, "invalid number of vectors");
+ return MatchOperand_ParseFail;
+ }
+
+ LastReg = Reg2;
+ }
+ }
+
+ if (Parser.getTok().isNot(AsmToken::RCurly)) {
+ Error(Parser.getTok().getLoc(), "'}' expected");
+ return MatchOperand_ParseFail;
+ }
+ SMLoc ELoc = Parser.getTok().getLoc();
+ Parser.Lex(); // Eat '}' token.
+
+ A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
+ if (Count > 1) { // If count > 1, create vector list using super register.
+ bool IsVec64 = (Layout < A64Layout::VL_16B);
+ static unsigned SupRegIDs[3][2] = {
+ { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
+ { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
+ { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
+ };
+ unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
+ unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
+ const MCRegisterInfo *MRI = getContext().getRegisterInfo();
+ Reg = MRI->getMatchingSuperReg(Reg, Sub0,
+ &AArch64MCRegisterClasses[SupRegID]);
+ }
+ Operands.push_back(
+ AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
+
+ if (Parser.getTok().is(AsmToken::LBrac)) {
+ uint32_t NumLanes = 0;
+ switch(Layout) {
+ case A64Layout::VL_B : NumLanes = 16; break;
+ case A64Layout::VL_H : NumLanes = 8; break;
+ case A64Layout::VL_S : NumLanes = 4; break;
+ case A64Layout::VL_D : NumLanes = 2; break;
+ default:
+ SMLoc Loc = getLexer().getLoc();
+ Error(Loc, "expected comma before next operand");
+ return MatchOperand_ParseFail;
+ }
+ return ParseNEONLane(Operands, NumLanes);
+ } else {
+ return MatchOperand_Success;
+ }
+}
+
// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
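ParseVectorList above accepts both list spellings, lets register numbers wrap modulo 32 (so {v31, v0} is still consecutive), and hands off to ParseNEONLane when a lane index follows the closing brace. Illustrative inputs, assuming standard AArch64 syntax rather than anything from this patch's tests:

    ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]   // form (1): consecutive registers
    ld1 {v0.4s-v3.4s}, [x0]                  // form (2): range, count = 4
    ld1 {v31.2d, v0.2d}, [x0]                // wraps modulo 32
    st2 {v4.h, v5.h}[3], [x1]                // .h layout: lane index in [0, 8)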
@@ -1918,7 +2293,7 @@ bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
if (getParser().parseExpression(Value))
return true;
- getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
+ getParser().getStreamer().EmitValue(Value, Size);
if (getLexer().is(AsmToken::EndOfStatement))
break;
@@ -2019,7 +2394,7 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
"expected compatible register or floating-point constant");
case Match_FPZero:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
- "expected floating-point constant #0.0");
+ "expected floating-point constant #0.0 or invalid register type");
case Match_Label:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
"expected label or encodable integer pc offset");
@@ -2140,6 +2515,30 @@ bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
case Match_Width64:
return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
"expected integer in range [<lsb>, 63]");
+ case Match_ShrImm8:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 8]");
+ case Match_ShrImm16:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 16]");
+ case Match_ShrImm32:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 32]");
+ case Match_ShrImm64:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [1, 64]");
+ case Match_ShlImm8:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 7]");
+ case Match_ShlImm16:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 15]");
+ case Match_ShlImm32:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 31]");
+ case Match_ShlImm64:
+ return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
+ "expected integer in range [0, 63]");
}
llvm_unreachable("Implement any new match types added!");