author    | dim <dim@FreeBSD.org> | 2015-03-23 21:13:29 +0000
committer | dim <dim@FreeBSD.org> | 2015-03-23 21:13:29 +0000
commit    | 17d956b9623fc81fa1dd281d843352f6f7ffa62e (patch)
tree      | 858728a8856827216090a111be310b4c5fa3d7a3 /contrib/llvm/lib
parent    | 013bf140e5da5e219ef60db583704cf0f4bc87b4 (diff)
Pull in r230348 from upstream llvm trunk (by Tim Northover):
ARM: treat [N x i32] and [N x i64] as AAPCS composite types
The logic is almost there already, with our special homogeneous
aggregate handling. Tweaking it like this allows front-ends to emit
AAPCS compliant code without ever having to count registers or add
discarded padding arguments.
Only arrays of i32 and i64 are needed to model AAPCS rules, but I
decided to apply the logic to all integer arrays for more consistency.
This fixes a possible "Unexpected member type for HA" error when
compiling lib/msun/bsdsrc/b_tgamma.c for armv6.
Reported by: Jakub Palider <jpa@semihalf.com>
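
Purely as an illustration of the AAPCS situation involved (the struct and function below are hypothetical, not taken from this commit or from b_tgamma.c): a small composite passed by value is the sort of argument a front-end may lower to an integer array such as [2 x i32], and with this change the ARM backend applies the AAPCS composite rules to that array itself rather than relying on the front-end to count registers or insert padding arguments.

    /* Hypothetical example, not from this commit: an 8-byte composite
     * argument.  A front-end targeting AAPCS may lower the by-value struct
     * to an IR array such as [2 x i32]; with this change the ARM backend
     * itself places those words in consecutive core registers (e.g. r0/r1)
     * or on the stack, following the AAPCS composite rules. */
    struct point {
        int x;
        int y;
    };

    int manhattan(struct point p) {
        int ax = p.x < 0 ? -p.x : p.x;
        int ay = p.y < 0 ? -p.y : p.y;
        return ax + ay;
    }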
Diffstat (limited to 'contrib/llvm/lib')
-rw-r--r-- | contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 15
-rw-r--r-- | contrib/llvm/lib/Target/ARM/ARMCallingConv.h | 147
-rw-r--r-- | contrib/llvm/lib/Target/ARM/ARMCallingConv.td | 2
-rw-r--r-- | contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp | 12
4 files changed, 107 insertions, 69 deletions
diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d192910..551da20 100644
--- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7429,11 +7429,8 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
       }
       if (Args[i].isNest)
         Flags.setNest();
-      if (NeedsRegBlock) {
+      if (NeedsRegBlock)
         Flags.setInConsecutiveRegs();
-        if (Value == NumValues - 1)
-          Flags.setInConsecutiveRegsLast();
-      }
       Flags.setOrigAlign(OriginalAlignment);
 
       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
@@ -7482,6 +7479,9 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
         CLI.Outs.push_back(MyFlags);
         CLI.OutVals.push_back(Parts[j]);
       }
+
+      if (NeedsRegBlock && Value == NumValues - 1)
+        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
     }
   }
 
@@ -7696,11 +7696,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
       }
       if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
         Flags.setNest();
-      if (NeedsRegBlock) {
+      if (NeedsRegBlock)
         Flags.setInConsecutiveRegs();
-        if (Value == NumValues - 1)
-          Flags.setInConsecutiveRegsLast();
-      }
       Flags.setOrigAlign(OriginalAlignment);
 
       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
@@ -7715,6 +7712,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
           MyFlags.Flags.setOrigAlign(1);
         Ins.push_back(MyFlags);
       }
+      if (NeedsRegBlock && Value == NumValues - 1)
+        Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
       PartBase += VT.getStoreSize();
     }
   }
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
index e0d0559..a421f62 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.h
@@ -160,6 +160,8 @@ static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                    State);
 }
 
+static const uint16_t RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+
 static const uint16_t SRegList[] = { ARM::S0, ARM::S1, ARM::S2, ARM::S3,
                                      ARM::S4, ARM::S5, ARM::S6, ARM::S7,
                                      ARM::S8, ARM::S9, ARM::S10, ARM::S11,
@@ -168,81 +170,114 @@ static const uint16_t DRegList[] = { ARM::D0, ARM::D1, ARM::D2, ARM::D3,
                                      ARM::D4, ARM::D5, ARM::D6, ARM::D7 };
 static const uint16_t QRegList[] = { ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3 };
 
+
 // Allocate part of an AAPCS HFA or HVA. We assume that each member of the HA
 // has InConsecutiveRegs set, and that the last member also has
 // InConsecutiveRegsLast set. We must process all members of the HA before
 // we can allocate it, as we need to know the total number of registers that
 // will be needed in order to (attempt to) allocate a contiguous block.
-static bool CC_ARM_AAPCS_Custom_HA(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
-                                   CCValAssign::LocInfo &LocInfo,
-                                   ISD::ArgFlagsTy &ArgFlags, CCState &State) {
-  SmallVectorImpl<CCValAssign> &PendingHAMembers = State.getPendingLocs();
+static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned &ValNo, MVT &ValVT,
+                                          MVT &LocVT,
+                                          CCValAssign::LocInfo &LocInfo,
+                                          ISD::ArgFlagsTy &ArgFlags,
+                                          CCState &State) {
+  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();
 
   // AAPCS HFAs must have 1-4 elements, all of the same type
-  assert(PendingHAMembers.size() < 4);
-  if (PendingHAMembers.size() > 0)
-    assert(PendingHAMembers[0].getLocVT() == LocVT);
+  if (PendingMembers.size() > 0)
+    assert(PendingMembers[0].getLocVT() == LocVT);
 
   // Add the argument to the list to be allocated once we know the size of the
-  // HA
-  PendingHAMembers.push_back(
-      CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
-
-  if (ArgFlags.isInConsecutiveRegsLast()) {
-    assert(PendingHAMembers.size() > 0 && PendingHAMembers.size() <= 4 &&
-           "Homogeneous aggregates must have between 1 and 4 members");
-
-    // Try to allocate a contiguous block of registers, each of the correct
-    // size to hold one member.
-    ArrayRef<uint16_t> RegList;
-    switch (LocVT.SimpleTy) {
-    case MVT::f32:
-      RegList = SRegList;
-      break;
-    case MVT::f64:
-      RegList = DRegList;
-      break;
-    case MVT::v2f64:
-      RegList = QRegList;
-      break;
-    default:
-      llvm_unreachable("Unexpected member type for HA");
-      break;
-    }
+  // aggregate. Store the type's required alignmnent as extra info for later: in
+  // the [N x i64] case all trace has been removed by the time we actually get
+  // to do allocation.
+  PendingMembers.push_back(CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo,
+                                                   ArgFlags.getOrigAlign()));
 
-    unsigned RegResult =
-        State.AllocateRegBlock(RegList, PendingHAMembers.size());
-
-    if (RegResult) {
-      for (SmallVectorImpl<CCValAssign>::iterator It = PendingHAMembers.begin();
-           It != PendingHAMembers.end(); ++It) {
-        It->convertToReg(RegResult);
-        State.addLoc(*It);
-        ++RegResult;
-      }
-      PendingHAMembers.clear();
-      return true;
-    }
+  if (!ArgFlags.isInConsecutiveRegsLast())
+    return true;
+
+  // Try to allocate a contiguous block of registers, each of the correct
+  // size to hold one member.
+  unsigned Align = std::min(PendingMembers[0].getExtraInfo(), 8U);
 
-    // Register allocation failed, fall back to the stack
+  ArrayRef<uint16_t> RegList;
+  switch (LocVT.SimpleTy) {
+  case MVT::i32: {
+    RegList = RRegList;
+    unsigned RegIdx = State.getFirstUnallocated(RegList.data(), RegList.size());
 
-    // Mark all VFP regs as unavailable (AAPCS rule C.2.vfp)
-    for (unsigned regNo = 0; regNo < 16; ++regNo)
-      State.AllocateReg(SRegList[regNo]);
+    // First consume all registers that would give an unaligned object. Whether
+    // we go on stack or in regs, no-one will be using them in future.
+    unsigned RegAlign = RoundUpToAlignment(Align, 4) / 4;
+    while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
+      State.AllocateReg(RegList[RegIdx++]);
 
-    unsigned Size = LocVT.getSizeInBits() / 8;
-    unsigned Align = std::min(Size, 8U);
+    break;
+  }
+  case MVT::f32:
+    RegList = SRegList;
+    break;
+  case MVT::f64:
+    RegList = DRegList;
+    break;
+  case MVT::v2f64:
+    RegList = QRegList;
+    break;
+  default:
+    llvm_unreachable("Unexpected member type for block aggregate");
+    break;
+  }
+
+  unsigned RegResult = State.AllocateRegBlock(RegList, PendingMembers.size());
+  if (RegResult) {
+    for (SmallVectorImpl<CCValAssign>::iterator It = PendingMembers.begin();
+         It != PendingMembers.end(); ++It) {
+      It->convertToReg(RegResult);
+      State.addLoc(*It);
+      ++RegResult;
+    }
+    PendingMembers.clear();
+    return true;
+  }
+
+  // Register allocation failed, we'll be needing the stack
+  unsigned Size = LocVT.getSizeInBits() / 8;
+  if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) {
+    // If nothing else has used the stack until this point, a non-HFA aggregate
+    // can be split between regs and stack.
+    unsigned RegIdx = State.getFirstUnallocated(RegList.data(), RegList.size());
+    for (auto &It : PendingMembers) {
+      if (RegIdx >= RegList.size())
+        It.convertToMem(State.AllocateStack(Size, Size));
+      else
+        It.convertToReg(State.AllocateReg(RegList[RegIdx++]));
 
-    for (auto It : PendingHAMembers) {
-      It.convertToMem(State.AllocateStack(Size, Align));
       State.addLoc(It);
     }
+    PendingMembers.clear();
+    return true;
+  } else if (LocVT != MVT::i32)
+    RegList = SRegList;
+
+  // Mark all regs as unavailable (AAPCS rule C.2.vfp for VFP, C.6 for core)
+  for (auto Reg : RegList)
+    State.AllocateReg(Reg);
 
-    // All pending members have now been allocated
-    PendingHAMembers.clear();
+  for (auto &It : PendingMembers) {
+    It.convertToMem(State.AllocateStack(Size, Align));
+    State.addLoc(It);
+
+    // After the first item has been allocated, the rest are packed as tightly
+    // as possible. (E.g. an incoming i64 would have starting Align of 8, but
+    // we'll be allocating a bunch of i32 slots).
+    Align = Size;
   }
 
-  // This will be allocated by the last member of the HA
+  // All pending members have now been allocated
+  PendingMembers.clear();
+
+  // This will be allocated by the last member of the aggregate
   return true;
 }
 
diff --git a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
index 526089b..7dd21ec 100644
--- a/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
+++ b/contrib/llvm/lib/Target/ARM/ARMCallingConv.td
@@ -175,7 +175,7 @@ def CC_ARM_AAPCS_VFP : CallingConv<[
   CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
 
   // HFAs are passed in a contiguous block of registers, or on the stack
-  CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_HA">>,
+  CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_Aggregate">>,
 
   CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
   CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
diff --git a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
index a1de5ef..e4bea5f 100644
--- a/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/contrib/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -11280,7 +11280,9 @@ static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
   return (Members > 0 && Members <= 4);
 }
 
-/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate.
+/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
+/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
+/// passing according to AAPCS rules.
 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
     Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
   if (getEffectiveCallingConv(CallConv, isVarArg) !=
@@ -11289,7 +11291,9 @@ bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
 
   HABaseType Base = HA_UNKNOWN;
   uint64_t Members = 0;
-  bool result = isHomogeneousAggregate(Ty, Base, Members);
-  DEBUG(dbgs() << "isHA: " << result << " "; Ty->dump());
-  return result;
+  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
+  DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
+
+  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
+  return IsHA || IsIntArray;
 }
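
To make the new split path in CC_ARM_AAPCS_Custom_Aggregate concrete, here is a hedged sketch (hypothetical C, not part of the commit) of an argument too large for r0-r3; in the i32 branch above, when the stack argument area is still empty, the first four words are assigned to r0-r3 and the rest spill onto the stack.

    /* Hypothetical example, not from this commit: a 24-byte composite that a
     * front-end may lower to [6 x i32].  When it is the first argument, the
     * aggregate handler above places words 0-3 in r0-r3 and words 4-5 in the
     * first eight bytes of the stack argument area (the regs-plus-stack split
     * is only taken while the stack is still empty). */
    struct six_words {
        int w[6];
    };

    int sum_six(struct six_words s) {
        int total = 0;
        for (int i = 0; i < 6; i++)
            total += s.w[i];
        return total;
    }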