author     Peter Maydell <peter.maydell@linaro.org>   2014-08-29 15:00:27 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-08-29 15:00:27 +0100
commit     508280f5666a706a3681462b2a1d7de8107fd6fb (patch)
tree       8ac95cc1b77048afcaad49c350e734bc145e31dd /disas
parent     d9aa68855724752a5684c6acfb17d8db15cec2f8 (diff)
download   hqemu-508280f5666a706a3681462b2a1d7de8107fd6fb.zip
           hqemu-508280f5666a706a3681462b2a1d7de8107fd6fb.tar.gz
disas/libvixl: Update to upstream VIXL 1.5
Update our copy of libvixl to upstream's 1.5 release.
This includes the upstream versions of the fixes we
were carrying locally (commit ffebe899).
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1407162987-4659-1-git-send-email-peter.maydell@linaro.org
Diffstat (limited to 'disas')
-rw-r--r--  disas/libvixl/README                   |   2
-rw-r--r--  disas/libvixl/a64/assembler-a64.h      | 363
-rw-r--r--  disas/libvixl/a64/constants-a64.h      |  68
-rw-r--r--  disas/libvixl/a64/cpu-a64.h            |  27
-rw-r--r--  disas/libvixl/a64/decoder-a64.cc       |  15
-rw-r--r--  disas/libvixl/a64/decoder-a64.h        |   1
-rw-r--r--  disas/libvixl/a64/disasm-a64.cc        |  88
-rw-r--r--  disas/libvixl/a64/disasm-a64.h         |   2
-rw-r--r--  disas/libvixl/a64/instructions-a64.cc  |  25
-rw-r--r--  disas/libvixl/a64/instructions-a64.h   |  10
-rw-r--r--  disas/libvixl/platform.h               |   8
-rw-r--r--  disas/libvixl/utils.cc                 |  10
-rw-r--r--  disas/libvixl/utils.h                  |  32
13 files changed, 558 insertions(+), 93 deletions(-)
diff --git a/disas/libvixl/README b/disas/libvixl/README
index a0ecac3..8301996 100644
--- a/disas/libvixl/README
+++ b/disas/libvixl/README
@@ -2,7 +2,7 @@
 The code in this directory is a subset of libvixl:
   https://github.com/armvixl/vixl
 (specifically, it is the set of files needed for disassembly only,
-taken from libvixl 1.4).
+taken from libvixl 1.5).
 Bugfixes should preferably be sent upstream initially.
 
 The disassembler does not currently support the entire A64 instruction
diff --git a/disas/libvixl/a64/assembler-a64.h b/disas/libvixl/a64/assembler-a64.h
index 1e2947b..cc0b758 100644
--- a/disas/libvixl/a64/assembler-a64.h
+++ b/disas/libvixl/a64/assembler-a64.h
@@ -28,6 +28,7 @@
 #define VIXL_A64_ASSEMBLER_A64_H_
 
 #include <list>
+#include <stack>
 
 #include "globals.h"
 #include "utils.h"
@@ -574,34 +575,107 @@ class MemOperand {
 class Label {
  public:
-  Label() : is_bound_(false), link_(NULL), target_(NULL) {}
+  Label() : location_(kLocationUnbound) {}
   ~Label() {
     // If the label has been linked to, it needs to be bound to a target.
     VIXL_ASSERT(!IsLinked() || IsBound());
   }
 
-  inline Instruction* link() const { return link_; }
-  inline Instruction* target() const { return target_; }
+  inline bool IsBound() const { return location_ >= 0; }
+  inline bool IsLinked() const { return !links_.empty(); }
 
-  inline bool IsBound() const { return is_bound_; }
-  inline bool IsLinked() const { return link_ != NULL; }
+ private:
+  // The list of linked instructions is stored in a stack-like structure. We
+  // don't use std::stack directly because it's slow for the common case where
+  // only one or two instructions refer to a label, and labels themselves are
+  // short-lived. This class behaves like std::stack, but the first few links
+  // are preallocated (configured by kPreallocatedLinks).
+  //
+  // If more than N links are required, this falls back to std::stack.
+  class LinksStack {
+   public:
+    LinksStack() : size_(0), links_extended_(NULL) {}
+    ~LinksStack() {
+      delete links_extended_;
+    }
 
-  inline void set_link(Instruction* new_link) { link_ = new_link; }
+    size_t size() const {
+      return size_;
+    }
 
-  static const int kEndOfChain = 0;
+    bool empty() const {
+      return size_ == 0;
+    }
 
- private:
-  // Indicates if the label has been bound, ie its location is fixed.
-  bool is_bound_;
-  // Branches instructions branching to this label form a chained list, with
-  // their offset indicating where the next instruction is located.
-  // link_ points to the latest branch instruction generated branching to this
-  // branch.
-  // If link_ is not NULL, the label has been linked to.
-  Instruction* link_;
+    void push(ptrdiff_t value) {
+      if (size_ < kPreallocatedLinks) {
+        links_[size_] = value;
+      } else {
+        if (links_extended_ == NULL) {
+          links_extended_ = new std::stack<ptrdiff_t>();
+        }
+        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
+        links_extended_->push(value);
+      }
+      size_++;
+    }
+
+    ptrdiff_t top() const {
+      return (size_ <= kPreallocatedLinks) ? links_[size_ - 1]
+                                           : links_extended_->top();
+    }
+
+    void pop() {
+      size_--;
+      if (size_ >= kPreallocatedLinks) {
+        links_extended_->pop();
+        VIXL_ASSERT(size_ == (links_extended_->size() + kPreallocatedLinks));
+      }
+    }
+
+   private:
+    static const size_t kPreallocatedLinks = 4;
+
+    size_t size_;
+    ptrdiff_t links_[kPreallocatedLinks];
+    std::stack<ptrdiff_t> * links_extended_;
+  };
+
+  inline ptrdiff_t location() const { return location_; }
+
+  inline void Bind(ptrdiff_t location) {
+    // Labels can only be bound once.
+    VIXL_ASSERT(!IsBound());
+    location_ = location;
+  }
+
+  inline void AddLink(ptrdiff_t instruction) {
+    // If a label is bound, the assembler already has the information it needs
+    // to write the instruction, so there is no need to add it to links_.
+    VIXL_ASSERT(!IsBound());
+    links_.push(instruction);
+  }
+
+  inline ptrdiff_t GetAndRemoveNextLink() {
+    VIXL_ASSERT(IsLinked());
+    ptrdiff_t link = links_.top();
+    links_.pop();
+    return link;
+  }
+
+  // The offsets of the instructions that have linked to this label.
+  LinksStack links_;
   // The label location.
-  Instruction* target_;
+  ptrdiff_t location_;
+
+  static const ptrdiff_t kLocationUnbound = -1;
+
+  // It is not safe to copy labels, so disable the copy constructor by
+  // declaring it private (without an implementation).
+  Label(const Label&);
+
+  // The Assembler class is responsible for binding and linking labels, since
+  // the stored offsets need to be consistent with the Assembler's buffer.
 
   friend class Assembler;
 };
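The LinksStack above is a small-buffer optimization: the first kPreallocatedLinks entries live inline in the Label, and a heap-allocated std::stack is created only on overflow. A minimal standalone sketch of the same pattern (the SmallStack name and the demo are illustrative, not part of VIXL):

    #include <cassert>
    #include <cstddef>
    #include <stack>

    // Sketch: keep the first kInline elements inline and only allocate a
    // std::stack when they overflow, mirroring LinksStack above.
    template <typename T, size_t kInline = 4>
    class SmallStack {
     public:
      SmallStack() : size_(0), overflow_(NULL) {}
      ~SmallStack() { delete overflow_; }

      bool empty() const { return size_ == 0; }
      size_t size() const { return size_; }

      void push(const T& value) {
        if (size_ < kInline) {
          inline_[size_] = value;               // Common case: no allocation.
        } else {
          if (overflow_ == NULL) overflow_ = new std::stack<T>();
          overflow_->push(value);               // Rare case: spill to the heap.
        }
        size_++;
      }

      T top() const {
        return (size_ <= kInline) ? inline_[size_ - 1] : overflow_->top();
      }

      void pop() {
        assert(size_ > 0);
        size_--;
        if (size_ >= kInline) overflow_->pop();
      }

     private:
      size_t size_;
      T inline_[kInline];
      std::stack<T>* overflow_;
    };

    int main() {
      SmallStack<int> s;
      for (int i = 0; i < 6; i++) s.push(i);  // Elements 4 and 5 spill.
      assert(s.top() == 5);
      while (!s.empty()) s.pop();
      return 0;
    }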
@@ -635,10 +709,49 @@ class Literal {
 };
 
 
+// Control whether or not position-independent code should be emitted.
+enum PositionIndependentCodeOption {
+  // All code generated will be position-independent; all branches and
+  // references to labels generated with the Label class will use PC-relative
+  // addressing.
+  PositionIndependentCode,
+
+  // Allow VIXL to generate code that refers to absolute addresses. With this
+  // option, it will not be possible to copy the code buffer and run it from a
+  // different address; code must be generated in its final location.
+  PositionDependentCode,
+
+  // Allow VIXL to assume that the bottom 12 bits of the address will be
+  // constant, but that the top 48 bits may change. This allows `adrp` to
+  // function in systems which copy code between pages, but otherwise maintain
+  // 4KB page alignment.
+  PageOffsetDependentCode
+};
+
+
+// Control how scaled- and unscaled-offset loads and stores are generated.
+enum LoadStoreScalingOption {
+  // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
+  // register-offset, pre-index or post-index instructions if necessary.
+  PreferScaledOffset,
+
+  // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
+  // register-offset, pre-index or post-index instructions if necessary.
+  PreferUnscaledOffset,
+
+  // Require scaled-immediate-offset instructions.
+  RequireScaledOffset,
+
+  // Require unscaled-immediate-offset instructions.
+  RequireUnscaledOffset
+};
+
+
 // Assembler.
 class Assembler {
  public:
-  Assembler(byte* buffer, unsigned buffer_size);
+  Assembler(byte* buffer, unsigned buffer_size,
+            PositionIndependentCodeOption pic = PositionIndependentCode);
 
   // The destructor asserts that one of the following is true:
   //  * The Assembler object has not been used.
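Background on why both scaled and unscaled forms exist: AArch64 scaled-offset loads and stores encode an unsigned 12-bit offset in units of the access size, while the unscaled forms (ldur/stur) encode a signed 9-bit byte offset. A rough standalone sketch of the encodability checks that IsImmLSScaled/IsImmLSUnscaled (declared later in this header) must perform — illustrative, not VIXL's actual implementation:

    #include <cassert>
    #include <cstdint>

    // Unscaled-offset forms (ldur/stur) take a signed 9-bit byte offset.
    bool IsImmLSUnscaled(int64_t offset) {
      return (offset >= -256) && (offset <= 255);
    }

    // Scaled-offset forms take an unsigned 12-bit offset in units of the
    // access size (size_log2 = 0 for byte, 1 for half-word, 3 for x-reg, ...).
    bool IsImmLSScaled(int64_t offset, unsigned size_log2) {
      bool aligned = (offset & ((INT64_C(1) << size_log2) - 1)) == 0;
      int64_t scaled = offset >> size_log2;
      return aligned && (scaled >= 0) && (scaled < 4096);
    }

    int main() {
      assert(IsImmLSScaled(32760, 3));   // ldr x0, [x1, #32760] encodes.
      assert(!IsImmLSScaled(-8, 3));     // Negative: needs ldur instead...
      assert(IsImmLSUnscaled(-8));       // ...which encodes -8 directly.
      assert(!IsImmLSUnscaled(256));     // Out of signed 9-bit range.
      return 0;
    }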
@@ -662,12 +775,15 @@ class Assembler {
   // Label.
   // Bind a label to the current PC.
   void bind(Label* label);
-  int UpdateAndGetByteOffsetTo(Label* label);
 
-  inline int UpdateAndGetInstructionOffsetTo(Label* label) {
-    VIXL_ASSERT(Label::kEndOfChain == 0);
-    return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
-  }
+  // Return the address of a bound label.
+  template <typename T>
+  inline T GetLabelAddress(const Label * label) {
+    VIXL_ASSERT(label->IsBound());
+    VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
+    VIXL_STATIC_ASSERT(sizeof(*buffer_) == 1);
+    return reinterpret_cast<T>(buffer_ + label->location());
+  }
 
   // Instruction set functions.
@@ -733,6 +849,12 @@ class Assembler {
   // Calculate the address of a PC offset.
   void adr(const Register& rd, int imm21);
 
+  // Calculate the page address of a label.
+  void adrp(const Register& rd, Label* label);
+
+  // Calculate the page address of a PC offset.
+  void adrp(const Register& rd, int imm21);
+
   // Data Processing instructions.
   // Add.
   void add(const Register& rd,
@@ -1112,31 +1234,76 @@ class Assembler {
   // Memory instructions.
   // Load integer or FP register.
-  void ldr(const CPURegister& rt, const MemOperand& src);
+  void ldr(const CPURegister& rt, const MemOperand& src,
+           LoadStoreScalingOption option = PreferScaledOffset);
 
   // Store integer or FP register.
-  void str(const CPURegister& rt, const MemOperand& dst);
+  void str(const CPURegister& rt, const MemOperand& dst,
+           LoadStoreScalingOption option = PreferScaledOffset);
 
   // Load word with sign extension.
-  void ldrsw(const Register& rt, const MemOperand& src);
+  void ldrsw(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
 
   // Load byte.
-  void ldrb(const Register& rt, const MemOperand& src);
+  void ldrb(const Register& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferScaledOffset);
 
   // Store byte.
-  void strb(const Register& rt, const MemOperand& dst);
+  void strb(const Register& rt, const MemOperand& dst,
+            LoadStoreScalingOption option = PreferScaledOffset);
 
   // Load byte with sign extension.
-  void ldrsb(const Register& rt, const MemOperand& src);
+  void ldrsb(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
 
   // Load half-word.
-  void ldrh(const Register& rt, const MemOperand& src);
+  void ldrh(const Register& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferScaledOffset);
 
   // Store half-word.
-  void strh(const Register& rt, const MemOperand& dst);
+  void strh(const Register& rt, const MemOperand& dst,
+            LoadStoreScalingOption option = PreferScaledOffset);
 
   // Load half-word with sign extension.
-  void ldrsh(const Register& rt, const MemOperand& src);
+  void ldrsh(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Load integer or FP register (with unscaled offset).
+  void ldur(const CPURegister& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store integer or FP register (with unscaled offset).
+  void stur(const CPURegister& rt, const MemOperand& src,
+            LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load word with sign extension.
+  void ldursw(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load byte (with unscaled offset).
+  void ldurb(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store byte (with unscaled offset).
+  void sturb(const Register& rt, const MemOperand& dst,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load byte with sign extension (and unscaled offset).
+  void ldursb(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load half-word (with unscaled offset).
+  void ldurh(const Register& rt, const MemOperand& src,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Store half-word (with unscaled offset).
+  void sturh(const Register& rt, const MemOperand& dst,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Load half-word with sign extension (and unscaled offset).
+  void ldursh(const Register& rt, const MemOperand& src,
+              LoadStoreScalingOption option = PreferUnscaledOffset);
 
   // Load integer or FP register pair.
   void ldp(const CPURegister& rt, const CPURegister& rt2,
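For illustration, the same access emitted with and without scaling, using the declarations above. Register names and MemOperand usage follow this header; note that this disassembly-only subset of libvixl ships the declarations but not the assembler implementation, so the sketch would only link against full VIXL:

    #include "a64/assembler-a64.h"  // As laid out in this tree.

    using namespace vixl;

    // Sketch: three ways to emit a load through the new scaling options.
    void EmitLoads(Assembler* masm) {
      masm->ldr(x0, MemOperand(x1, 8));     // Scaled:   ldr x0, [x1, #8]
      masm->ldur(x0, MemOperand(x1, -8));   // Unscaled: ldur x0, [x1, #-8]
      masm->ldr(x0, MemOperand(x1, -8),
                RequireUnscaledOffset);     // Also produces the ldur encoding.
    }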
@@ -1166,6 +1333,79 @@ class Assembler {
   // Load single precision floating point literal to FP register.
   void ldr(const FPRegister& ft, float imm);
 
+  // Store exclusive byte.
+  void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store exclusive half-word.
+  void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store exclusive register.
+  void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Load exclusive byte.
+  void ldxrb(const Register& rt, const MemOperand& src);
+
+  // Load exclusive half-word.
+  void ldxrh(const Register& rt, const MemOperand& src);
+
+  // Load exclusive register.
+  void ldxr(const Register& rt, const MemOperand& src);
+
+  // Store exclusive register pair.
+  void stxp(const Register& rs,
+            const Register& rt,
+            const Register& rt2,
+            const MemOperand& dst);
+
+  // Load exclusive register pair.
+  void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+  // Store-release exclusive byte.
+  void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store-release exclusive half-word.
+  void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Store-release exclusive register.
+  void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+  // Load-acquire exclusive byte.
+  void ldaxrb(const Register& rt, const MemOperand& src);
+
+  // Load-acquire exclusive half-word.
+  void ldaxrh(const Register& rt, const MemOperand& src);
+
+  // Load-acquire exclusive register.
+  void ldaxr(const Register& rt, const MemOperand& src);
+
+  // Store-release exclusive register pair.
+  void stlxp(const Register& rs,
+             const Register& rt,
+             const Register& rt2,
+             const MemOperand& dst);
+
+  // Load-acquire exclusive register pair.
+  void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+  // Store-release byte.
+  void stlrb(const Register& rt, const MemOperand& dst);
+
+  // Store-release half-word.
+  void stlrh(const Register& rt, const MemOperand& dst);
+
+  // Store-release register.
+  void stlr(const Register& rt, const MemOperand& dst);
+
+  // Load-acquire byte.
+  void ldarb(const Register& rt, const MemOperand& src);
+
+  // Load-acquire half-word.
+  void ldarh(const Register& rt, const MemOperand& src);
+
+  // Load-acquire register.
+  void ldar(const Register& rt, const MemOperand& src);
+
   // Move instructions. The default shift of -1 indicates that the move
   // instruction will calculate an appropriate 16-bit immediate and left shift
   // that is equal to the 64-bit immediate argument. If an explicit left shift
@@ -1214,6 +1454,9 @@ class Assembler {
   // System hint.
   void hint(SystemHint code);
 
+  // Clear exclusive monitor.
+  void clrex(int imm4 = 0xf);
+
   // Data memory barrier.
   void dmb(BarrierDomain domain, BarrierType type);
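The canonical use of the exclusive pair is a load/modify/store-conditional retry loop. A sketch of emitting an atomic word increment with the API above (register choices and the cbnz back-branch are illustrative, and as before the assembler implementation is not part of this subset):

    #include "a64/assembler-a64.h"  // As laid out in this tree.

    using namespace vixl;

    // Sketch: atomically increment the word at [x0] with a classic
    // ldxr/stxr retry loop. The status register (w2) is zero on success.
    void EmitAtomicIncrement(Assembler* masm) {
      Label retry;
      masm->bind(&retry);
      masm->ldxr(w1, MemOperand(x0));      // Load and mark exclusive.
      masm->add(w1, w1, Operand(1));       // Modify.
      masm->stxr(w2, w1, MemOperand(x0));  // Try to store; w2 = 0 on success.
      masm->cbnz(w2, &retry);              // Lost exclusivity? Try again.
    }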
@@ -1429,6 +1672,11 @@ class Assembler {
     return rt2.code() << Rt2_offset;
   }
 
+  static Instr Rs(CPURegister rs) {
+    VIXL_ASSERT(rs.code() != kSPRegInternalCode);
+    return rs.code() << Rs_offset;
+  }
+
   // These encoding functions allow the stack pointer to be encoded, and
   // disallow the zero register.
   static Instr RdSP(Register rd) {
@@ -1619,6 +1867,11 @@ class Assembler {
     return imm7 << ImmHint_offset;
   }
 
+  static Instr CRm(int imm4) {
+    VIXL_ASSERT(is_uint4(imm4));
+    return imm4 << CRm_offset;
+  }
+
   static Instr ImmBarrierDomain(int imm2) {
     VIXL_ASSERT(is_uint2(imm2));
     return imm2 << ImmBarrierDomain_offset;
@@ -1660,16 +1913,20 @@ class Assembler {
   }
 
   // Size of the code generated in bytes
-  uint64_t SizeOfCodeGenerated() const {
+  size_t SizeOfCodeGenerated() const {
     VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
     return pc_ - buffer_;
   }
 
   // Size of the code generated since label to the current position.
-  uint64_t SizeOfCodeGeneratedSince(Label* label) const {
+  size_t SizeOfCodeGeneratedSince(Label* label) const {
+    size_t pc_offset = SizeOfCodeGenerated();
+
     VIXL_ASSERT(label->IsBound());
-    VIXL_ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
-    return pc_ - label->target();
+    VIXL_ASSERT(pc_offset >= static_cast<size_t>(label->location()));
+    VIXL_ASSERT(pc_offset < buffer_size_);
+
+    return pc_offset - label->location();
   }
 
@@ -1693,6 +1950,15 @@ class Assembler {
   void EmitLiteralPool(LiteralPoolEmitOption option = NoJumpRequired);
   size_t LiteralPoolSize();
 
+  inline PositionIndependentCodeOption pic() {
+    return pic_;
+  }
+
+  inline bool AllowPageOffsetDependentCode() {
+    return (pic() == PageOffsetDependentCode) ||
+           (pic() == PositionDependentCode);
+  }
+
  protected:
   inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const {
     return reg.Is64Bits() ? xzr : wzr;
@@ -1701,7 +1967,8 @@ class Assembler {
 
   void LoadStore(const CPURegister& rt,
                  const MemOperand& addr,
-                 LoadStoreOp op);
+                 LoadStoreOp op,
+                 LoadStoreScalingOption option = PreferScaledOffset);
   static bool IsImmLSUnscaled(ptrdiff_t offset);
   static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
@@ -1717,9 +1984,9 @@ class Assembler {
                LogicalOp op);
   static bool IsImmLogical(uint64_t value,
                            unsigned width,
-                           unsigned* n,
-                           unsigned* imm_s,
-                           unsigned* imm_r);
+                           unsigned* n = NULL,
+                           unsigned* imm_s = NULL,
+                           unsigned* imm_r = NULL);
 
   void ConditionalCompare(const Register& rn,
                           const Operand& operand,
@@ -1823,6 +2090,17 @@ class Assembler {
 
   void RecordLiteral(int64_t imm, unsigned size);
 
+  // Link the current (not-yet-emitted) instruction to the specified label,
+  // then return an offset to be encoded in the instruction. If the label is
+  // not yet bound, an offset of 0 is returned.
+  ptrdiff_t LinkAndGetByteOffsetTo(Label * label);
+  ptrdiff_t LinkAndGetInstructionOffsetTo(Label * label);
+  ptrdiff_t LinkAndGetPageOffsetTo(Label * label);
+
+  // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+  template <int element_size>
+  ptrdiff_t LinkAndGetOffsetTo(Label* label);
+
   // Emit the instruction at pc_.
   void Emit(Instr instruction) {
     VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
@@ -1864,12 +2142,15 @@ class Assembler {
   // The buffer into which code and relocation info are generated.
   Instruction* buffer_;
   // Buffer size, in bytes.
-  unsigned buffer_size_;
+  size_t buffer_size_;
   Instruction* pc_;
   std::list<Literal*> literals_;
   Instruction* next_literal_pool_check_;
   unsigned literal_pool_monitor_;
 
+  PositionIndependentCodeOption pic_;
+
+  friend class Label;
   friend class BlockLiteralPoolScope;
 
 #ifdef DEBUG
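Putting the new Label plumbing together: labels are now buffer offsets rather than instruction pointers, and GetLabelAddress (declared earlier in this header) recovers a pointer only once the label is bound. A usage sketch, under the same caveat that the assembler sources are not part of this subset:

    #include "a64/assembler-a64.h"  // As laid out in this tree.

    using namespace vixl;

    // Sketch: branch backwards over a small loop body, then recover the
    // loop entry address from the bound label.
    void EmitCountdownLoop(Assembler* masm) {
      Label loop;
      masm->bind(&loop);              // bind() stores a buffer offset.
      masm->sub(x0, x0, Operand(1));
      masm->cbnz(x0, &loop);          // Resolves immediately: already bound.
      Instruction* entry = masm->GetLabelAddress<Instruction*>(&loop);
      (void)entry;  // e.g. hand to a disassembler or patching routine.
    }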
diff --git a/disas/libvixl/a64/constants-a64.h b/disas/libvixl/a64/constants-a64.h
index 99677c1..7a14f85 100644
--- a/disas/libvixl/a64/constants-a64.h
+++ b/disas/libvixl/a64/constants-a64.h
@@ -46,13 +46,13 @@ R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
 
 #define INSTRUCTION_FIELDS_LIST(V_)                                          \
 /* Register fields */                                                        \
-V_(Rd, 4, 0, Bits)            /* Destination register.        */            \
-V_(Rn, 9, 5, Bits)            /* First source register.       */            \
-V_(Rm, 20, 16, Bits)          /* Second source register.      */            \
-V_(Ra, 14, 10, Bits)          /* Third source register.       */            \
-V_(Rt, 4, 0, Bits)            /* Load dest / store source.    */            \
-V_(Rt2, 14, 10, Bits)         /* Load second dest /           */            \
-                              /* store second source.         */            \
+V_(Rd, 4, 0, Bits)            /* Destination register.        */            \
+V_(Rn, 9, 5, Bits)            /* First source register.       */            \
+V_(Rm, 20, 16, Bits)          /* Second source register.      */            \
+V_(Ra, 14, 10, Bits)          /* Third source register.       */            \
+V_(Rt, 4, 0, Bits)            /* Load/store register.         */            \
+V_(Rt2, 14, 10, Bits)         /* Load/store second register.  */            \
+V_(Rs, 20, 16, Bits)          /* Exclusive access status.     */            \
 V_(PrefetchMode, 4, 0, Bits)                                                 \
                                                                              \
 /* Common bits */                                                            \
@@ -126,6 +126,13 @@ V_(SysOp1, 18, 16, Bits)                                  \
 V_(SysOp2, 7, 5, Bits)                                                       \
 V_(CRn, 15, 12, Bits)                                                        \
 V_(CRm, 11, 8, Bits)                                                         \
+                                                                             \
+/* Load-/store-exclusive */                                                  \
+V_(LdStXLoad, 22, 22, Bits)                                                  \
+V_(LdStXNotExclusive, 23, 23, Bits)                                          \
+V_(LdStXAcquireRelease, 15, 15, Bits)                                        \
+V_(LdStXSizeLog2, 31, 30, Bits)                                              \
+V_(LdStXPair, 21, 21, Bits)                                                  \
 
 
 #define SYSTEM_REGISTER_FIELDS_LIST(V_, M_)                                  \
@@ -585,6 +592,13 @@ enum MemBarrierOp {
   ISB = MemBarrierFixed | 0x00000040
 };
 
+enum SystemExclusiveMonitorOp {
+  SystemExclusiveMonitorFixed = 0xD503305F,
+  SystemExclusiveMonitorFMask = 0xFFFFF0FF,
+  SystemExclusiveMonitorMask  = 0xFFFFF0FF,
+  CLREX                       = SystemExclusiveMonitorFixed
+};
+
 // Any load or store.
 enum LoadStoreAnyOp {
   LoadStoreAnyFMask = 0x0a000000,
@@ -702,7 +716,7 @@ enum LoadStoreUnscaledOffsetOp {
 
 // Load/store (post, pre, offset and unsigned.)
 enum LoadStoreOp {
-  LoadStoreOpMask   = 0xC4C00000,
+  LoadStoreOpMask = 0xC4C00000,
 #define LOAD_STORE(A, B, C, D) \
   A##B##_##C = D
   LOAD_STORE_OP_LIST(LOAD_STORE),
@@ -756,6 +770,44 @@ enum LoadStoreRegisterOffset {
 #undef LOAD_STORE_REGISTER_OFFSET
 };
 
+enum LoadStoreExclusive {
+  LoadStoreExclusiveFixed = 0x08000000,
+  LoadStoreExclusiveFMask = 0x3F000000,
+  LoadStoreExclusiveMask  = 0xFFE08000,
+  STXRB_w  = LoadStoreExclusiveFixed | 0x00000000,
+  STXRH_w  = LoadStoreExclusiveFixed | 0x40000000,
+  STXR_w   = LoadStoreExclusiveFixed | 0x80000000,
+  STXR_x   = LoadStoreExclusiveFixed | 0xC0000000,
+  LDXRB_w  = LoadStoreExclusiveFixed | 0x00400000,
+  LDXRH_w  = LoadStoreExclusiveFixed | 0x40400000,
+  LDXR_w   = LoadStoreExclusiveFixed | 0x80400000,
+  LDXR_x   = LoadStoreExclusiveFixed | 0xC0400000,
+  STXP_w   = LoadStoreExclusiveFixed | 0x80200000,
+  STXP_x   = LoadStoreExclusiveFixed | 0xC0200000,
+  LDXP_w   = LoadStoreExclusiveFixed | 0x80600000,
+  LDXP_x   = LoadStoreExclusiveFixed | 0xC0600000,
+  STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
+  STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
+  STLXR_w  = LoadStoreExclusiveFixed | 0x80008000,
+  STLXR_x  = LoadStoreExclusiveFixed | 0xC0008000,
+  LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
+  LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
+  LDAXR_w  = LoadStoreExclusiveFixed | 0x80408000,
+  LDAXR_x  = LoadStoreExclusiveFixed | 0xC0408000,
+  STLXP_w  = LoadStoreExclusiveFixed | 0x80208000,
+  STLXP_x  = LoadStoreExclusiveFixed | 0xC0208000,
+  LDAXP_w  = LoadStoreExclusiveFixed | 0x80608000,
+  LDAXP_x  = LoadStoreExclusiveFixed | 0xC0608000,
+  STLRB_w  = LoadStoreExclusiveFixed | 0x00808000,
+  STLRH_w  = LoadStoreExclusiveFixed | 0x40808000,
+  STLR_w   = LoadStoreExclusiveFixed | 0x80808000,
+  STLR_x   = LoadStoreExclusiveFixed | 0xC0808000,
+  LDARB_w  = LoadStoreExclusiveFixed | 0x00C08000,
+  LDARH_w  = LoadStoreExclusiveFixed | 0x40C08000,
+  LDAR_w   = LoadStoreExclusiveFixed | 0x80C08000,
+  LDAR_x   = LoadStoreExclusiveFixed | 0xC0C08000
+};
+
 // Conditional compare.
 enum ConditionalCompareOp {
   ConditionalCompareMask = 0x60000000,
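A standalone sketch of how these masks classify an instruction word. The example word is hand-assembled from the field layout above (size 10, Rs=2, Rt2=0b11111, Rn=0, Rt=1), so treat it as illustrative and verify before relying on it:

    #include <cassert>
    #include <cstdint>

    // Values copied from the LoadStoreExclusive enum in this patch.
    const uint32_t kLoadStoreExclusiveFixed = 0x08000000;
    const uint32_t kLoadStoreExclusiveFMask = 0x3F000000;
    const uint32_t kLoadStoreExclusiveMask  = 0xFFE08000;
    const uint32_t kSTXR_w = kLoadStoreExclusiveFixed | 0x80000000;

    bool IsLoadStoreExclusive(uint32_t instr) {
      return (instr & kLoadStoreExclusiveFMask) == kLoadStoreExclusiveFixed;
    }

    int main() {
      uint32_t instr = 0x88027C01;  // stxr w2, w1, [x0] (hand-encoded).
      assert(IsLoadStoreExclusive(instr));
      assert((instr & kLoadStoreExclusiveMask) == kSTXR_w);
      // Field extraction, mirroring the new Rs/Rt definitions above.
      unsigned rs = (instr >> 16) & 0x1F;
      unsigned rt = instr & 0x1F;
      assert((rs == 2) && (rt == 1));
      return 0;
    }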
diff --git a/disas/libvixl/a64/cpu-a64.h b/disas/libvixl/a64/cpu-a64.h
index dfd8f01..59b7974 100644
--- a/disas/libvixl/a64/cpu-a64.h
+++ b/disas/libvixl/a64/cpu-a64.h
@@ -28,6 +28,7 @@
 #define VIXL_CPU_A64_H
 
 #include "globals.h"
+#include "instructions-a64.h"
 
 namespace vixl {
 
@@ -42,6 +43,32 @@ class CPU {
   // safely run.
   static void EnsureIAndDCacheCoherency(void *address, size_t length);
 
+  // Handle tagged pointers.
+  template <typename T>
+  static T SetPointerTag(T pointer, uint64_t tag) {
+    VIXL_ASSERT(is_uintn(kAddressTagWidth, tag));
+
+    // Use C-style casts to get static_cast behaviour for integral types (T),
+    // and reinterpret_cast behaviour for other types.
+
+    uint64_t raw = (uint64_t)pointer;
+    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+    raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+    return (T)raw;
+  }
+
+  template <typename T>
+  static uint64_t GetPointerTag(T pointer) {
+    // Use C-style casts to get static_cast behaviour for integral types (T),
+    // and reinterpret_cast behaviour for other types.
+
+    uint64_t raw = (uint64_t)pointer;
+    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+    return (raw & kAddressTagMask) >> kAddressTagOffset;
+  }
+
  private:
   // Return the content of the cache type register.
   static uint32_t GetCacheType();
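These helpers rely on AArch64's tagged-address convention, where the top byte (bits 63:56) of a pointer can carry metadata that the hardware ignores for address translation. The same mask arithmetic as a standalone sketch (constants mirror the kAddressTag* values added to instructions-a64.h later in this patch; the pointer value is made up):

    #include <cassert>
    #include <cstdint>

    const unsigned kAddressTagOffset = 56;
    const unsigned kAddressTagWidth  = 8;
    const uint64_t kAddressTagMask =
        ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;

    uint64_t SetTag(uint64_t raw, uint64_t tag) {
      return (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
    }

    uint64_t GetTag(uint64_t raw) {
      return (raw & kAddressTagMask) >> kAddressTagOffset;
    }

    int main() {
      uint64_t p = UINT64_C(0x0000007fdeadbeef);
      uint64_t tagged = SetTag(p, 0x5a);
      assert(tagged == UINT64_C(0x5a00007fdeadbeef));
      assert(GetTag(tagged) == 0x5a);
      assert(SetTag(tagged, 0) == p);  // Stripping the tag restores the address.
      return 0;
    }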
diff --git a/disas/libvixl/a64/decoder-a64.cc b/disas/libvixl/a64/decoder-a64.cc
index 8450eb3..5831b73 100644
--- a/disas/libvixl/a64/decoder-a64.cc
+++ b/disas/libvixl/a64/decoder-a64.cc
@@ -171,9 +171,9 @@ void Decoder::DecodePCRelAddressing(Instruction* instr) {
 
 void Decoder::DecodeBranchSystemException(Instruction* instr) {
   VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
-             (instr->Bits(27, 24) == 0x5) ||
-             (instr->Bits(27, 24) == 0x6) ||
-             (instr->Bits(27, 24) == 0x7) );
+              (instr->Bits(27, 24) == 0x5) ||
+              (instr->Bits(27, 24) == 0x6) ||
+              (instr->Bits(27, 24) == 0x7) );
 
   switch (instr->Bits(31, 29)) {
     case 0:
@@ -272,16 +272,15 @@ void Decoder::DecodeBranchSystemException(Instruction* instr) {
 
 void Decoder::DecodeLoadStore(Instruction* instr) {
   VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
-             (instr->Bits(27, 24) == 0x9) ||
-             (instr->Bits(27, 24) == 0xC) ||
-             (instr->Bits(27, 24) == 0xD) );
+              (instr->Bits(27, 24) == 0x9) ||
+              (instr->Bits(27, 24) == 0xC) ||
+              (instr->Bits(27, 24) == 0xD) );
 
   if (instr->Bit(24) == 0) {
     if (instr->Bit(28) == 0) {
       if (instr->Bit(29) == 0) {
         if (instr->Bit(26) == 0) {
-          // TODO: VisitLoadStoreExclusive.
-          VisitUnimplemented(instr);
+          VisitLoadStoreExclusive(instr);
         } else {
           DecodeAdvSIMDLoadStore(instr);
         }
diff --git a/disas/libvixl/a64/decoder-a64.h b/disas/libvixl/a64/decoder-a64.h
index bbbbd81..72c1519 100644
--- a/disas/libvixl/a64/decoder-a64.h
+++ b/disas/libvixl/a64/decoder-a64.h
@@ -59,6 +59,7 @@
   V(LoadStorePreIndex)        \
   V(LoadStoreRegisterOffset)  \
   V(LoadStoreUnsignedOffset)  \
+  V(LoadStoreExclusive)       \
   V(LogicalShifted)           \
   V(AddSubShifted)            \
   V(AddSubExtended)           \
diff --git a/disas/libvixl/a64/disasm-a64.cc b/disas/libvixl/a64/disasm-a64.cc
index f81ce4b..248ebfd 100644
--- a/disas/libvixl/a64/disasm-a64.cc
+++ b/disas/libvixl/a64/disasm-a64.cc
@@ -24,6 +24,7 @@
 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <cstdlib>
 #include "a64/disasm-a64.h"
 
 namespace vixl {
@@ -529,7 +530,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
 void Disassembler::VisitPCRelAddressing(Instruction* instr) {
   switch (instr->Mask(PCRelAddressingMask)) {
     case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
-    // ADRP is not implemented.
+    case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
     default: Format(instr, "unimplemented", "(PCRelAddressing)");
   }
 }
@@ -943,6 +944,49 @@ void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
 }
 
 
+void Disassembler::VisitLoadStoreExclusive(Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+  const char *form;
+
+  switch (instr->Mask(LoadStoreExclusiveMask)) {
+    case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+    case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
+    case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
+    case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
+    case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
+    case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+    case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+    case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+    case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+    case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+    case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+    case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
+    case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
+    case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
+    case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
+    case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+    case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+    case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+    case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+    case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
+    case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
+    case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
+    case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
+    case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
+    case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
+    case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
+    case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
+    default: form = "(LoadStoreExclusive)";
+  }
+  Format(instr, mnemonic, form);
+}
+
+
 void Disassembler::VisitFPCompare(Instruction* instr) {
   const char *mnemonic = "unimplemented";
   const char *form = "'Fn, 'Fm";
@@ -1162,7 +1206,15 @@ void Disassembler::VisitSystem(Instruction* instr) {
   const char *mnemonic = "unimplemented";
   const char *form = "(System)";
 
-  if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+  if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+    switch (instr->Mask(SystemExclusiveMonitorMask)) {
+      case CLREX: {
+        mnemonic = "clrex";
+        form = (instr->CRm() == 0xf) ? NULL : "'IX";
+        break;
+      }
+    }
+  } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
     switch (instr->Mask(SystemSysRegMask)) {
      case MRS: {
        mnemonic = "mrs";
@@ -1184,7 +1236,6 @@ void Disassembler::VisitSystem(Instruction* instr) {
      }
    }
   } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
-    VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
     switch (instr->ImmHint()) {
       case NOP: {
         mnemonic = "nop";
@@ -1312,6 +1363,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
     case 'n': reg_num = instr->Rn(); break;
     case 'm': reg_num = instr->Rm(); break;
     case 'a': reg_num = instr->Ra(); break;
+    case 's': reg_num = instr->Rs(); break;
     case 't': {
       if (format[2] == '2') {
         reg_num = instr->Rt2();
@@ -1458,6 +1510,10 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
       AppendToOutput("#0x%" PRIx64, instr->ImmException());
       return 6;
     }
+    case 'X': {  // IX - CLREX instruction.
+      AppendToOutput("#0x%" PRIx64, instr->CRm());
+      return 2;
+    }
     default: {
       VIXL_UNIMPLEMENTED();
       return 0;
@@ -1564,21 +1620,20 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
 
 int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
                                               const char* format) {
-  USE(format);
-  VIXL_ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+  VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) ||  // Used by `adr`.
+              (strcmp(format, "AddrPCRelPage") == 0));   // Used by `adrp`.
 
-  int offset = instr->ImmPCRel();
+  int64_t offset = instr->ImmPCRel();
+  Instruction * base = instr;
 
-  // Only ADR (AddrPCRelByte) is supported.
-  VIXL_ASSERT(strcmp(format, "AddrPCRelByte") == 0);
-
-  char sign = '+';
-  if (offset < 0) {
-    offset = -offset;
-    sign = '-';
+  if (format[9] == 'P') {
+    offset *= kPageSize;
+    base = AlignDown(base, kPageSize);
   }
-  VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
-  AppendToOutput("#%c0x%x (addr %p)", sign, offset, instr + offset);
+
+  char sign = (offset < 0) ? '-' : '+';
+  void * target = reinterpret_cast<void *>(base + offset);
+  AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, std::abs(offset), target);
   return 13;
 }
 
@@ -1606,7 +1661,8 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
     sign = '-';
   }
   VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
-  AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, instr + offset);
+  void * address = reinterpret_cast<void *>(instr + offset);
+  AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, address);
   return 8;
 }
diff --git a/disas/libvixl/a64/disasm-a64.h b/disas/libvixl/a64/disasm-a64.h
index 3a56e15..06ee43f 100644
--- a/disas/libvixl/a64/disasm-a64.h
+++ b/disas/libvixl/a64/disasm-a64.h
@@ -85,7 +85,7 @@ class Disassembler: public DecoderVisitor {
   bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
 
   void ResetOutput();
-  void AppendToOutput(const char* string, ...);
+  void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
 
   char* buffer_;
   uint32_t buffer_pos_;
diff --git a/disas/libvixl/a64/instructions-a64.cc b/disas/libvixl/a64/instructions-a64.cc
index c4eb7c4..e9caceb 100644
--- a/disas/libvixl/a64/instructions-a64.cc
+++ b/disas/libvixl/a64/instructions-a64.cc
@@ -149,17 +149,24 @@ LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
 
 Instruction* Instruction::ImmPCOffsetTarget() {
+  Instruction * base = this;
   ptrdiff_t offset;
   if (IsPCRelAddressing()) {
-    // PC-relative addressing. Only ADR is supported.
+    // ADR and ADRP.
     offset = ImmPCRel();
+    if (Mask(PCRelAddressingMask) == ADRP) {
+      base = AlignDown(base, kPageSize);
+      offset *= kPageSize;
+    } else {
+      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
+    }
   } else {
     // All PC-relative branches.
     VIXL_ASSERT(BranchType() != UnknownBranchType);
     // Relative branch offsets are instruction-size-aligned.
     offset = ImmBranch() << kInstructionSizeLog2;
   }
-  return this + offset;
+  return base + offset;
 }
 
@@ -185,10 +192,16 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
 
 void Instruction::SetPCRelImmTarget(Instruction* target) {
-  // ADRP is not supported, so 'this' must point to an ADR instruction.
-  VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
-
-  Instr imm = Assembler::ImmPCRelAddress(target - this);
+  int32_t imm21;
+  if ((Mask(PCRelAddressingMask) == ADR)) {
+    imm21 = target - this;
+  } else {
+    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
+    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
+    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
+    imm21 = target_page - this_page;
+  }
+  Instr imm = Assembler::ImmPCRelAddress(imm21);
 
   SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
 }
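The adrp arithmetic above reduces to: target = AlignDown(pc, 4KB) + imm21 * 4KB, with the immediate computed as a page-number delta. A standalone sketch of both directions (function names and the example addresses are illustrative):

    #include <cassert>
    #include <cstdint>

    const uint64_t kPageSize = 4096;  // adrp's nominal page size (see below).

    // Forward direction, as in ImmPCOffsetTarget: the 21-bit immediate
    // counts 4KB pages from the page containing the instruction.
    uint64_t AdrpTarget(uint64_t pc, int64_t imm21) {
      uint64_t page_base = pc & ~(kPageSize - 1);  // AlignDown(pc, kPageSize)
      return page_base + imm21 * kPageSize;
    }

    // Inverse direction, as in SetPCRelImmTarget: a page-number delta.
    int64_t AdrpImmediate(uint64_t pc, uint64_t target) {
      return (int64_t)(target / kPageSize) - (int64_t)(pc / kPageSize);
    }

    int main() {
      uint64_t pc = 0x400abc;
      uint64_t target = 0x513000;
      int64_t imm = AdrpImmediate(pc, target);   // 0x513 - 0x400 = 0x113 pages
      assert(imm == 0x113);
      assert(AdrpTarget(pc, imm) == 0x513000);   // Page-aligned result.
      return 0;
    }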
diff --git a/disas/libvixl/a64/instructions-a64.h b/disas/libvixl/a64/instructions-a64.h
index a4240d7..d5b90c5 100644
--- a/disas/libvixl/a64/instructions-a64.h
+++ b/disas/libvixl/a64/instructions-a64.h
@@ -41,6 +41,10 @@ const unsigned kLiteralEntrySize = 4;
 const unsigned kLiteralEntrySizeLog2 = 2;
 const unsigned kMaxLoadLiteralRange = 1 * MBytes;
 
+// This is the nominal page size (as used by the adrp instruction); the actual
+// size of the memory pages allocated by the kernel is likely to differ.
+const unsigned kPageSize = 4 * KBytes;
+
 const unsigned kWRegSize = 32;
 const unsigned kWRegSizeLog2 = 5;
 const unsigned kWRegSizeInBytes = kWRegSize / 8;
@@ -79,6 +83,12 @@ const unsigned kZeroRegCode = 31;
 const unsigned kSPRegInternalCode = 63;
 const unsigned kRegCodeMask = 0x1f;
 
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask =
+    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
+VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
+
 // AArch64 floating-point specifics. These match IEEE-754.
 const unsigned kDoubleMantissaBits = 52;
 const unsigned kDoubleExponentBits = 11;
diff --git a/disas/libvixl/platform.h b/disas/libvixl/platform.h
index b5c2085..de2b110 100644
--- a/disas/libvixl/platform.h
+++ b/disas/libvixl/platform.h
@@ -28,14 +28,10 @@
 #define PLATFORM_H
 
 // Define platform specific functionalities.
+#include <signal.h>
 
 namespace vixl {
-#ifdef USE_SIMULATOR
-// Currently we assume running the simulator implies running on x86 hardware.
-inline void HostBreakpoint() { asm("int3"); }
-#else
-inline void HostBreakpoint() { asm("brk"); }
-#endif
+inline void HostBreakpoint() { raise(SIGINT); }
 }  // namespace vixl
 
 #endif
diff --git a/disas/libvixl/utils.cc b/disas/libvixl/utils.cc
index c9c05d1..4d4fcbd 100644
--- a/disas/libvixl/utils.cc
+++ b/disas/libvixl/utils.cc
@@ -124,4 +124,14 @@ int CountSetBits(uint64_t value, int width) {
 
   return value;
 }
+
+
+uint64_t LowestSetBit(uint64_t value) {
+  return value & -value;
+}
+
+
+bool IsPowerOf2(int64_t value) {
+  return (value != 0) && ((value & (value - 1)) == 0);
+}
 }  // namespace vixl
diff --git a/disas/libvixl/utils.h b/disas/libvixl/utils.h
index 83c928c..b472f0e 100644
--- a/disas/libvixl/utils.h
+++ b/disas/libvixl/utils.h
@@ -33,6 +33,14 @@
 
 namespace vixl {
 
+// Macros for compile-time format checking.
+#if defined(__GNUC__)
+#define PRINTF_CHECK(format_index, varargs_index) \
+  __attribute__((format(printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
 // Check number width.
 inline bool is_intn(unsigned n, int64_t x) {
   VIXL_ASSERT((0 < n) && (n < 64));
@@ -155,6 +163,8 @@ int CountLeadingZeros(uint64_t value, int width);
 int CountLeadingSignBits(int64_t value, int width);
 int CountTrailingZeros(uint64_t value, int width);
 int CountSetBits(uint64_t value, int width);
+uint64_t LowestSetBit(uint64_t value);
+bool IsPowerOf2(int64_t value);
 
 // Pointer alignment
 // TODO: rename/refactor to make it specific to instructions.
@@ -167,21 +177,31 @@ bool IsWordAligned(T pointer) {
 // Increment a pointer until it has the specified alignment.
 template<class T>
 T AlignUp(T pointer, size_t alignment) {
-  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
-  uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+  // Use C-style casts to get static_cast behaviour for integral types (T),
+  // and reinterpret_cast behaviour for other types.
+
+  uintptr_t pointer_raw = (uintptr_t)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
+
   size_t align_step = (alignment - pointer_raw) % alignment;
   VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
-  return reinterpret_cast<T>(pointer_raw + align_step);
+
+  return (T)(pointer_raw + align_step);
 }
 
 // Decrement a pointer until it has the specified alignment.
 template<class T>
 T AlignDown(T pointer, size_t alignment) {
-  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
-  uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
+  // Use C-style casts to get static_cast behaviour for integral types (T),
+  // and reinterpret_cast behaviour for other types.
+
+  uintptr_t pointer_raw = (uintptr_t)pointer;
+  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(pointer_raw));
+
   size_t align_step = pointer_raw % alignment;
   VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
-  return reinterpret_cast<T>(pointer_raw - align_step);
+
+  return (T)(pointer_raw - align_step);
 }
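Both new utilities are standard two's-complement identities: value & -value isolates the lowest set bit, and a power of two is the unique nonzero value that value & (value - 1) clears to zero. A quick standalone check (copies of the helpers added to utils.cc above; the test values are illustrative):

    #include <cassert>
    #include <cstdint>

    uint64_t LowestSetBit(uint64_t value) {
      return value & -value;  // Two's complement isolates the lowest set bit.
    }

    bool IsPowerOf2(int64_t value) {
      // A power of two has exactly one set bit; value & (value - 1) clears it.
      return (value != 0) && ((value & (value - 1)) == 0);
    }

    int main() {
      assert(LowestSetBit(0xb0) == 0x10);  // 0b10110000 -> 0b00010000
      assert(IsPowerOf2(4096));            // e.g. kPageSize
      assert(!IsPowerOf2(0) && !IsPowerOf2(48));
      return 0;
    }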