Diffstat (limited to 'contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp')
-rw-r--r--  contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp | 379
 1 file changed, 228 insertions(+), 151 deletions(-)
diff --git a/contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp b/contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp
index 95fc817..1eb73ee 100644
--- a/contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp
+++ b/contrib/llvm/tools/lldb/source/Symbol/FuncUnwinders.cpp
@@ -11,6 +11,7 @@
#include "lldb/Core/Address.h"
#include "lldb/Symbol/FuncUnwinders.h"
#include "lldb/Symbol/DWARFCallFrameInfo.h"
+#include "lldb/Symbol/CompactUnwindInfo.h"
#include "lldb/Symbol/ObjectFile.h"
#include "lldb/Symbol/UnwindPlan.h"
#include "lldb/Symbol/UnwindTable.h"
@@ -24,145 +25,212 @@
using namespace lldb;
using namespace lldb_private;
+//------------------------------------------------
+/// constructor
+//------------------------------------------------
-FuncUnwinders::FuncUnwinders
-(
- UnwindTable& unwind_table,
- AddressRange range
-) :
- m_unwind_table(unwind_table),
- m_range(range),
+FuncUnwinders::FuncUnwinders (UnwindTable& unwind_table, AddressRange range) :
+ m_unwind_table (unwind_table),
+ m_range (range),
m_mutex (Mutex::eMutexTypeRecursive),
- m_unwind_plan_call_site_sp (),
- m_unwind_plan_non_call_site_sp (),
+ m_unwind_plan_assembly_sp (),
+ m_unwind_plan_eh_frame_sp (),
+ m_unwind_plan_eh_frame_augmented_sp (),
+ m_unwind_plan_compact_unwind (),
m_unwind_plan_fast_sp (),
m_unwind_plan_arch_default_sp (),
- m_tried_unwind_at_call_site (false),
- m_tried_unwind_at_non_call_site (false),
+ m_unwind_plan_arch_default_at_func_entry_sp (),
+ m_tried_unwind_plan_assembly (false),
+ m_tried_unwind_plan_eh_frame (false),
+ m_tried_unwind_plan_eh_frame_augmented (false),
+ m_tried_unwind_plan_compact_unwind (false),
m_tried_unwind_fast (false),
m_tried_unwind_arch_default (false),
m_tried_unwind_arch_default_at_func_entry (false),
- m_first_non_prologue_insn()
+ m_first_non_prologue_insn ()
{
}
-FuncUnwinders::~FuncUnwinders ()
+//------------------------------------------------
+/// destructor
+//------------------------------------------------
+
+FuncUnwinders::~FuncUnwinders ()
{
}
UnwindPlanSP
-FuncUnwinders::GetUnwindPlanAtCallSite (int current_offset)
+FuncUnwinders::GetUnwindPlanAtCallSite (Target &target, int current_offset)
{
- // Lock the mutex to ensure we can always give out the most appropriate
- // information. We want to make sure if someone requests a call site unwind
- // plan, that they get one and don't run into a race condition where one
- // thread has started to create the unwind plan and has put it into
- // m_unwind_plan_call_site_sp, and have another thread enter this function
- // and return the partially filled in m_unwind_plan_call_site_sp pointer.
- // We also want to make sure that we lock out other unwind plans from
- // being accessed until this one is done creating itself in case someone
- // had some code like:
- // UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
- // if (best_unwind_plan == NULL)
- // best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
Mutex::Locker locker (m_mutex);
- if (m_tried_unwind_at_call_site == false && m_unwind_plan_call_site_sp.get() == nullptr)
+
+ UnwindPlanSP unwind_plan_sp = GetEHFrameUnwindPlan (target, current_offset);
+ if (unwind_plan_sp.get() == nullptr)
{
- m_tried_unwind_at_call_site = true;
- // We have cases (e.g. with _sigtramp on Mac OS X) where the hand-written eh_frame unwind info for a
- // function does not cover the entire range of the function and so the FDE only lists a subset of the
- // address range. If we try to look up the unwind info by the starting address of the function
- // (i.e. m_range.GetBaseAddress()) we may not find the eh_frame FDE. We need to use the actual byte offset
- // into the function when looking it up.
-
- if (m_range.GetBaseAddress().IsValid())
- {
- Address current_pc (m_range.GetBaseAddress ());
- if (current_offset != -1)
- current_pc.SetOffset (current_pc.GetOffset() + current_offset);
+ unwind_plan_sp = GetCompactUnwindUnwindPlan (target, current_offset);
+ }
+
+ return unwind_plan_sp;
+}
- DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo();
- if (eh_frame)
+UnwindPlanSP
+FuncUnwinders::GetCompactUnwindUnwindPlan (Target &target, int current_offset)
+{
+ if (m_unwind_plan_compact_unwind.size() > 0)
+ return m_unwind_plan_compact_unwind[0]; // FIXME support multiple compact unwind plans for one func
+ if (m_tried_unwind_plan_compact_unwind)
+ return UnwindPlanSP();
+
+ Mutex::Locker lock (m_mutex);
+ m_tried_unwind_plan_compact_unwind = true;
+ if (m_range.GetBaseAddress().IsValid())
+ {
+ Address current_pc (m_range.GetBaseAddress ());
+ if (current_offset != -1)
+ current_pc.SetOffset (current_pc.GetOffset() + current_offset);
+ CompactUnwindInfo *compact_unwind = m_unwind_table.GetCompactUnwindInfo();
+ if (compact_unwind)
+ {
+ UnwindPlanSP unwind_plan_sp (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (compact_unwind->GetUnwindPlan (target, current_pc, *unwind_plan_sp))
{
- m_unwind_plan_call_site_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
- if (!eh_frame->GetUnwindPlan (current_pc, *m_unwind_plan_call_site_sp))
- m_unwind_plan_call_site_sp.reset();
+ m_unwind_plan_compact_unwind.push_back (unwind_plan_sp);
+ return m_unwind_plan_compact_unwind[0]; // FIXME support multiple compact unwind plans for one func
}
}
}
- return m_unwind_plan_call_site_sp;
+ return UnwindPlanSP();
}
UnwindPlanSP
-FuncUnwinders::GetUnwindPlanAtNonCallSite (Target& target, Thread& thread, int current_offset)
+FuncUnwinders::GetEHFrameUnwindPlan (Target &target, int current_offset)
{
- // Lock the mutex to ensure we can always give out the most appropriate
- // information. We want to make sure if someone requests an unwind
- // plan, that they get one and don't run into a race condition where one
- // thread has started to create the unwind plan and has put it into
- // the unique pointer member variable, and have another thread enter this function
- // and return the partially filled pointer contained in the unique pointer.
- // We also want to make sure that we lock out other unwind plans from
- // being accessed until this one is done creating itself in case someone
- // had some code like:
- // UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
- // if (best_unwind_plan == NULL)
- // best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
- Mutex::Locker locker (m_mutex);
- if (m_tried_unwind_at_non_call_site == false && m_unwind_plan_non_call_site_sp.get() == nullptr)
+ if (m_unwind_plan_eh_frame_sp.get() || m_tried_unwind_plan_eh_frame)
+ return m_unwind_plan_eh_frame_sp;
+
+ Mutex::Locker lock (m_mutex);
+ m_tried_unwind_plan_eh_frame = true;
+ if (m_range.GetBaseAddress().IsValid())
{
- UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
- if (assembly_profiler_sp)
+ Address current_pc (m_range.GetBaseAddress ());
+ if (current_offset != -1)
+ current_pc.SetOffset (current_pc.GetOffset() + current_offset);
+ DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo();
+ if (eh_frame)
{
- if (target.GetArchitecture().GetCore() == ArchSpec::eCore_x86_32_i386
- || target.GetArchitecture().GetCore() == ArchSpec::eCore_x86_64_x86_64
- || target.GetArchitecture().GetCore() == ArchSpec::eCore_x86_64_x86_64h)
+ m_unwind_plan_eh_frame_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!eh_frame->GetUnwindPlan (current_pc, *m_unwind_plan_eh_frame_sp))
+ m_unwind_plan_eh_frame_sp.reset();
+ }
+ }
+ return m_unwind_plan_eh_frame_sp;
+}
+
+UnwindPlanSP
+FuncUnwinders::GetEHFrameAugmentedUnwindPlan (Target &target, Thread &thread, int current_offset)
+{
+ if (m_unwind_plan_eh_frame_augmented_sp.get() || m_tried_unwind_plan_eh_frame_augmented)
+ return m_unwind_plan_eh_frame_augmented_sp;
+
+ // Only supported on x86 architectures where we get eh_frame from the compiler that describes
+ // the prologue instructions perfectly, and sometimes the epilogue instructions too.
+ if (target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_32_i386
+ && target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64
+ && target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64h)
+ {
+ m_tried_unwind_plan_eh_frame_augmented = true;
+ return m_unwind_plan_eh_frame_augmented_sp;
+ }
+
+ Mutex::Locker lock (m_mutex);
+ m_tried_unwind_plan_eh_frame_augmented = true;
+
+ if (m_range.GetBaseAddress().IsValid())
+ {
+ Address current_pc (m_range.GetBaseAddress ());
+ if (current_offset != -1)
+ current_pc.SetOffset (current_pc.GetOffset() + current_offset);
+ DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo();
+ if (eh_frame)
+ {
+ m_unwind_plan_eh_frame_augmented_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!eh_frame->GetUnwindPlan (current_pc, *m_unwind_plan_eh_frame_augmented_sp))
{
- // For 0th frame on i386 & x86_64, we fetch eh_frame and try using assembly profiler
- // to augment it into asynchronous unwind table.
- GetUnwindPlanAtCallSite(current_offset);
- if (m_unwind_plan_call_site_sp) {
- UnwindPlan* plan = new UnwindPlan (*m_unwind_plan_call_site_sp);
- if (assembly_profiler_sp->AugmentUnwindPlanFromCallSite (m_range, thread, *plan)) {
- m_unwind_plan_non_call_site_sp.reset (plan);
- return m_unwind_plan_non_call_site_sp;
+ m_unwind_plan_eh_frame_augmented_sp.reset();
+ }
+ else
+ {
+ // Augment the eh_frame instructions with epilogue descriptions if necessary so the
+ // UnwindPlan can be used at any instruction in the function.
+
+ UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
+ if (assembly_profiler_sp)
+ {
+ if (!assembly_profiler_sp->AugmentUnwindPlanFromCallSite (m_range, thread, *m_unwind_plan_eh_frame_augmented_sp))
+ {
+ m_unwind_plan_eh_frame_augmented_sp.reset();
}
}
+ else
+ {
+ m_unwind_plan_eh_frame_augmented_sp.reset();
+ }
}
+ }
+ }
+ return m_unwind_plan_eh_frame_augmented_sp;
+}
+
+
+UnwindPlanSP
+FuncUnwinders::GetAssemblyUnwindPlan (Target &target, Thread &thread, int current_offset)
+{
+ if (m_unwind_plan_assembly_sp.get() || m_tried_unwind_plan_assembly)
+ return m_unwind_plan_assembly_sp;
- m_unwind_plan_non_call_site_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
- if (!assembly_profiler_sp->GetNonCallSiteUnwindPlanFromAssembly (m_range, thread, *m_unwind_plan_non_call_site_sp))
- m_unwind_plan_non_call_site_sp.reset();
+ Mutex::Locker lock (m_mutex);
+ m_tried_unwind_plan_assembly = true;
+
+ UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
+ if (assembly_profiler_sp)
+ {
+ m_unwind_plan_assembly_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!assembly_profiler_sp->GetNonCallSiteUnwindPlanFromAssembly (m_range, thread, *m_unwind_plan_assembly_sp))
+ {
+ m_unwind_plan_assembly_sp.reset();
}
}
- return m_unwind_plan_non_call_site_sp;
+ return m_unwind_plan_assembly_sp;
+}
+
+
+UnwindPlanSP
+FuncUnwinders::GetUnwindPlanAtNonCallSite (Target& target, Thread& thread, int current_offset)
+{
+ UnwindPlanSP non_call_site_unwindplan_sp = GetEHFrameAugmentedUnwindPlan (target, thread, current_offset);
+ if (non_call_site_unwindplan_sp.get() == nullptr)
+ {
+ non_call_site_unwindplan_sp = GetAssemblyUnwindPlan (target, thread, current_offset);
+ }
+ return non_call_site_unwindplan_sp;
}
UnwindPlanSP
FuncUnwinders::GetUnwindPlanFastUnwind (Thread& thread)
{
- // Lock the mutex to ensure we can always give out the most appropriate
- // information. We want to make sure if someone requests an unwind
- // plan, that they get one and don't run into a race condition where one
- // thread has started to create the unwind plan and has put it into
- // the unique pointer member variable, and have another thread enter this function
- // and return the partially filled pointer contained in the unique pointer.
- // We also want to make sure that we lock out other unwind plans from
- // being accessed until this one is done creating itself in case someone
- // had some code like:
- // UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
- // if (best_unwind_plan == NULL)
- // best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
+ if (m_unwind_plan_fast_sp.get() || m_tried_unwind_fast)
+ return m_unwind_plan_fast_sp;
+
Mutex::Locker locker (m_mutex);
- if (m_tried_unwind_fast == false && m_unwind_plan_fast_sp.get() == nullptr)
+ m_tried_unwind_fast = true;
+
+ UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
+ if (assembly_profiler_sp)
{
- m_tried_unwind_fast = true;
- UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
- if (assembly_profiler_sp)
+ m_unwind_plan_fast_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!assembly_profiler_sp->GetFastUnwindPlan (m_range, thread, *m_unwind_plan_fast_sp))
{
- m_unwind_plan_fast_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
- if (!assembly_profiler_sp->GetFastUnwindPlan (m_range, thread, *m_unwind_plan_fast_sp))
- m_unwind_plan_fast_sp.reset();
+ m_unwind_plan_fast_sp.reset();
}
}
return m_unwind_plan_fast_sp;
@@ -171,32 +239,23 @@ FuncUnwinders::GetUnwindPlanFastUnwind (Thread& thread)
UnwindPlanSP
FuncUnwinders::GetUnwindPlanArchitectureDefault (Thread& thread)
{
- // Lock the mutex to ensure we can always give out the most appropriate
- // information. We want to make sure if someone requests an unwind
- // plan, that they get one and don't run into a race condition where one
- // thread has started to create the unwind plan and has put it into
- // the unique pointer member variable, and have another thread enter this function
- // and return the partially filled pointer contained in the unique pointer.
- // We also want to make sure that we lock out other unwind plans from
- // being accessed until this one is done creating itself in case someone
- // had some code like:
- // UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
- // if (best_unwind_plan == NULL)
- // best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
+ if (m_unwind_plan_arch_default_sp.get() || m_tried_unwind_arch_default)
+ return m_unwind_plan_arch_default_sp;
+
Mutex::Locker locker (m_mutex);
- if (m_tried_unwind_arch_default == false && m_unwind_plan_arch_default_sp.get() == nullptr)
+ m_tried_unwind_arch_default = true;
+
+ Address current_pc;
+ ProcessSP process_sp (thread.CalculateProcess());
+ if (process_sp)
{
- m_tried_unwind_arch_default = true;
- Address current_pc;
- ProcessSP process_sp (thread.CalculateProcess());
- if (process_sp)
+ ABI *abi = process_sp->GetABI().get();
+ if (abi)
{
- ABI *abi = process_sp->GetABI().get();
- if (abi)
+ m_unwind_plan_arch_default_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!abi->CreateDefaultUnwindPlan(*m_unwind_plan_arch_default_sp))
{
- m_unwind_plan_arch_default_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
- if (m_unwind_plan_arch_default_sp)
- abi->CreateDefaultUnwindPlan(*m_unwind_plan_arch_default_sp);
+ m_unwind_plan_arch_default_sp.reset();
}
}
}
@@ -207,32 +266,23 @@ FuncUnwinders::GetUnwindPlanArchitectureDefault (Thread& thread)
UnwindPlanSP
FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry (Thread& thread)
{
- // Lock the mutex to ensure we can always give out the most appropriate
- // information. We want to make sure if someone requests an unwind
- // plan, that they get one and don't run into a race condition where one
- // thread has started to create the unwind plan and has put it into
- // the unique pointer member variable, and have another thread enter this function
- // and return the partially filled pointer contained in the unique pointer.
- // We also want to make sure that we lock out other unwind plans from
- // being accessed until this one is done creating itself in case someone
- // had some code like:
- // UnwindPlan *best_unwind_plan = ...GetUnwindPlanAtCallSite (...)
- // if (best_unwind_plan == NULL)
- // best_unwind_plan = GetUnwindPlanAtNonCallSite (...)
+ if (m_unwind_plan_arch_default_at_func_entry_sp.get() || m_tried_unwind_arch_default_at_func_entry)
+ return m_unwind_plan_arch_default_at_func_entry_sp;
+
Mutex::Locker locker (m_mutex);
- if (m_tried_unwind_arch_default_at_func_entry == false && m_unwind_plan_arch_default_at_func_entry_sp.get() == nullptr)
+ m_tried_unwind_arch_default_at_func_entry = true;
+
+ Address current_pc;
+ ProcessSP process_sp (thread.CalculateProcess());
+ if (process_sp)
{
- m_tried_unwind_arch_default_at_func_entry = true;
- Address current_pc;
- ProcessSP process_sp (thread.CalculateProcess());
- if (process_sp)
+ ABI *abi = process_sp->GetABI().get();
+ if (abi)
{
- ABI *abi = process_sp->GetABI().get();
- if (abi)
+ m_unwind_plan_arch_default_at_func_entry_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
+ if (!abi->CreateFunctionEntryUnwindPlan(*m_unwind_plan_arch_default_at_func_entry_sp))
{
- m_unwind_plan_arch_default_at_func_entry_sp.reset (new UnwindPlan (lldb::eRegisterKindGeneric));
- if (m_unwind_plan_arch_default_at_func_entry_sp)
- abi->CreateFunctionEntryUnwindPlan(*m_unwind_plan_arch_default_at_func_entry_sp);
+ m_unwind_plan_arch_default_at_func_entry_sp.reset();
}
}
}
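Annotation (not part of the patch): the reworked interface gives callers a natural preference order: eh_frame or compact unwind at a call site, the assembly-augmented eh_frame plan or a pure assembly scan when stopped at an arbitrary instruction, and the ABI's architectural default as a last resort. The sketch below shows how a hypothetical caller might chain those getters. ChooseUnwindPlan and its behaves_like_zeroth_frame flag are illustrative; the real frame-selection logic in lldb lives outside this file.

// Illustrative selection logic only; the member functions called here come
// from the FuncUnwinders interface shown in this diff.
#include "lldb/Symbol/FuncUnwinders.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"

using namespace lldb;
using namespace lldb_private;

static UnwindPlanSP
ChooseUnwindPlan (FuncUnwinders &func_unwinders, Target &target, Thread &thread,
                  int current_offset, bool behaves_like_zeroth_frame)
{
    UnwindPlanSP plan_sp;
    if (behaves_like_zeroth_frame)
    {
        // Stopped at an arbitrary instruction (e.g. frame 0 or an async signal
        // frame): the plan must be valid mid-prologue and mid-epilogue.
        plan_sp = func_unwinders.GetUnwindPlanAtNonCallSite (target, thread, current_offset);
    }
    if (!plan_sp)
    {
        // Stopped at a call site: the compiler-provided unwind info is authoritative.
        plan_sp = func_unwinders.GetUnwindPlanAtCallSite (target, current_offset);
    }
    if (!plan_sp)
    {
        // Last resort: the architecture default from the ABI plugin.
        plan_sp = func_unwinders.GetUnwindPlanArchitectureDefault (thread);
    }
    return plan_sp;
}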
@@ -246,10 +296,11 @@ FuncUnwinders::GetFirstNonPrologueInsn (Target& target)
{
if (m_first_non_prologue_insn.IsValid())
return m_first_non_prologue_insn;
+
+ Mutex::Locker locker (m_mutex);
ExecutionContext exe_ctx (target.shared_from_this(), false);
UnwindAssemblySP assembly_profiler_sp (GetUnwindAssemblyProfiler());
if (assembly_profiler_sp)
- if (assembly_profiler_sp)
assembly_profiler_sp->FirstNonPrologueInsn (m_range, exe_ctx, m_first_non_prologue_insn);
return m_first_non_prologue_insn;
}
@@ -260,16 +311,6 @@ FuncUnwinders::GetFunctionStartAddress () const
return m_range.GetBaseAddress();
}
-void
-FuncUnwinders::InvalidateNonCallSiteUnwindPlan (lldb_private::Thread& thread)
-{
- UnwindPlanSP arch_default = GetUnwindPlanArchitectureDefault (thread);
- if (arch_default && m_tried_unwind_at_call_site)
- {
- m_unwind_plan_call_site_sp = arch_default;
- }
-}
-
lldb::UnwindAssemblySP
FuncUnwinders::GetUnwindAssemblyProfiler ()
{
@@ -281,3 +322,39 @@ FuncUnwinders::GetUnwindAssemblyProfiler ()
}
return assembly_profiler_sp;
}
+
+Address
+FuncUnwinders::GetLSDAAddress (Target &target)
+{
+ Address lsda_addr;
+
+ UnwindPlanSP unwind_plan_sp = GetEHFrameUnwindPlan (target, -1);
+ if (unwind_plan_sp.get() == nullptr)
+ {
+ unwind_plan_sp = GetCompactUnwindUnwindPlan (target, -1);
+ }
+ if (unwind_plan_sp.get() && unwind_plan_sp->GetLSDAAddress().IsValid())
+ {
+ lsda_addr = unwind_plan_sp->GetLSDAAddress();
+ }
+ return lsda_addr;
+}
+
+
+Address
+FuncUnwinders::GetPersonalityRoutinePtrAddress (Target &target)
+{
+ Address personality_addr;
+
+ UnwindPlanSP unwind_plan_sp = GetEHFrameUnwindPlan (target, -1);
+ if (unwind_plan_sp.get() == nullptr)
+ {
+ unwind_plan_sp = GetCompactUnwindUnwindPlan (target, -1);
+ }
+ if (unwind_plan_sp.get() && unwind_plan_sp->GetPersonalityFunctionPtr().IsValid())
+ {
+ personality_addr = unwind_plan_sp->GetPersonalityFunctionPtr();
+ }
+
+ return personality_addr;
+}
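Annotation (not part of the patch): the two accessors added at the end of the file surface exception-handling metadata, namely the LSDA and the personality routine pointer, from whichever call-site source (eh_frame first, then compact unwind) yields a plan. A minimal, hypothetical caller might use them like this:

// Hypothetical helper, not part of the patch: report whether a function has
// EH metadata according to its eh_frame FDE or compact unwind entry.
#include "lldb/Core/Address.h"
#include "lldb/Symbol/FuncUnwinders.h"
#include "lldb/Target/Target.h"

using namespace lldb_private;

static bool
FunctionHasEHMetadata (FuncUnwinders &func_unwinders, Target &target)
{
    Address lsda_addr = func_unwinders.GetLSDAAddress (target);
    Address personality_addr = func_unwinders.GetPersonalityRoutinePtrAddress (target);
    // Both come back as invalid Addresses when neither eh_frame nor compact
    // unwind recorded the information for this function.
    return lsda_addr.IsValid() && personality_addr.IsValid();
}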