author     dim <dim@FreeBSD.org>    2016-01-06 20:12:03 +0000
committer  dim <dim@FreeBSD.org>    2016-01-06 20:12:03 +0000
commit     78b9749c0a4ea980a8b934645da6ae98fcc665e8 (patch)
tree       dd2a1ddf0476664c2b823409c36cbccd52662ca7 /packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock
parent     60cb593f9d55fa5ca7a5372b731f2330345b4b9a (diff)
Vendor import of lldb trunk r256945:
https://llvm.org/svn/llvm-project/lldb/trunk@256945
Diffstat (limited to 'packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock')
-rw-r--r--  packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/.categories            |  1
-rw-r--r--  packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/Makefile               |  6
-rw-r--r--  packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/TestExprDoesntBlock.py | 57
-rw-r--r--  packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/locking.c              | 80
4 files changed, 144 insertions(+), 0 deletions(-)
diff --git a/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/.categories b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/.categories
new file mode 100644
index 0000000..897e40a
--- /dev/null
+++ b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/.categories
@@ -0,0 +1 @@
+expression
diff --git a/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/Makefile b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/Makefile
new file mode 100644
index 0000000..a10791d
--- /dev/null
+++ b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/Makefile
@@ -0,0 +1,6 @@
+LEVEL = ../../make
+
+C_SOURCES := locking.c
+ENABLE_THREADS := YES
+
+include $(LEVEL)/Makefile.rules
diff --git a/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/TestExprDoesntBlock.py b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/TestExprDoesntBlock.py
new file mode 100644
index 0000000..a54b458
--- /dev/null
+++ b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/TestExprDoesntBlock.py
@@ -0,0 +1,57 @@
+"""
+Test that expr will time out and allow other threads to run if it blocks.
+"""
+
+from __future__ import print_function
+
+
+
+import os, time
+import re
+import lldb
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+class ExprDoesntDeadlockTestCase(TestBase):
+
+ def getCategories(self):
+ return ['basic_process']
+
+ mydir = TestBase.compute_mydir(__file__)
+
+ @expectedFailureFreeBSD('llvm.org/pr17946')
+ @expectedFlakeyLinux # failed 1/365 test runs, line 61, thread.IsValid()
+ @expectedFailureWindows # Windows doesn't have pthreads, need to port this test.
+ def test_with_run_command(self):
+ """Test that expr will time out and allow other threads to run if it blocks."""
+ self.build()
+ exe = os.path.join(os.getcwd(), "a.out")
+
+ # Create a target by the debugger.
+ target = self.dbg.CreateTarget(exe)
+ self.assertTrue(target, VALID_TARGET)
+
+ # Now create a breakpoint at the source line just before call_me_to_get_lock gets called.
+
+ main_file_spec = lldb.SBFileSpec ("locking.c")
+ breakpoint = target.BreakpointCreateBySourceRegex('Break here', main_file_spec)
+ if self.TraceOn():
+ print("breakpoint:", breakpoint)
+ self.assertTrue(breakpoint and
+ breakpoint.GetNumLocations() == 1,
+ VALID_BREAKPOINT)
+
+ # Now launch the process, and do not stop at entry point.
+ process = target.LaunchSimple (None, None, self.get_process_working_directory())
+ self.assertTrue(process, PROCESS_IS_VALID)
+
+ # A thread should now be stopped at our source-regex breakpoint.
+ from lldbsuite.test.lldbutil import get_stopped_thread
+ thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
+ self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition")
+
+ frame0 = thread.GetFrameAtIndex(0)
+
+ var = frame0.EvaluateExpression ("call_me_to_get_lock()")
+ self.assertTrue (var.IsValid())
+ self.assertTrue (var.GetValueAsSigned (0) == 567)
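The behaviour this test exercises is lldb's expression-evaluation policy of trying the current thread first and, when that blocks, rerunning the expression with all threads resumed. The test relies on the defaults, but the same policy can be spelled out explicitly through SBExpressionOptions; the sketch below is not part of this import, and the timeout value is purely illustrative:

import lldb

def evaluate_without_deadlocking(frame, expr):
    # Mirror the timeout-then-try-all-threads behaviour the test depends on.
    options = lldb.SBExpressionOptions()
    options.SetTimeoutInMicroSeconds(500000)  # overall budget: 0.5 s (illustrative)
    options.SetTryAllThreads(True)            # resume the other threads on retry
    options.SetUnwindOnError(True)
    return frame.EvaluateExpression(expr, options)

# e.g. evaluate_without_deadlocking(frame0, "call_me_to_get_lock()")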
diff --git a/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/locking.c b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/locking.c
new file mode 100644
index 0000000..fae9979
--- /dev/null
+++ b/packages/Python/lldbsuite/test/functionalities/expr-doesnt-deadlock/locking.c
@@ -0,0 +1,80 @@
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+pthread_mutex_t contended_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+pthread_mutex_t control_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t control_condition;
+
+pthread_mutex_t thread_started_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t thread_started_condition;
+
+// This function runs in a thread.  The locking dance is to make sure that,
+// by the time the main thread makes its by-hand call to call_me_to_get_lock
+// below, this thread has for sure acquired the contended_mutex.  So the
+// call_me_to_get_lock function will block trying to get the mutex, and only
+// succeed once it signals this thread, then lets it run to wake up from the
+// cond_wait and release the mutex.
+
+void *
+lock_acquirer_1 (void *input)
+{
+ pthread_mutex_lock (&contended_mutex);
+
+ // Grab this mutex; that ensures that the main thread is already sitting
+ // in its cond_wait for it (since that's when it drops the mutex).
+
+ pthread_mutex_lock (&thread_started_mutex);
+ pthread_mutex_unlock(&thread_started_mutex);
+
+ // Now signal the main thread that it can continue; we have the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress until
+ // this thread gets a chance to run.
+
+ pthread_mutex_lock (&control_mutex);
+
+ pthread_cond_signal (&thread_started_condition);
+
+ pthread_cond_wait (&control_condition, &control_mutex);
+
+ pthread_mutex_unlock (&contended_mutex);
+ return NULL;
+}
+
+int
+call_me_to_get_lock ()
+{
+ pthread_cond_signal (&control_condition);
+ pthread_mutex_lock (&contended_mutex);
+ return 567;
+}
+
+int main ()
+{
+ pthread_t thread_1;
+
+ pthread_cond_init (&control_condition, NULL);
+ pthread_cond_init (&thread_started_condition, NULL);
+
+ pthread_mutex_lock (&thread_started_mutex);
+
+ pthread_create (&thread_1, NULL, lock_acquirer_1, NULL);
+
+ pthread_cond_wait (&thread_started_condition, &thread_started_mutex);
+
+ pthread_mutex_lock (&control_mutex);
+ pthread_mutex_unlock (&control_mutex);
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+
+ call_me_to_get_lock();
+ pthread_join (thread_1, NULL);
+
+ return 0;
+
+}
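For readers following the locking dance in prose, a rough Python equivalent of the same handshake may be easier to step through. It is not part of the import (names are hypothetical, built on the standard threading module), but it makes the same guarantee: call_me_to_get_lock can only return after the worker thread has been allowed to run and release the contended lock.

import threading

contended = threading.Lock()
control = threading.Condition()
started = threading.Condition()

def lock_acquirer():
    # Take the contended lock first, like lock_acquirer_1 above.
    contended.acquire()
    with control:
        # Acquiring 'started' only succeeds once main is in started.wait(),
        # so the notify below cannot be lost.
        with started:
            started.notify()
        control.wait()        # releases 'control' while sleeping
    contended.release()

def call_me_to_get_lock():
    with control:
        control.notify()      # wake the worker ...
    contended.acquire()       # ... and block until it releases the lock
    contended.release()
    return 567

with started:
    worker = threading.Thread(target=lock_acquirer)
    worker.start()
    started.wait()            # the worker now owns 'contended'

with control:
    pass                      # the worker is now parked in control.wait()

print(call_me_to_get_lock())  # prints 567 only after the worker has run
worker.join()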