Diffstat (limited to 'kernel/lockdep_internals.h'):
 kernel/lockdep_internals.h | 83 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac..6f48d37 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr) do { } while (0)
 # define debug_atomic_read(ptr) 0
 #endif
+
+/* The circular_queue and its helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the lock to be acquired next back to a
+ * previously held lock, if there is a circular dependency between them.
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
+struct circular_queue {
+        unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+        unsigned int front, rear;
+};
+
+#define LOCK_ACCESSED 1UL /* set in the low bit of a lock_list's ->parent */
+#define LOCK_ACCESSED_MASK (~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+        cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+        return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+        return ((cq->rear + 1) % MAX_CIRCULAR_QUEUE_SIZE) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+        if (__cq_full(cq))
+                return -1;
+
+        cq->element[cq->rear] = elem;
+        cq->rear = (cq->rear + 1) % MAX_CIRCULAR_QUEUE_SIZE;
+        return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+        if (__cq_empty(cq))
+                return -1;
+
+        *elem = cq->element[cq->front];
+        cq->front = (cq->front + 1) % MAX_CIRCULAR_QUEUE_SIZE;
+        return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+        return (cq->rear - cq->front) % MAX_CIRCULAR_QUEUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+                                      struct lock_list *parent)
+{
+        lock->parent = (void *)((unsigned long)parent | LOCK_ACCESSED);
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+        return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+        return (struct lock_list *)
+                ((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+        unsigned long depth = 0;
+        struct lock_list *parent;
+
+        while ((parent = get_lock_parent(child))) {
+                child = parent;
+                depth++;
+        }
+        return depth;
+}
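
For illustration, here is a minimal sketch of how these helpers would fit together in a BFS over the lock dependency graph. This is not the search routine from the patch itself: the simplified struct lock_list layout (the deps/nr_deps adjacency fields) and the bfs_find() function are hypothetical, invented for this example; only the queue and marking helpers are the ones added above.

/*
 * Sketch only -- assumes the circular_queue and the mark/parent helpers
 * from the hunk above are in scope.  The deps/nr_deps adjacency fields
 * are hypothetical stand-ins for lockdep's real dependency lists.
 */
struct lock_list {
        struct lock_list *parent;   /* BFS back-pointer; low bit = visited */
        struct lock_list **deps;    /* hypothetical: forward dependencies */
        int nr_deps;
};

/* Return @target if it is reachable from @source, else NULL. */
static struct lock_list *bfs_find(struct lock_list *source,
                                  struct lock_list *target)
{
        static struct circular_queue cq; /* 32KB: too big for the stack */
        unsigned long elem;
        int i;

        __cq_init(&cq);
        mark_lock_accessed(source, NULL); /* visited, with no parent */
        __cq_enqueue(&cq, (unsigned long)source);

        while (!__cq_empty(&cq)) {
                struct lock_list *lock;

                __cq_dequeue(&cq, &elem);
                lock = (struct lock_list *)elem;
                if (lock == target)
                        return lock;

                for (i = 0; i < lock->nr_deps; i++) {
                        struct lock_list *entry = lock->deps[i];

                        if (lock_accessed(entry))
                                continue; /* already queued or visited */
                        mark_lock_accessed(entry, lock);
                        if (__cq_enqueue(&cq, (unsigned long)entry))
                                return NULL; /* queue overflow */
                }
        }
        return NULL;
}

Because every node records the node it was discovered from, a hit can be unwound into the offending dependency chain: following get_lock_parent() from target back to source yields the shortest such path, and get_lock_depth(target) gives its length. Note also that MAX_CIRCULAR_QUEUE_SIZE is a power of two, so __cq_get_elem_count() remains correct even after the unsigned front/rear indices wrap around.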