author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-04 15:31:36 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-04 15:31:36 -0700
commit    53c566625fb872e7826a237f0f5c21458028e94a (patch)
tree      8ef9990ed2124f085442bc5a44c3f5212bf4002d /include/trace
parent    34917f9713905a937816ebb7ee5f25bef7a6441c (diff)
parent    00fdf13a2e9f313a044288aa59d3b8ec29ff904a (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs changes from Chris Mason:
 "This is a pretty long stream of bug fixes and performance fixes.

  Qu Wenruo has replaced the btrfs async threads with regular kernel
  workqueues.  We'll keep an eye out for performance differences, but
  it's nice to be using more generic code for this.

  We still have some corruption fixes and other patches coming in for
  the merge window, but this batch is tested and ready to go"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (108 commits)
  Btrfs: fix a crash of clone with inline extents's split
  btrfs: fix uninit variable warning
  Btrfs: take into account total references when doing backref lookup
  Btrfs: part 2, fix incremental send's decision to delay a dir move/rename
  Btrfs: fix incremental send's decision to delay a dir move/rename
  Btrfs: remove unnecessary inode generation lookup in send
  Btrfs: fix race when updating existing ref head
  btrfs: Add trace for btrfs_workqueue alloc/destroy
  Btrfs: less fs tree lock contention when using autodefrag
  Btrfs: return EPERM when deleting a default subvolume
  Btrfs: add missing kfree in btrfs_destroy_workqueue
  Btrfs: cache extent states in defrag code path
  Btrfs: fix deadlock with nested trans handles
  Btrfs: fix possible empty list access when flushing the delalloc inodes
  Btrfs: split the global ordered extents mutex
  Btrfs: don't flush all delalloc inodes when we doesn't get s_umount lock
  Btrfs: reclaim delalloc metadata more aggressively
  Btrfs: remove unnecessary lock in may_commit_transaction()
  Btrfs: remove the unnecessary flush when preparing the pages
  Btrfs: just do dirty page flush for the inode with compression before direct IO
  ...
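The include/trace change shown below comes from the "btrfs: Add trace
for btrfs_workqueue alloc/destroy" commit listed above; the diff itself
contains only the event definitions. As a minimal sketch of how the
work events get fired from the queueing path (the call site below is an
assumption modeled on the fs/btrfs/async-thread.c code of this era, not
a verbatim copy):

#include <linux/workqueue.h>
#include <trace/events/btrfs.h>

/*
 * Sketch under assumptions: the function name and body are
 * illustrative; only trace_btrfs_work_queued() is the real hook
 * generated by the DEFINE_EVENT in the diff below.
 */
static void example_queue_work(struct __btrfs_workqueue *wq,
			       struct btrfs_work *work)
{
	work->wq = wq;			/* recorded by TP_fast_assign */
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);	/* emits the btrfs_work_queued event */
}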
Diffstat (limited to 'include/trace')
 -rw-r--r--  include/trace/events/btrfs.h  137
 1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 3176cdc..4ee4e30 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -21,6 +21,8 @@ struct btrfs_block_group_cache;
struct btrfs_free_cluster;
struct map_lookup;
struct extent_buffer;
+struct btrfs_work;
+struct __btrfs_workqueue;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -982,6 +984,141 @@ TRACE_EVENT(free_extent_state,
(void *)__entry->ip)
);
+DECLARE_EVENT_CLASS(btrfs__work,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, wq )
+ __field( void *, func )
+ __field( void *, ordered_func )
+ __field( void *, ordered_free )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->wq = work->wq;
+ __entry->func = work->func;
+ __entry->ordered_func = work->ordered_func;
+ __entry->ordered_free = work->ordered_free;
+ ),
+
+ TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
+ __entry->work, __entry->wq, __entry->func,
+ __entry->ordered_func, __entry->ordered_free)
+);
+
+/* For situations where the work is already freed */
+DECLARE_EVENT_CLASS(btrfs__work__done,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ ),
+
+ TP_printk("work->%p", __entry->work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_queued,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_work_sched,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work)
+);
+
+DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
+
+ TP_PROTO(struct btrfs_work *work),
+
+ TP_ARGS(work)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue,
+
+ TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+ TP_ARGS(wq, name, high),
+
+ TP_STRUCT__entry(
+ __field( void *, wq )
+ __string( name, name )
+ __field( int , high )
+ ),
+
+ TP_fast_assign(
+ __entry->wq = wq;
+ __assign_str(name, name);
+ __entry->high = high;
+ ),
+
+ TP_printk("name=%s%s, wq=%p", __get_str(name),
+ __print_flags(__entry->high, "",
+ {(WQ_HIGHPRI), "-high"}),
+ __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc,
+
+ TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
+
+ TP_ARGS(wq, name, high)
+);
+
+DECLARE_EVENT_CLASS(btrfs__workqueue_done,
+
+ TP_PROTO(struct __btrfs_workqueue *wq),
+
+ TP_ARGS(wq),
+
+ TP_STRUCT__entry(
+ __field( void *, wq )
+ ),
+
+ TP_fast_assign(
+ __entry->wq = wq;
+ ),
+
+ TP_printk("wq=%p", __entry->wq)
+);
+
+DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
+
+ TP_PROTO(struct __btrfs_workqueue *wq),
+
+ TP_ARGS(wq)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
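Each DEFINE_EVENT above expands into a trace_<name>() inline (for
example trace_btrfs_workqueue_alloc()) that callers drop into the
relevant code path and that compiles down to almost nothing while the
event is disabled. A hypothetical alloc/destroy pairing, showing how
the two workqueue events bracket a queue's lifetime (the helper names
and bodies are illustrative assumptions, not btrfs code; the full
definition of struct __btrfs_workqueue, which lives in
fs/btrfs/async-thread.c, is assumed visible here):

#include <linux/slab.h>
#include <trace/events/btrfs.h>

static struct __btrfs_workqueue *example_alloc(const char *name, int flags)
{
	struct __btrfs_workqueue *wq = kzalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return NULL;
	/* __print_flags() renders a "-high" suffix when WQ_HIGHPRI is set */
	trace_btrfs_workqueue_alloc(wq, name, flags & WQ_HIGHPRI);
	return wq;
}

static void example_destroy(struct __btrfs_workqueue *wq)
{
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}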