From 73ba39ab9307340dc98ec3622891314bbc09cc2e Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sun, 4 Dec 2016 12:51:53 +0800 Subject: [PATCH 01/10] btrfs: return the actual error value from btrfs_uuid_tree_iterate In function btrfs_uuid_tree_iterate(), errno is assigned to variable ret on errors. However, it directly returns 0. It may be better to return ret. This patch also removes the warning, because the caller already prints a warning. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188731 Signed-off-by: Pan Bian Reviewed-by: Omar Sandoval [ edited subject ] Signed-off-by: David Sterba --- fs/btrfs/uuid-tree.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c index 161342b73ce5..726f928238d0 100644 --- a/fs/btrfs/uuid-tree.c +++ b/fs/btrfs/uuid-tree.c @@ -352,7 +352,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, out: btrfs_free_path(path); - if (ret) - btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret); - return 0; + return ret; } From aa7c8da35d1905d80e840d075f07d26ec90144b5 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 20 Dec 2016 13:28:27 -0500 Subject: [PATCH 02/10] btrfs: fix error handling when run_delayed_extent_op fails In __btrfs_run_delayed_refs, the error path when run_delayed_extent_op fails sets locked_ref->processing = 0 but doesn't re-increment delayed_refs->num_heads_ready. As a result, we end up triggering the WARN_ON in btrfs_select_ref_head. Fixes: d7df2c796d7 (Btrfs: attach delayed ref updates to delayed ref heads) Reported-by: Jon Nelson Signed-off-by: Jeff Mahoney Reviewed-by: Liu Bo Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e97302f437a1..5366e50c84c6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, */ if (must_insert_reserved) locked_ref->must_insert_reserved = 1; + spin_lock(&delayed_refs->lock); locked_ref->processing = 0; + delayed_refs->num_heads_ready++; + spin_unlock(&delayed_refs->lock); btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret); From d0280996437081dd12ed1e982ac8aeaa62835ec4 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Tue, 20 Dec 2016 13:28:28 -0500 Subject: [PATCH 03/10] btrfs: fix locking when we put back a delayed ref that's too new In __btrfs_run_delayed_refs, when we put back a delayed ref that's too new, we have already dropped the lock on locked_ref when we set ->processing = 0. This patch keeps the lock to cover that assignment.
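For reference, a simplified, non-diff sketch of the put-back path as it looks with this change applied (the exact hunk follows below):

	spin_unlock(&locked_ref->lock);
	spin_lock(&delayed_refs->lock);
	/* Reset the head and its accounting while still under delayed_refs->lock. */
	locked_ref->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	/* Only now is it safe to unlock and drop the head itself. */
	btrfs_delayed_ref_unlock(locked_ref);
	locked_ref = NULL;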
Fixes: d7df2c796d7 (Btrfs: attach delayed ref updates to delayed ref heads) Signed-off-by: Jeff Mahoney Reviewed-by: Liu Bo Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5366e50c84c6..ac7e6713033c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, if (ref && ref->seq && btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { spin_unlock(&locked_ref->lock); - btrfs_delayed_ref_unlock(locked_ref); spin_lock(&delayed_refs->lock); locked_ref->processing = 0; delayed_refs->num_heads_ready++; spin_unlock(&delayed_refs->lock); + btrfs_delayed_ref_unlock(locked_ref); locked_ref = NULL; cond_resched(); count++; From e321f8a801d7b4c40da8005257b05b9c2b51b072 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 30 Nov 2016 16:11:04 -0800 Subject: [PATCH 04/10] Btrfs: use down_read_nested to make lockdep silent If @block_group is not @used_bg, it'll try to get @used_bg's lock without dropping @block_group's lock, and lockdep has thrown a scary deadlock warning about it. Fix it by using down_read_nested. Signed-off-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ac7e6713033c..dcd2e798767e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7387,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, spin_unlock(&cluster->refill_lock); - down_read(&used_bg->data_rwsem); + /* We should only have one-level nested. */ + down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING); spin_lock(&cluster->refill_lock); if (used_bg == cluster->block_group) From 781feef7e6befafd4d9787d1f7ada1f9ccd504e4 Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 30 Nov 2016 16:20:25 -0800 Subject: [PATCH 05/10] Btrfs: fix lockdep warning about log_mutex While checking INODE_REF/INODE_EXTREF for a corner case, we may acquire a different inode's log_mutex while holding the current inode's log_mutex, and lockdep has complained about this with a possible deadlock warning. Fix this by using mutex_lock_nested() when processing the other inode's log_mutex.
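For reference, a simplified sketch of the resulting lock acquisition in btrfs_log_inode() (the exact hunks follow below):

	if (inode_only == LOG_OTHER_INODE) {
		/*
		 * We are logging another inode while already holding the
		 * current inode's log_mutex, so tell lockdep this nesting is
		 * expected and fall back to LOG_INODE_EXISTS semantics.
		 */
		inode_only = LOG_INODE_EXISTS;
		mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
				  SINGLE_DEPTH_NESTING);
	} else {
		mutex_lock(&BTRFS_I(inode)->log_mutex);
	}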
Reviewed-by: Filipe Manana Signed-off-by: Liu Bo Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f10bf5213ed8..eeffff84f280 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -37,6 +37,7 @@ */ #define LOG_INODE_ALL 0 #define LOG_INODE_EXISTS 1 +#define LOG_OTHER_INODE 2 /* * directory trouble cases @@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (S_ISDIR(inode->i_mode) || (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags) && - inode_only == LOG_INODE_EXISTS)) + inode_only >= LOG_INODE_EXISTS)) max_key.type = BTRFS_XATTR_ITEM_KEY; else max_key.type = (u8)-1; @@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, return ret; } - mutex_lock(&BTRFS_I(inode)->log_mutex); + if (inode_only == LOG_OTHER_INODE) { + inode_only = LOG_INODE_EXISTS; + mutex_lock_nested(&BTRFS_I(inode)->log_mutex, + SINGLE_DEPTH_NESTING); + } else { + mutex_lock(&BTRFS_I(inode)->log_mutex); + } /* * a brute force approach to making sure we get the most uptodate @@ -4817,7 +4824,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, * unpin it. */ err = btrfs_log_inode(trans, root, other_inode, - LOG_INODE_EXISTS, + LOG_OTHER_INODE, 0, LLONG_MAX, ctx); iput(other_inode); if (err) From c2931667c83ded6504b3857e99cc45b21fa496fb Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 22 Dec 2016 17:13:54 -0800 Subject: [PATCH 06/10] Btrfs: adjust outstanding_extents counter properly when dio write is split Currently btrfs dio does not handle a split dio write well. If a dio write is split into several segments due to the lack of contiguous space, a large dio write like 'dd bs=1G count=1' can end up with an incorrect outstanding_extents counter, and endio will complain loudly with an assertion. This fixes the problem by compensating the inode's outstanding_extents counter if a large dio write gets split. Reported-by: Anand Jain Tested-by: Anand Jain Signed-off-by: Liu Bo Signed-off-by: David Sterba --- fs/btrfs/inode.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a713d9d324b0..81b9d9d0450c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode, * within our reservation, otherwise we need to adjust our inode * counter appropriately. */ - if (dio_data->outstanding_extents) { + if (dio_data->outstanding_extents >= num_extents) { dio_data->outstanding_extents -= num_extents; } else { + /* + * If dio write length has been split due to no large enough + * contiguous space, we need to compensate our inode counter + * appropriately. + */ + u64 num_needed = num_extents - dio_data->outstanding_extents; + spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents += num_extents; + BTRFS_I(inode)->outstanding_extents += num_needed; spin_unlock(&BTRFS_I(inode)->lock); } } From ac0c7cf8be00f269f82964cf7b144ca3edc5dbc4 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 6 Jan 2017 14:12:51 +0100 Subject: [PATCH 07/10] btrfs: fix crash when tracepoint arguments are freed by wq callbacks Enabling btrfs tracepoints leads to an instant crash, as reported. The wq callbacks could free the memory and the tracepoints started to dereference the members to get to fs_info.
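For context, the problematic pattern in async-thread.c was essentially (simplified):

	work->ordered_free(work);		/* the callback may free 'work' */
	trace_btrfs_all_work_done(work);	/* the tracepoint then dereferences freed memory */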
The proposed fix https://marc.info/?l=linux-btrfs&m=148172436722606&w=2 removed the tracepoints but we could preserve them by passing only the required data in a safe way. Fixes: bc074524e123 ("btrfs: prefix fsid to all trace events") CC: stable@vger.kernel.org # 4.8+ Reported-by: Sebastian Andrzej Siewior Reviewed-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/async-thread.c | 15 +++++++++++---- include/trace/events/btrfs.h | 22 +++++++++++++--------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 63d197724519..ff0b0be92d61 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) unsigned long flags; while (1) { + void *wtag; + spin_lock_irqsave(lock, flags); if (list_empty(list)) break; @@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) spin_unlock_irqrestore(lock, flags); /* - * we don't want to call the ordered free functions - * with the lock held though + * We don't want to call the ordered free functions with the + * lock held though. Save the work as tag for the trace event, + * because the callback could free the structure. */ + wtag = work; work->ordered_free(work); - trace_btrfs_all_work_done(work); + trace_btrfs_all_work_done(wq->fs_info, wtag); } spin_unlock_irqrestore(lock, flags); } @@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) static void normal_work_helper(struct btrfs_work *work) { struct __btrfs_workqueue *wq; + void *wtag; int need_order = 0; /* @@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work) if (work->ordered_func) need_order = 1; wq = work->wq; + /* Safe for tracepoints in case work gets freed by the callback */ + wtag = work; trace_btrfs_work_sched(work); thresh_exec_hook(wq); @@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work) run_ordered_work(wq); } if (!need_order) - trace_btrfs_all_work_done(work); + trace_btrfs_all_work_done(wq->fs_info, wtag); } void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index c14bed4ab097..b09225c77676 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1157,22 +1157,26 @@ DECLARE_EVENT_CLASS(btrfs__work, __entry->func, __entry->ordered_func, __entry->ordered_free) ); -/* For situiations that the work is freed */ +/* + * For situiations when the work is freed, we pass fs_info and a tag that that + * matches address of the work structure so it can be paired with the + * scheduling event. 
+ */ DECLARE_EVENT_CLASS(btrfs__work__done, - TP_PROTO(struct btrfs_work *work), + TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag), - TP_ARGS(work), + TP_ARGS(fs_info, wtag), TP_STRUCT__entry_btrfs( - __field( void *, work ) + __field( void *, wtag ) ), - TP_fast_assign_btrfs(btrfs_work_owner(work), - __entry->work = work; + TP_fast_assign_btrfs(fs_info, + __entry->wtag = wtag; ), - TP_printk_btrfs("work->%p", __entry->work) + TP_printk_btrfs("work->%p", __entry->wtag) ); DEFINE_EVENT(btrfs__work, btrfs_work_queued, @@ -1191,9 +1195,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched, DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, - TP_PROTO(struct btrfs_work *work), + TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag), - TP_ARGS(work) + TP_ARGS(fs_info, wtag) ); DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, From 92a1bf76a89ad338f00eb9a2c7689a3907fbcaad Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 17 Nov 2016 15:00:50 -0800 Subject: [PATCH 08/10] Btrfs: add 'inode' for extent map tracepoint 'inode' is an important field for btrfs_get_extent; let's trace it. Signed-off-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/inode.c | 2 +- include/trace/events/btrfs.h | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a713d9d324b0..fab189c67eff 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7059,7 +7059,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, write_unlock(&em_tree->lock); out: - trace_btrfs_get_extent(root, em); + trace_btrfs_get_extent(root, inode, em); btrfs_free_path(path); if (trans) { diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index b09225c77676..3048f5205363 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict, TRACE_EVENT_CONDITION(btrfs_get_extent, - TP_PROTO(struct btrfs_root *root, struct extent_map *map), + TP_PROTO(struct btrfs_root *root, struct inode *inode, + struct extent_map *map), - TP_ARGS(root, map), + TP_ARGS(root, inode, map), TP_CONDITION(map), TP_STRUCT__entry_btrfs( __field( u64, root_objectid ) + __field( u64, ino ) __field( u64, start ) __field( u64, len ) __field( u64, orig_start ) @@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, TP_fast_assign_btrfs(root->fs_info, __entry->root_objectid = root->root_key.objectid; - __entry->start = map->start; + __entry->ino = btrfs_ino(inode); + __entry->start = map->start; __entry->len = map->len; __entry->orig_start = map->orig_start; __entry->block_start = map->block_start; @@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __entry->compress_type = map->compress_type; ), - TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, " + TP_printk_btrfs("root = %llu(%s), ino = %llu start = %llu, len = %llu, " "orig_start = %llu, block_start = %llu(%s), " "block_len = %llu, flags = %s, refs = %u, " "compress_type = %u", show_root_type(__entry->root_objectid), + (unsigned long long)__entry->ino, (unsigned long long)__entry->start, (unsigned long long)__entry->len, (unsigned long long)__entry->orig_start, From 7856654842bdbebc0fbcbf51573da5d70a787aba Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 30 Nov 2016 16:10:10 -0800 Subject: [PATCH 09/10] Btrfs: add truncated_len for ordered extent tracepoints This can help us monitor truncated ordered extents.
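For reference, the change amounts to recording one extra field in the btrfs__ordered_extent event class (simplified excerpt; the full hunk follows below):

	__field( u64, truncated_len )
	...
	__entry->truncated_len = ordered->truncated_len;

so every event defined on that class now also reports truncated_len.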
Signed-off-by: Liu Bo Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 3048f5205363..2026a89786b0 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -263,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, __field( int, compress_type ) __field( int, refs ) __field( u64, root_objectid ) + __field( u64, truncated_len ) ), TP_fast_assign_btrfs(btrfs_sb(inode->i_sb), @@ -277,10 +278,12 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, __entry->refs = atomic_read(&ordered->refs); __entry->root_objectid = BTRFS_I(inode)->root->root_key.objectid; + __entry->truncated_len = ordered->truncated_len; ), TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, " "start = %llu, len = %llu, disk_len = %llu, " + "truncated_len = %llu, " "bytes_left = %llu, flags = %s, compress_type = %d, " "refs = %d", show_root_type(__entry->root_objectid), @@ -289,6 +292,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, (unsigned long long)__entry->start, (unsigned long long)__entry->len, (unsigned long long)__entry->disk_len, + (unsigned long long)__entry->truncated_len, (unsigned long long)__entry->bytes_left, show_ordered_flags(__entry->flags), __entry->compress_type, __entry->refs) From 562a7a07bf61e2949f7cbdb6ac7537ad9e2794d1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 6 Jan 2017 15:51:36 +0100 Subject: [PATCH 10/10] btrfs: make tracepoint format strings more compact We've recently added the fsid to trace events, which makes the line quite long. To reduce it again, remove extra spaces around = and remove ",". Signed-off-by: David Sterba --- include/trace/events/btrfs.h | 112 +++++++++++++++++------------------ 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 2026a89786b0..88d18a8ceb59 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode, BTRFS_I(inode)->root->root_key.objectid; ), - TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, " - "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu", + TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu " + "disk_i_size=%llu last_trans=%llu logged_trans=%llu", show_root_type(__entry->root_objectid), (unsigned long long)__entry->generation, (unsigned long)__entry->ino, @@ -217,10 +217,10 @@ TRACE_EVENT_CONDITION(btrfs_get_extent, __entry->compress_type = map->compress_type; ), - TP_printk_btrfs("root = %llu(%s), ino = %llu start = %llu, len = %llu, " - "orig_start = %llu, block_start = %llu(%s), " - "block_len = %llu, flags = %s, refs = %u, " - "compress_type = %u", + TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu " + "orig_start=%llu block_start=%llu(%s) " + "block_len=%llu flags=%s refs=%u " + "compress_type=%u", show_root_type(__entry->root_objectid), (unsigned long long)__entry->ino, (unsigned long long)__entry->start, @@ -281,11 +281,11 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent, __entry->truncated_len = ordered->truncated_len; ), - TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, " - "start = %llu, len = %llu, disk_len = %llu, " - "truncated_len = %llu, " - "bytes_left = %llu, flags = %s, compress_type = %d, " - "refs = %d", + TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu " + "start=%llu len=%llu disk_len=%llu " + "truncated_len=%llu 
" + "bytes_left=%llu flags=%s compress_type=%d " + "refs=%d", show_root_type(__entry->root_objectid), (unsigned long long)__entry->ino, (unsigned long long)__entry->file_offset, @@ -362,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage, BTRFS_I(inode)->root->root_key.objectid; ), - TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, " - "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, " - "range_end = %llu, for_kupdate = %d, " - "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu", + TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu " + "nr_to_write=%ld pages_skipped=%ld range_start=%llu " + "range_end=%llu for_kupdate=%d " + "for_reclaim=%d range_cyclic=%d writeback_index=%lu", show_root_type(__entry->root_objectid), (unsigned long)__entry->ino, __entry->index, __entry->nr_to_write, __entry->pages_skipped, @@ -408,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook, BTRFS_I(page->mapping->host)->root->root_key.objectid; ), - TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, " - "end = %llu, uptodate = %d", + TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu " + "end=%llu uptodate=%d", show_root_type(__entry->root_objectid), (unsigned long)__entry->ino, (unsigned long)__entry->index, (unsigned long long)__entry->start, @@ -441,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file, BTRFS_I(inode)->root->root_key.objectid; ), - TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d", + TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d", show_root_type(__entry->root_objectid), (unsigned long)__entry->ino, (unsigned long)__entry->parent, __entry->datasync) @@ -492,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group, __entry->create = create; ), - TP_printk("%pU: block_group offset = %llu, size = %llu, " - "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, " - "create = %d", __entry->fsid, + TP_printk("%pU: block_group offset=%llu size=%llu " + "flags=%llu(%s) bytes_used=%llu bytes_super=%llu " + "create=%d", __entry->fsid, (unsigned long long)__entry->offset, (unsigned long long)__entry->size, (unsigned long long)__entry->flags, @@ -543,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref, __entry->seq = ref->seq; ), - TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, " - "parent = %llu(%s), ref_root = %llu(%s), level = %d, " - "type = %s, seq = %llu", + TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s " + "parent=%llu(%s) ref_root=%llu(%s) level=%d " + "type=%s seq=%llu", (unsigned long long)__entry->bytenr, (unsigned long long)__entry->num_bytes, show_ref_action(__entry->action), @@ -608,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref, __entry->seq = ref->seq; ), - TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, " - "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " - "offset = %llu, type = %s, seq = %llu", + TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s " + "parent=%llu(%s) ref_root=%llu(%s) owner=%llu " + "offset=%llu type=%s seq=%llu", (unsigned long long)__entry->bytenr, (unsigned long long)__entry->num_bytes, show_ref_action(__entry->action), @@ -665,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head, __entry->is_data = head_ref->is_data; ), - TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d", + TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d", (unsigned long long)__entry->bytenr, (unsigned long long)__entry->num_bytes, show_ref_action(__entry->action), @@ -729,8 +729,8 @@ 
DECLARE_EVENT_CLASS(btrfs__chunk, __entry->root_objectid = fs_info->chunk_root->root_key.objectid; ), - TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, " - "num_stripes = %d, sub_stripes = %d, type = %s", + TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu " + "num_stripes=%d sub_stripes=%d type=%s", show_root_type(__entry->root_objectid), (unsigned long long)__entry->offset, (unsigned long long)__entry->size, @@ -779,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block, __entry->cow_level = btrfs_header_level(cow); ), - TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu " - "(orig_level = %d), cow_buf = %llu (cow_level = %d)", + TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu " + "(orig_level=%d) cow_buf=%llu (cow_level=%d)", show_root_type(__entry->root_objectid), __entry->refs, (unsigned long long)__entry->buf_start, @@ -844,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush, __assign_str(reason, reason) ), - TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu", + TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu", __entry->fsid, __get_str(reason), __entry->flush, show_flush_action(__entry->flush), (unsigned long long)__entry->flags, @@ -887,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space, __entry->ret = ret; ), - TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, " - "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state, + TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu " + "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state, show_flush_state(__entry->state), (unsigned long long)__entry->flags, __print_flags((unsigned long)__entry->flags, "|", @@ -913,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent, __entry->len = len; ), - TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu", + TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu", show_root_type(BTRFS_EXTENT_TREE_OBJECTID), (unsigned long long)__entry->start, (unsigned long long)__entry->len) @@ -952,7 +952,7 @@ TRACE_EVENT(find_free_extent, __entry->data = data; ), - TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)", + TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)", show_root_type(BTRFS_EXTENT_TREE_OBJECTID), __entry->num_bytes, __entry->empty_size, __entry->data, __print_flags((unsigned long)__entry->data, "|", @@ -981,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent, __entry->len = len; ), - TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), " - "start = %Lu, len = %Lu", + TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) " + "start=%Lu len=%Lu", show_root_type(BTRFS_EXTENT_TREE_OBJECTID), __entry->bg_objectid, __entry->flags, __print_flags((unsigned long)__entry->flags, @@ -1033,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster, __entry->min_bytes = min_bytes; ), - TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu," - " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid, + TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu " + "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid, __entry->flags, __print_flags((unsigned long)__entry->flags, "|", BTRFS_GROUP_FLAGS), __entry->start, @@ -1055,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup, __entry->bg_objectid = block_group->key.objectid; ), - TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid) + TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid) ); TRACE_EVENT(btrfs_setup_cluster, @@ -1083,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster, __entry->bitmap = bitmap; ), - 
TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, " - "size = %Lu, max_size = %Lu, bitmap = %d", + TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu " + "size=%Lu max_size=%Lu bitmap=%d", __entry->bg_objectid, __entry->flags, __print_flags((unsigned long)__entry->flags, "|", @@ -1111,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state, __entry->ip = IP ), - TP_printk("state=%p; mask = %s; caller = %pS", __entry->state, + TP_printk("state=%p mask=%s caller=%pS", __entry->state, show_gfp_flags(__entry->mask), (void *)__entry->ip) ); @@ -1131,7 +1131,7 @@ TRACE_EVENT(free_extent_state, __entry->ip = IP ), - TP_printk(" state=%p; caller = %pS", __entry->state, + TP_printk("state=%p caller=%pS", __entry->state, (void *)__entry->ip) ); @@ -1159,8 +1159,8 @@ DECLARE_EVENT_CLASS(btrfs__work, __entry->normal_work = &work->normal_work; ), - TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p," - " ordered_free=%p", + TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p " + "ordered_free=%p", __entry->work, __entry->normal_work, __entry->wq, __entry->func, __entry->ordered_func, __entry->ordered_free) ); @@ -1233,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue, __entry->high = high; ), - TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name), + TP_printk_btrfs("name=%s%s wq=%p", __get_str(name), __print_flags(__entry->high, "", {(WQ_HIGHPRI), "-high"}), __entry->wq) @@ -1288,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map, __entry->free_reserved = free_reserved; ), - TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu", + TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu", __entry->rootid, __entry->ino, __entry->free_reserved) ); @@ -1335,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data, __entry->op = op; ), - TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s", + TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s", __entry->rootid, __entry->ino, __entry->start, __entry->len, __entry->reserved, __print_flags((unsigned long)__entry->op, "", @@ -1373,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref, __entry->reserved = reserved; ), - TP_printk_btrfs("root=%llu, reserved=%llu, op=free", + TP_printk_btrfs("root=%llu reserved=%llu op=free", __entry->ref_root, __entry->reserved) ); @@ -1400,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent, __entry->num_bytes = rec->num_bytes; ), - TP_printk_btrfs("bytenr = %llu, num_bytes = %llu", + TP_printk_btrfs("bytenr=%llu num_bytes=%llu", (unsigned long long)__entry->bytenr, (unsigned long long)__entry->num_bytes) ); @@ -1442,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent, __entry->nr_new_roots = nr_new_roots; ), - TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, " - "nr_new_roots = %llu", + TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu " + "nr_new_roots=%llu", __entry->bytenr, __entry->num_bytes, __entry->nr_old_roots, @@ -1469,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters, __entry->cur_new_count = cur_new_count; ), - TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu", + TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu", __entry->qgid, __entry->cur_old_count, __entry->cur_new_count)