btrfs: qgroup: Record possible quota-related extent for qgroup.

Add a hook in add_delayed_ref_head() to record quota-related extents in the
delayed_ref_root->dirty_extent_root rb-tree for later qgroup accounting.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Chris Mason <clm@fb.com>
Author: Qu Wenruo, 2015-04-16 14:34:17 +08:00 (committed by Chris Mason)
Commit: 3368d001ba (parent 823ae5b8e3)
5 changed files with 95 additions and 7 deletions
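For orientation, the flow the new hook implements is: when quotas are enabled and the ref root is an fs tree, the caller allocates a btrfs_qgroup_extent_record, add_delayed_ref_head() fills in bytenr/num_bytes, inserts it into the per-transaction tree keyed by bytenr, and frees the record again if that extent was already recorded. The userspace sketch below models only that insert-or-free-duplicate contract; every name in it (dirty_extent_record, insert_dirty_extent, record_dirty_extent) is invented for illustration, and a plain unbalanced BST stands in for the kernel's rb-tree, which lives in btrfs_delayed_ref_root and is manipulated under delayed_refs->lock.

/*
 * Illustrative userspace model of the dedup contract added by this patch:
 * at most one dirty-extent record per bytenr per transaction, and a
 * duplicate insert hands back the existing record so the caller can free
 * the new one.  All names here are invented for the sketch; this is not
 * the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct dirty_extent_record {
        unsigned long long bytenr;
        unsigned long long num_bytes;
        struct dirty_extent_record *left, *right;
};

/* Insert keyed by bytenr; return the existing record on collision, NULL if inserted. */
static struct dirty_extent_record *
insert_dirty_extent(struct dirty_extent_record **root,
                    struct dirty_extent_record *rec)
{
        struct dirty_extent_record **p = root;

        while (*p) {
                if (rec->bytenr < (*p)->bytenr)
                        p = &(*p)->left;
                else if (rec->bytenr > (*p)->bytenr)
                        p = &(*p)->right;
                else
                        return *p;      /* this extent is already recorded */
        }
        *p = rec;
        return NULL;
}

/* Mirrors the hook: allocate a record, fill it, insert it, free it on duplicate. */
static void record_dirty_extent(struct dirty_extent_record **root,
                                unsigned long long bytenr,
                                unsigned long long num_bytes)
{
        struct dirty_extent_record *rec = calloc(1, sizeof(*rec));
        struct dirty_extent_record *existing;

        if (!rec)
                return;
        rec->bytenr = bytenr;
        rec->num_bytes = num_bytes;
        existing = insert_dirty_extent(root, rec);
        if (existing) {
                printf("extent %llu already recorded, dropping duplicate\n", bytenr);
                free(rec);
        }
}

int main(void)
{
        struct dirty_extent_record *root = NULL;

        /* Two distinct extents plus one repeat; the repeat is freed, not double-recorded. */
        record_dirty_extent(&root, 136314880ULL, 16384ULL);
        record_dirty_extent(&root, 136331264ULL, 4096ULL);
        record_dirty_extent(&root, 136314880ULL, 16384ULL);
        return 0;       /* demo only: the tree is not torn down */
}

Returning the existing entry instead of an error is the point of the design: if the same extent's reference count changes more than once within a transaction, a single record is still enough for the accounting pass that runs later.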

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c

@@ -22,6 +22,7 @@
 #include "ctree.h"
 #include "delayed-ref.h"
 #include "transaction.h"
+#include "qgroup.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@ -420,12 +421,14 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
static noinline struct btrfs_delayed_ref_head * static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info, add_delayed_ref_head(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans, struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref, u64 bytenr, struct btrfs_delayed_ref_node *ref,
u64 num_bytes, int action, int is_data) struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, int action, int is_data)
{ {
struct btrfs_delayed_ref_head *existing; struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_head *head_ref = NULL; struct btrfs_delayed_ref_head *head_ref = NULL;
struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *qexisting;
int count_mod = 1; int count_mod = 1;
int must_insert_reserved = 0; int must_insert_reserved = 0;
@@ -474,6 +477,18 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
         head_ref->processing = 0;
         head_ref->total_ref_mod = count_mod;
 
+        /* Record qgroup extent info if provided */
+        if (qrecord) {
+                qrecord->bytenr = bytenr;
+                qrecord->num_bytes = num_bytes;
+                qrecord->old_roots = NULL;
+
+                qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+                                                             qrecord);
+                if (qexisting)
+                        kfree(qrecord);
+        }
+
         spin_lock_init(&head_ref->lock);
         mutex_init(&head_ref->mutex);
@@ -624,6 +639,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_tree_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        struct btrfs_qgroup_extent_record *record = NULL;
 
         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                 no_quota = 0;
@@ -639,6 +655,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                 return -ENOMEM;
         }
 
+        if (fs_info->quota_enabled && is_fstree(ref_root)) {
+                record = kmalloc(sizeof(*record), GFP_NOFS);
+                if (!record) {
+                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+                        return -ENOMEM;
+                }
+        }
+
         head_ref->extent_op = extent_op;
 
         delayed_refs = &trans->transaction->delayed_refs;
@ -648,7 +673,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping * insert both the head node and the new ref without dropping
* the spin lock * the spin lock
*/ */
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
bytenr, num_bytes, action, 0); bytenr, num_bytes, action, 0);
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -673,6 +698,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_data_ref *ref;
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
+        struct btrfs_qgroup_extent_record *record = NULL;
 
         if (!is_fstree(ref_root) || !fs_info->quota_enabled)
                 no_quota = 0;
@@ -688,6 +714,16 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                 return -ENOMEM;
         }
 
+        if (fs_info->quota_enabled && is_fstree(ref_root)) {
+                record = kmalloc(sizeof(*record), GFP_NOFS);
+                if (!record) {
+                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                                        head_ref);
+                        return -ENOMEM;
+                }
+        }
+
         head_ref->extent_op = extent_op;
 
         delayed_refs = &trans->transaction->delayed_refs;
@ -697,7 +733,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping * insert both the head node and the new ref without dropping
* the spin lock * the spin lock
*/ */
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
bytenr, num_bytes, action, 1); bytenr, num_bytes, action, 1);
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -725,7 +761,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
         delayed_refs = &trans->transaction->delayed_refs;
         spin_lock(&delayed_refs->lock);
 
-        add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+        add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
                              num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                              extent_op->is_data);

diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h

@@ -148,6 +148,9 @@ struct btrfs_delayed_ref_root {
         /* head ref rbtree */
         struct rb_root href_root;
 
+        /* dirty extent records */
+        struct rb_root dirty_extent_root;
+
         /* this spin lock protects the rbtree and the entries inside */
         spinlock_t lock;

diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c

@@ -1553,6 +1553,37 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
         return 0;
 }
 
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                  struct btrfs_qgroup_extent_record *record)
+{
+        struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
+        struct rb_node *parent_node = NULL;
+        struct btrfs_qgroup_extent_record *entry;
+        u64 bytenr = record->bytenr;
+
+        while (*p) {
+                parent_node = *p;
+                entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
+                                 node);
+                if (bytenr < entry->bytenr)
+                        p = &(*p)->rb_left;
+                else if (bytenr > entry->bytenr)
+                        p = &(*p)->rb_right;
+                else
+                        return entry;
+        }
+
+        rb_link_node(&record->node, parent_node, p);
+        rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
+        return NULL;
+}
+
 /*
  * The easy accounting, if we are adding/removing the only ref for an extent
  * then this qgroup and all of the parent qgroups get their refrence and
  * exclusive counts adjusted.
  */
 static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                                   struct btrfs_qgroup_operation *oper)
 {

diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h

@@ -19,6 +19,9 @@
 #ifndef __BTRFS_QGROUP__
 #define __BTRFS_QGROUP__
 
+#include "ulist.h"
+#include "delayed-ref.h"
+
 /*
  * A description of the operations, all of these operations only happen when we
  * are adding the 1st reference for that subvolume in the case of adding space
@@ -58,6 +61,17 @@ struct btrfs_qgroup_operation {
         struct list_head list;
 };
 
+/*
+ * Record a dirty extent, and info qgroup to update quota on it
+ * TODO: Use kmem cache to alloc it.
+ */
+struct btrfs_qgroup_extent_record {
+        struct rb_node node;
+        u64 bytenr;
+        u64 num_bytes;
+        struct ulist *old_roots;
+};
+
 int btrfs_quota_enable(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info);
 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
@@ -84,6 +98,9 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
                             u64 bytenr, u64 num_bytes,
                             enum btrfs_qgroup_operation_type type,
                             int mod_seq);
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                                  struct btrfs_qgroup_extent_record *record);
 int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
                                     struct btrfs_fs_info *fs_info);
 void btrfs_remove_qgroup_operation(struct btrfs_trans_handle *trans,

diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c

@@ -225,6 +225,7 @@ static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
         cur_trans->dirty_bg_run = 0;
 
         cur_trans->delayed_refs.href_root = RB_ROOT;
+        cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
         cur_trans->delayed_refs.num_heads_ready = 0;
         cur_trans->delayed_refs.pending_csums = 0;