path: root/fs/jbd2/transaction.c
author     Yongqiang Yang <xiaoqiangnk@gmail.com>    2012-02-20 17:53:02 -0500
committer  Theodore Ts'o <tytso@mit.edu>             2012-02-20 17:53:02 -0500
commit     0c2022eccb01630c037f2024531e9ff1afbe1564 (patch)
tree       704d54db298e64a84fd60330fd7985d27117d98c /fs/jbd2/transaction.c
parent     18aadd47f88464928b5ce57791c2e8f9f2aaece0 (diff)
jbd2: allocate transaction from separate slab cache
There is normally only a handful of these active at any one time, but putting them in a separate slab cache makes debugging memory corruption problems easier. Manish Katiyar also wanted this to make it easier to test memory failure scenarios in the jbd2 layer.

Cc: Manish Katiyar <mkatiyar@gmail.com>
Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
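The change follows the usual dedicated kmem_cache pattern: a named cache gives the object type its own line in /proc/slabinfo and its own slab-debugging coverage (poisoning, redzoning) instead of sharing the generic kmalloc caches. Below is a minimal, self-contained sketch of that pattern using made-up names (struct demo_obj, a "demo_obj" cache); it is illustrative only and not jbd2 code.

/* Minimal sketch of the dedicated-slab-cache pattern; all names here
 * (demo_obj, demo_cache) are hypothetical and not part of jbd2. */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
        int id;
        char payload[64];
};

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
        struct demo_obj *obj;

        /* A named cache for one object type: visible as "demo_obj" in
         * /proc/slabinfo, unlike anonymous kzalloc() allocations. */
        demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_cache)
                return -ENOMEM;

        /* kmem_cache_zalloc()/kmem_cache_free() replace the former
         * kzalloc()/kfree() pairing once a dedicated cache exists. */
        obj = kmem_cache_zalloc(demo_cache, GFP_KERNEL);
        if (!obj) {
                kmem_cache_destroy(demo_cache);
                demo_cache = NULL;
                return -ENOMEM;
        }
        kmem_cache_free(demo_cache, obj);
        return 0;
}

static void __exit demo_exit(void)
{
        if (demo_cache) {
                kmem_cache_destroy(demo_cache);
                demo_cache = NULL;
        }
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");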
Diffstat (limited to 'fs/jbd2/transaction.c')
-rw-r--r--   fs/jbd2/transaction.c   36
1 file changed, 33 insertions(+), 3 deletions(-)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 526533062548..d0a8b017b281 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -33,6 +33,35 @@
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);
+static struct kmem_cache *transaction_cache;
+int __init jbd2_journal_init_transaction_cache(void)
+{
+        J_ASSERT(!transaction_cache);
+        transaction_cache = kmem_cache_create("jbd2_transaction_s",
+                                        sizeof(transaction_t),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
+                                        NULL);
+        if (transaction_cache)
+                return 0;
+        return -ENOMEM;
+}
+
+void jbd2_journal_destroy_transaction_cache(void)
+{
+        if (transaction_cache) {
+                kmem_cache_destroy(transaction_cache);
+                transaction_cache = NULL;
+        }
+}
+
+void jbd2_journal_free_transaction(transaction_t *transaction)
+{
+        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
+                return;
+        kmem_cache_free(transaction_cache, transaction);
+}
+
/*
* jbd2_get_transaction: obtain a new transaction_t object.
*
@@ -133,7 +162,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
alloc_transaction:
        if (!journal->j_running_transaction) {
-                new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
+                new_transaction = kmem_cache_alloc(transaction_cache,
+                                                   gfp_mask | __GFP_ZERO);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
@@ -162,7 +192,7 @@ repeat:
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
-                kfree(new_transaction);
+                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }
@@ -284,7 +314,7 @@ repeat:
        read_unlock(&journal->j_state_lock);
        lock_map_acquire(&handle->h_lockdep_map);
-        kfree(new_transaction);
+        jbd2_journal_free_transaction(new_transaction);
        return 0;
}
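The diff adds the cache and its three helpers but not their call sites: creating and destroying the cache happens outside fs/jbd2/transaction.c, in the journal's module-wide cache setup and teardown. The sketch below shows how a caller might wire that up; the example_* names are hypothetical, and it assumes the helpers are declared in include/linux/jbd2.h as in later kernels.

/* Hypothetical caller sketch -- this wiring is not part of the diff above.
 * It assumes the three helpers are exposed via include/linux/jbd2.h. */
#include <linux/init.h>
#include <linux/jbd2.h>

int __init example_jbd2_caches_init(void)
{
        int ret;

        /* Create the transaction slab before any journal can start a
         * transaction; returns -ENOMEM if the cache cannot be created. */
        ret = jbd2_journal_init_transaction_cache();
        if (ret)
                return ret;
        return 0;
}

void example_jbd2_caches_exit(void)
{
        /* Safe to call even if init never ran: the helper checks that
         * the cache pointer is non-NULL before destroying it. */
        jbd2_journal_destroy_transaction_cache();
}

Note also that jbd2_journal_free_transaction() mirrors kfree()'s tolerance of NULL (via the ZERO_OR_NULL_PTR() check), which is why the error path and the "a transaction was already running" path in the hunks above can pass a possibly-NULL new_transaction without an extra check.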