path: root/fs/gfs2/ops_address.c
author	Steven Whitehouse <swhiteho@redhat.com>	2006-08-04 15:41:22 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2006-08-04 15:41:22 -0400
commit	59a1cc6bdabf5ed148b48808ad1a418d87f5e6bf (patch)
tree	6463071a09201040267702e895d63359e62c393d /fs/gfs2/ops_address.c
parent	899bb264507cfed83922bf14cd66a073494601ba (diff)
[GFS2] Fix lock ordering bug in page fault path
Mmapped files were able to trigger a lock ordering bug. Private maps do not need to take the glock so early on. Shared maps unfortunately do, but we can get around that by adding a flag to the struct gfs2_file flags. This only works because we are taking an exclusive lock at this point, so we know that nobody else can be racing with us.

Fixes Red Hat bugzilla: #201196

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
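The pattern the patch relies on can be illustrated outside the kernel. The following is a minimal userspace sketch, not GFS2 code: file_ctx, FLAG_EXLOCK, do_readpage and fault_path are hypothetical names, and a pthreads rwlock stands in for the inode glock. It shows how the exclusive-lock holder publishes a per-file flag so that the read path skips re-acquiring the lock, which is what the GFF_EXLOCK test in the diff below achieves.

/*
 * Userspace analogy of the fix (all names hypothetical): the thread that
 * already holds the lock exclusively sets a per-file flag, so the read
 * path knows not to take the lock again.  In the real patch this avoids
 * a lock ordering inversion against the page lock in the fault path.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_EXLOCK 0x1			/* stands in for GFF_EXLOCK */

struct file_ctx {
	pthread_rwlock_t lock;		/* stands in for the inode glock */
	unsigned long flags;		/* like gfs2_file->f_flags */
};

static int do_readpage(struct file_ctx *f)
{
	int locked_here = 0;		/* mirrors the patch's do_unlock */

	/* Skip the shared lock if the caller already holds it exclusively. */
	if (!(f->flags & FLAG_EXLOCK)) {
		pthread_rwlock_rdlock(&f->lock);
		locked_here = 1;
	}

	printf("reading page (locked_here=%d)\n", locked_here);

	if (locked_here)
		pthread_rwlock_unlock(&f->lock);
	return 0;
}

static void fault_path(struct file_ctx *f)
{
	/* Exclusive lock held: safe to publish the flag, nobody can race. */
	pthread_rwlock_wrlock(&f->lock);
	f->flags |= FLAG_EXLOCK;

	do_readpage(f);			/* must not take the lock again */

	f->flags &= ~FLAG_EXLOCK;
	pthread_rwlock_unlock(&f->lock);
}

int main(void)
{
	struct file_ctx f = { .flags = 0 };

	pthread_rwlock_init(&f.lock, NULL);
	do_readpage(&f);		/* ordinary read: takes the lock itself */
	fault_path(&f);			/* shared-mmap fault: lock already held */
	pthread_rwlock_destroy(&f.lock);
	return 0;
}

The flag can only be trusted because it is set while the lock is held exclusively; a shared holder publishing it would race with other readers, which is exactly the caveat the commit message notes.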
Diffstat (limited to 'fs/gfs2/ops_address.c')
-rw-r--r--	fs/gfs2/ops_address.c	| 23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index fca69f12e4de..bdd4d6b48721 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -237,14 +237,22 @@ static int gfs2_readpage(struct file *file, struct page *page)
 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
 	struct gfs2_holder gh;
 	int error;
+	int do_unlock = 0;
 
 	if (likely(file != &gfs2_internal_file_sentinal)) {
+		if (file) {
+			struct gfs2_file *gf = file->private_data;
+			if (test_bit(GFF_EXLOCK, &gf->f_flags))
+				goto skip_lock;
+		}
 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
+		do_unlock = 1;
 		error = gfs2_glock_nq_m_atime(1, &gh);
 		if (unlikely(error))
 			goto out_unlock;
 	}
 
+skip_lock:
 	if (gfs2_is_stuffed(ip)) {
 		error = stuffed_readpage(ip, page);
 		unlock_page(page);
@@ -262,7 +270,7 @@ out:
 	return error;
 out_unlock:
 	unlock_page(page);
-	if (file != &gfs2_internal_file_sentinal)
+	if (do_unlock)
 		gfs2_holder_uninit(&gh);
 	goto out;
 }
@@ -291,17 +299,24 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 	struct gfs2_holder gh;
 	unsigned page_idx;
 	int ret;
+	int do_unlock = 0;
 
 	if (likely(file != &gfs2_internal_file_sentinal)) {
+		if (file) {
+			struct gfs2_file *gf = file->private_data;
+			if (test_bit(GFF_EXLOCK, &gf->f_flags))
+				goto skip_lock;
+		}
 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
 				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
+		do_unlock = 1;
 		ret = gfs2_glock_nq_m_atime(1, &gh);
 		if (ret == GLR_TRYFAILED)
 			goto out_noerror;
 		if (unlikely(ret))
 			goto out_unlock;
 	}
-
+skip_lock:
 	if (gfs2_is_stuffed(ip)) {
 		struct pagevec lru_pvec;
 		pagevec_init(&lru_pvec, 0);
@@ -326,7 +341,7 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
 		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
 	}
 
-	if (likely(file != &gfs2_internal_file_sentinal)) {
+	if (do_unlock) {
 		gfs2_glock_dq_m(1, &gh);
 		gfs2_holder_uninit(&gh);
 	}
@@ -344,7 +359,7 @@ out_unlock:
 		unlock_page(page);
 		page_cache_release(page);
 	}
-	if (likely(file != &gfs2_internal_file_sentinal))
+	if (do_unlock)
 		gfs2_holder_uninit(&gh);
 	goto out;
 }