author    Hugh Dickins <hugh@veritas.com>	2005-10-29 18:16:40 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:42 -0700
commit    4c21e2f2441dc5fbb957b030333f5a3f2d02dea7
tree      1f76d33bb1d76221c6424bc5fed080a4f91349a6	/mm/shmem.c
parent    b38c6845b695141259019e2b7c0fe6c32a6e720d
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with a many-threaded application which concurrently initializes different parts of a large anonymous area. This patch corrects that, by using a separate spinlock per page table page, to guard the page table entries in that page, instead of using the mm's single page_table_lock. (But even then, page_table_lock is still used to guard page table allocation, and anon_vma allocation.)

In this implementation, the spinlock is tucked inside the struct page of the page table page: with a BUILD_BUG_ON in case it overflows - which it would in the case of 32-bit PA-RISC with spinlock debugging enabled.

Splitting the lock is not quite for free: another cacheline access. Ideally, I suppose we would use split ptlock only for multi-threaded processes on multi-cpu machines; but deciding that dynamically would have its own costs. So for now enable it by config, at some number of cpus - since the Kconfig language doesn't support inequalities, let preprocessor compare that with NR_CPUS. But I don't think it's worth being user-configurable: for good testing of both split and unsplit configs, split now at 4 cpus, and perhaps change that to 8 later.

There is a benefit even for singly threaded processes: kswapd can be attacking one part of the mm while another part is busy faulting.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
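In outline, the mechanism reads like the sketch below. This is an illustrative rendering of the description above, not a verbatim copy of the commit's include/linux/mm.h changes: the helper names pte_lock_init() and pte_lockptr() model the kernel's wrappers, and the ptl field is assumed to sit in a union over page->private.

/*
 * Illustrative sketch only -- modeled on the commit description,
 * not quoted from the patch itself.
 */
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * Split case: the spinlock is tucked inside the struct page of the
 * page table page itself.  A BUILD_BUG_ON elsewhere catches configs
 * where spinlock_t is too large to fit (e.g. 32-bit PA-RISC with
 * spinlock debugging enabled).
 */
#define pte_lock_init(page)	spin_lock_init(&(page)->ptl)
#define pte_lockptr(mm, pmd)	({ (void)(mm); &pmd_page(*(pmd))->ptl; })
#else
/*
 * Unsplit case, below CONFIG_SPLIT_PTLOCK_CPUS: fall back to the
 * mm's single page_table_lock, as before.  Kconfig cannot express
 * "NR_CPUS >= 4", so the comparison is left to the preprocessor.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({ (void)(pmd); &(mm)->page_table_lock; })
#endif

A faulting thread then takes pte_lockptr(mm, pmd) for just the one page table page it is modifying, so threads, or kswapd, working in different parts of the address space no longer serialize on a single lock.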
Diffstat (limited to 'mm/shmem.c')
 mm/shmem.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)
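The hunks below are the shmem.c side of the change: the old nr_swapped alias for page->private is dropped in favour of the page_private()/set_page_private() accessors. As a minimal sketch, assuming their simple non-debug form rather than quoting include/linux/mm.h verbatim, those accessors amount to:

/* Assumed form of the accessors this diff adopts -- not quoted. */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

Routing shmem's per-directory-page swap count through these accessors means no caller touches ->private by name, which is what lets that word be overlaid by the new ptl spinlock in struct page on configurations that split the lock.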
diff --git a/mm/shmem.c b/mm/shmem.c
index 37777f4c11f8..dc25565a61e9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -71,9 +71,6 @@
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
-/* Keep swapped page count in private field of indirect struct page */
-#define nr_swapped private
-
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
SGP_QUICK, /* don't try more than file page cache lookup */
@@ -324,8 +321,10 @@ static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, uns
entry->val = value;
info->swapped += incdec;
- if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
- kmap_atomic_to_page(entry)->nr_swapped += incdec;
+ if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
+ struct page *page = kmap_atomic_to_page(entry);
+ set_page_private(page, page_private(page) + incdec);
+ }
}
/*
@@ -368,9 +367,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
spin_unlock(&info->lock);
page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
- if (page) {
- page->nr_swapped = 0;
- }
+ if (page)
+ set_page_private(page, 0);
spin_lock(&info->lock);
if (!page) {
@@ -561,7 +559,7 @@ static void shmem_truncate(struct inode *inode)
diroff = 0;
}
subdir = dir[diroff];
- if (subdir && subdir->nr_swapped) {
+ if (subdir && page_private(subdir)) {
size = limit - idx;
if (size > ENTRIES_PER_PAGE)
size = ENTRIES_PER_PAGE;
@@ -572,10 +570,10 @@ static void shmem_truncate(struct inode *inode)
nr_swaps_freed += freed;
if (offset)
spin_lock(&info->lock);
- subdir->nr_swapped -= freed;
+ set_page_private(subdir, page_private(subdir) - freed);
if (offset)
spin_unlock(&info->lock);
- BUG_ON(subdir->nr_swapped > offset);
+ BUG_ON(page_private(subdir) > offset);
}
if (offset)
offset = 0;
@@ -743,7 +741,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
dir = shmem_dir_map(subdir);
}
subdir = *dir;
- if (subdir && subdir->nr_swapped) {
+ if (subdir && page_private(subdir)) {
ptr = shmem_swp_map(subdir);
size = limit - idx;
if (size > ENTRIES_PER_PAGE)