Diffstat (limited to 'mm')
-rw-r--r--  mm/mlock.c            7
-rw-r--r--  mm/mmap.c             4
-rw-r--r--  mm/page-writeback.c  17
-rw-r--r--  mm/page_alloc.c      27
-rw-r--r--  mm/page_io.c          2
-rw-r--r--  mm/shmem.c           45
-rw-r--r--  mm/swapfile.c         4
-rw-r--r--  mm/util.c            20
-rw-r--r--  mm/vmalloc.c         20
-rw-r--r--  mm/vmscan.c          32
10 files changed, 119 insertions, 59 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
return buffer;
}
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
{
unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
current->mm->locked_vm -= pgsz;
up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+ release_locked_buffer(buffer, size);
kfree(buffer);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 00ced3ee49a8..1abb9185a686 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
+#include <linux/ima.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
@@ -1049,6 +1050,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
if (error)
return error;
+ error = ima_file_mmap(file, prot);
+ if (error)
+ return error;
return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c84128596ba..40ca7cdb653e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
/*
* Start background writeback (via pdflush) at this percentage
*/
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
/*
* dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
/*
* The generator of dirty data starts writeback at this percentage
*/
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
/*
* vm_dirty_bytes starts at 0 (disabled) so that it is a function of
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
{
prop_inc_single(&vm_dirties, &tsk->dirties);
}
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
__inc_zone_page_state(page, NR_FILE_DIRTY);
__inc_bdi_stat(mapping->backing_dev_info,
BDI_RECLAIMABLE);
+ task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
* If the mapping doesn't provide a set_page_dirty a_op, then
* just fall through and assume that it wants buffer_heads.
*/
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
}
return 0;
}
-
-int set_page_dirty(struct page *page)
-{
- int ret = __set_page_dirty(page);
- if (ret)
- task_dirty_inc(current);
- return ret;
-}
EXPORT_SYMBOL(set_page_dirty);
/*
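
For scale, a back-of-the-envelope illustration of the bumped defaults (a hypothetical userspace sketch, not the kernel's own threshold code): assuming 4 GiB of dirtyable memory and a plain percentage calculation, background writeback now starts around 409 MiB of dirty data (10%, previously ~204 MiB at 5%) and writers are throttled around 819 MiB (20%, previously ~409 MiB at 10%).

	/* Hypothetical userspace illustration only; not kernel code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long dirtyable = 4ULL << 30;	/* assume 4 GiB dirtyable memory */
		int dirty_background_ratio = 10;		/* new default, was 5 */
		int vm_dirty_ratio = 20;			/* new default, was 10 */

		printf("background writeback starts at ~%llu MiB\n",
		       dirtyable * dirty_background_ratio / 100 >> 20);
		printf("writers are throttled at      ~%llu MiB\n",
		       dirtyable * vm_dirty_ratio / 100 >> 20);
		return 0;
	}
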
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..5c44ed49ca93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
* was used and there are no special requirements, this is a convenient
* alternative
*/
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
{
int i;
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
if (start_pfn <= pfn && pfn < end_pfn)
return early_node_map[i].nid;
}
+ /* This is a memory hole */
+ return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+ int nid;
+ nid = __early_pfn_to_nid(pfn);
+ if (nid >= 0)
+ return nid;
+ /* just returns 0 */
return 0;
}
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+ int nid;
+
+ nid = __early_pfn_to_nid(pfn);
+ if (nid >= 0 && nid != node)
+ return false;
+ return true;
+}
+#endif
/* Basic iterator support to walk early_node_map[] */
#define for_each_active_range_index_in_nid(i, nid) \
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
goto out;
}
if (wbc->sync_mode == WB_SYNC_ALL)
- rw |= (1 << BIO_RW_SYNC);
+ rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 19d566ccdeea..7ec78e24a30d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -28,6 +28,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
+#include <linux/ima.h>
static struct vfsmount *shm_mnt;
@@ -169,13 +170,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
- return (flags & VM_ACCOUNT) ?
- security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+ return (flags & VM_NORESERVE) ?
+ 0 : security_vm_enough_memory_kern(VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
- if (flags & VM_ACCOUNT)
+ if (!(flags & VM_NORESERVE))
vm_unacct_memory(VM_ACCT(size));
}
@@ -187,13 +188,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
*/
static inline int shmem_acct_block(unsigned long flags)
{
- return (flags & VM_ACCOUNT) ?
- 0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+ return (flags & VM_NORESERVE) ?
+ security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
- if (!(flags & VM_ACCOUNT))
+ if (flags & VM_NORESERVE)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
@@ -1515,8 +1516,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+ dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
@@ -1537,6 +1538,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
+ info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
switch (mode & S_IFMT) {
@@ -1779,9 +1781,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
- struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+ struct inode *inode;
int error = -ENOSPC;
+ inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir, NULL, NULL,
NULL);
@@ -1920,7 +1923,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
- inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+ inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
if (!inode)
return -ENOSPC;
@@ -2332,7 +2335,7 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_flags |= MS_POSIXACL;
#endif
- inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+ inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)
goto failed;
inode->i_uid = sbinfo->uid;
@@ -2574,12 +2577,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
-#define SHMEM_MAX_BYTES LLONG_MAX
+#define shmem_vm_ops generic_file_vm_ops
+#define shmem_file_operations ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags) ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size) 0
+#define shmem_unacct_size(flags, size) do {} while (0)
+#define SHMEM_MAX_BYTES LLONG_MAX
#endif /* CONFIG_SHMEM */
@@ -2589,7 +2592,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
* shmem_file_setup - get an unlinked file living in tmpfs
* @name: name for dentry (to be seen in /proc/<pid>/maps
* @size: size to be set for the file
- * @flags: vm_flags
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
@@ -2623,13 +2626,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
goto put_dentry;
error = -ENOSPC;
- inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+ inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
goto close_file;
-#ifdef CONFIG_SHMEM
- SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
d_instantiate(dentry, inode);
inode->i_size = size;
inode->i_nlink = 0; /* It is unlinked */
@@ -2666,6 +2666,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
if (IS_ERR(file))
return PTR_ERR(file);
+ ima_shm_check(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;
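
For context, a minimal caller sketch under the new accounting scheme (the wrapper and its name are invented for illustration; the calls mirror the shmem_zero_setup() hunk above): passing VM_NORESERVE asks shmem_file_setup() for an unlinked tmpfs file whose full size is not pre-accounted, while omitting it keeps the old pre-accounted behaviour.

	/* Hypothetical caller; illustration only. */
	static struct file *example_shmem_backing(loff_t size)
	{
		struct file *file;

		file = shmem_file_setup("example-shmem", size, VM_NORESERVE);
		if (IS_ERR(file))
			return file;

		ima_shm_check(file);	/* new IMA hook, as in shmem_zero_setup() */
		return file;
	}
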
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 7e6304dfafab..312fafe0ab6e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
if (!bdev) {
if (bdev_p)
- *bdev_p = sis->bdev;
+ *bdev_p = bdget(sis->bdev->bd_dev);
spin_unlock(&swap_lock);
return i;
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
struct swap_extent, list);
if (se->start_block == offset) {
if (bdev_p)
- *bdev_p = sis->bdev;
+ *bdev_p = bdget(sis->bdev->bd_dev);
spin_unlock(&swap_lock);
bdput(bdev);
diff --git a/mm/util.c b/mm/util.c
index cb00b748ce47..37eaccdf3054 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
}
EXPORT_SYMBOL(krealloc);
+/**
+ * kzfree - like kfree but zero memory
+ * @p: object to free memory of
+ *
+ * The memory of the object @p points to is zeroed before freed.
+ * If @p is %NULL, kzfree() does nothing.
+ */
+void kzfree(const void *p)
+{
+ size_t ks;
+ void *mem = (void *)p;
+
+ if (unlikely(ZERO_OR_NULL_PTR(mem)))
+ return;
+ ks = ksize(mem);
+ memset(mem, 0, ks);
+ kfree(mem);
+}
+EXPORT_SYMBOL(kzfree);
+
/*
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
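
A minimal, hypothetical usage sketch for the new helper (the caller below is invented; only kzfree() itself comes from the hunk above): it is intended for buffers that held sensitive data, and because ksize() reports the rounded-up allocation size, the whole underlying object is cleared, not just the bytes the caller asked for.

	/* Hypothetical caller; illustration only. */
	static int example_handle_secret(const u8 *src, size_t len)
	{
		u8 *secret = kmalloc(len, GFP_KERNEL);

		if (!secret)
			return -ENOMEM;

		memcpy(secret, src, len);
		/* ... use the secret ... */

		kzfree(secret);		/* zero the allocation, then kfree() it */
		return 0;
	}
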
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8c..520a75980269 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -323,6 +323,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
unsigned long addr;
int purged = 0;
+ BUG_ON(!size);
BUG_ON(size & ~PAGE_MASK);
va = kmalloc_node(sizeof(struct vmap_area),
@@ -334,6 +335,9 @@ retry:
addr = ALIGN(vstart, align);
spin_lock(&vmap_area_lock);
+ if (addr + size - 1 < addr)
+ goto overflow;
+
/* XXX: could have a last_hole cache */
n = vmap_area_root.rb_node;
if (n) {
@@ -365,6 +369,8 @@ retry:
while (addr + size > first->va_start && addr + size <= vend) {
addr = ALIGN(first->va_end + PAGE_SIZE, align);
+ if (addr + size - 1 < addr)
+ goto overflow;
n = rb_next(&first->rb_node);
if (n)
@@ -375,6 +381,7 @@ retry:
}
found:
if (addr + size > vend) {
+overflow:
spin_unlock(&vmap_area_lock);
if (!purged) {
purge_vmap_area_lazy();
@@ -498,6 +505,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
static DEFINE_SPINLOCK(purge_lock);
LIST_HEAD(valist);
struct vmap_area *va;
+ struct vmap_area *n_va;
int nr = 0;
/*
@@ -537,7 +545,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
if (nr) {
spin_lock(&vmap_area_lock);
- list_for_each_entry(va, &valist, purge_list)
+ list_for_each_entry_safe(va, n_va, &valist, purge_list)
__free_vmap_area(va);
spin_unlock(&vmap_area_lock);
}
@@ -1012,6 +1020,8 @@ void __init vmalloc_init(void)
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
unsigned long end = addr + size;
+
+ flush_cache_vunmap(addr, end);
vunmap_page_range(addr, end);
flush_tlb_kernel_range(addr, end);
}
@@ -1106,6 +1116,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
}
EXPORT_SYMBOL_GPL(__get_vm_area);
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+ void *caller)
+{
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+ caller);
+}
+
/**
* get_vm_area - reserve a contiguous kernel virtual area
* @size: size of the area
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9a27c44aa327..56ddf41149eb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1262,7 +1262,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* Move the pages to the [file or anon] inactive list.
*/
pagevec_init(&pvec, 1);
- pgmoved = 0;
lru = LRU_BASE + file * LRU_FILE;
spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1273,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
*/
reclaim_stat->recent_rotated[!!file] += pgmoved;
+ pgmoved = 0;
while (!list_empty(&l_inactive)) {
page = lru_to_page(&l_inactive);
prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
int file = is_file_lru(l);
int scan;
- scan = zone_page_state(zone, NR_LRU_BASE + l);
+ scan = zone_nr_pages(zone, sc, l);
if (priority) {
scan >>= priority;
scan = (scan * percent[file]) / 100;
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
int pass, struct scan_control *sc)
{
struct zone *zone;
- unsigned long nr_to_scan, ret = 0;
- enum lru_list l;
+ unsigned long ret = 0;
for_each_zone(zone) {
+ enum lru_list l;
if (!populated_zone(zone))
continue;
-
if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
continue;
for_each_evictable_lru(l) {
+ enum zone_stat_item ls = NR_LRU_BASE + l;
+ unsigned long lru_pages = zone_page_state(zone, ls);
+
/* For pass = 0, we don't shrink the active list */
- if (pass == 0 &&
- (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+ if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+ l == LRU_ACTIVE_FILE))
continue;
- zone->lru[l].nr_scan +=
- (zone_page_state(zone, NR_LRU_BASE + l)
- >> prio) + 1;
+ zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+ unsigned long nr_to_scan;
+
zone->lru[l].nr_scan = 0;
- nr_to_scan = min(nr_pages,
- zone_page_state(zone,
- NR_LRU_BASE + l));
+ nr_to_scan = min(nr_pages, lru_pages);
ret += shrink_list(l, nr_to_scan, zone,
sc, prio);
if (ret >= nr_pages)
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
}
}
}
-
return ret;
}
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
.may_swap = 0,
.swap_cluster_max = nr_pages,
.may_writepage = 1,
- .swappiness = vm_swappiness,
.isolate_pages = isolate_pages_global,
};
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
int prio;
/* Force reclaiming mapped pages in the passes #3 and #4 */
- if (pass > 2) {
+ if (pass > 2)
sc.may_swap = 1;
- sc.swappiness = 100;
- }
for (prio = DEF_PRIORITY; prio >= 0; prio--) {
unsigned long nr_to_scan = nr_pages - ret;