Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c |  2
-rw-r--r--  mm/migrate.c     | 59
-rw-r--r--  mm/page_cgroup.c |  3
-rw-r--r--  mm/slob.c        |  2
-rw-r--r--  mm/slub.c        |  2
-rw-r--r--  mm/swap.c        | 13
-rw-r--r--  mm/vmalloc.c     |  2
7 files changed, 53 insertions(+), 30 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 2a56124dbc28..801c08b046e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -176,7 +176,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
int ret = 0;
struct device *dev;
- if (WARN_ON(bdi->dev))
+ if (bdi->dev) /* The driver needs to use separate queues per device */
goto exit;
va_start(args, fmt);
diff --git a/mm/migrate.c b/mm/migrate.c
index 1e0d6b237f44..d8f07667fc80 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -987,25 +987,18 @@ out:
/*
* Determine the nodes of an array of pages and store it in an array of status.
*/
-static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
- const void __user * __user *pages,
- int __user *status)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+ const void __user **pages, int *status)
{
unsigned long i;
- int err;
down_read(&mm->mmap_sem);
for (i = 0; i < nr_pages; i++) {
- const void __user *p;
- unsigned long addr;
+ unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
struct page *page;
-
- err = -EFAULT;
- if (get_user(p, pages+i))
- goto out;
- addr = (unsigned long) p;
+ int err;
vma = find_vma(mm, addr);
if (!vma)
@@ -1024,12 +1017,52 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
err = page_to_nid(page);
set_status:
- put_user(err, status+i);
+ *status = err;
+
+ pages++;
+ status++;
+ }
+
+ up_read(&mm->mmap_sem);
+}
+
+/*
+ * Determine the nodes of a user array of pages and store it in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+ const void __user * __user *pages,
+ int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+ const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+ int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+ unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+ int err;
+
+ for (i = 0; i < nr_pages; i += chunk_nr) {
+ if (chunk_nr + i > nr_pages)
+ chunk_nr = nr_pages - i;
+
+ err = copy_from_user(chunk_pages, &pages[i],
+ chunk_nr * sizeof(*chunk_pages));
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+ err = copy_to_user(&status[i], chunk_status,
+ chunk_nr * sizeof(*chunk_status));
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
}
err = 0;
out:
- up_read(&mm->mmap_sem);
return err;
}
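
Note: the rewritten do_pages_stat() above drops the per-page get_user()/put_user() calls and instead bulk-copies the pointer and status arrays in fixed-size chunks, doing the actual lookups in do_pages_stat_array() while mmap_sem is held. A minimal user-space sketch of that chunking pattern follows; memcpy() stands in for copy_from_user()/copy_to_user(), and fake_page_to_nid() is a made-up placeholder for the per-page node query, so those names are illustrative only and not kernel API.

/* Illustrative sketch of the chunked-copy pattern used by do_pages_stat(). */
#include <stdio.h>
#include <string.h>

#define CHUNK_NR 16   /* mirrors DO_PAGES_STAT_CHUNK_NR */

static int fake_page_to_nid(const void *addr)
{
	(void)addr;
	return 0;       /* placeholder: pretend every page sits on node 0 */
}

static void stat_array(unsigned long nr, const void **pages, int *status)
{
	for (unsigned long i = 0; i < nr; i++)
		status[i] = fake_page_to_nid(pages[i]);
}

static int stat_chunked(unsigned long nr_pages, const void **pages, int *status)
{
	const void *chunk_pages[CHUNK_NR];
	int chunk_status[CHUNK_NR];
	unsigned long chunk_nr = CHUNK_NR;

	for (unsigned long i = 0; i < nr_pages; i += chunk_nr) {
		if (chunk_nr + i > nr_pages)
			chunk_nr = nr_pages - i;          /* final, short chunk */

		/* bulk "copy_from_user" of the next chunk of page pointers */
		memcpy(chunk_pages, &pages[i], chunk_nr * sizeof(*chunk_pages));

		stat_array(chunk_nr, chunk_pages, chunk_status);

		/* bulk "copy_to_user" of the per-page results */
		memcpy(&status[i], chunk_status, chunk_nr * sizeof(*chunk_status));
	}
	return 0;
}

int main(void)
{
	const void *pages[40];
	int status[40];

	for (int i = 0; i < 40; i++)
		pages[i] = (const void *)(0x1000UL * (i + 1));

	stat_chunked(40, pages, status);
	printf("status[0] = %d, status[39] = %d\n", status[0], status[39]);
	return 0;
}
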
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 0b3cbf090a67..ab27ff750519 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -49,6 +49,9 @@ static int __init alloc_node_page_cgroup(int nid)
start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages;
+ if (!nr_pages)
+ return 0;
+
table_size = sizeof(struct page_cgroup) * nr_pages;
base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
diff --git a/mm/slob.c b/mm/slob.c
index cb675d126791..bf7e8fc3aed8 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -535,7 +535,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
struct kmem_cache *c;
c = slob_alloc(sizeof(struct kmem_cache),
- flags, ARCH_KMALLOC_MINALIGN, -1);
+ GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
if (c) {
c->name = name;
diff --git a/mm/slub.c b/mm/slub.c
index 749588a50a5a..a2cd47d89e0a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3597,7 +3597,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
for (i = 0; i < t.count; i++) {
struct location *l = &t.loc[i];
- if (len > PAGE_SIZE - 100)
+ if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
break;
len += sprintf(buf + len, "%7ld ", l->count);
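
Note: the slub.c hunk above reserves KSYM_SYMBOL_LEN bytes of headroom before each sprintf() so a long symbol name cannot run past the page-sized output buffer. A small user-space sketch of that "stop while the worst case still fits" pattern follows; BUF_SIZE, MAX_ENTRY_LEN and the sample entry text are assumed stand-ins for PAGE_SIZE, KSYM_SYMBOL_LEN + 100 and the real location data.

/* Sketch of the headroom check used by list_locations() above. */
#include <stdio.h>

#define BUF_SIZE      4096
#define MAX_ENTRY_LEN  228   /* assumed upper bound for one formatted entry */

int main(void)
{
	char buf[BUF_SIZE];
	int len = 0;

	for (int i = 0; i < 1000; i++) {
		/* break while the worst-case entry still fits, instead of
		 * noticing the overflow after it has happened */
		if (len > BUF_SIZE - MAX_ENTRY_LEN)
			break;

		len += sprintf(buf + len, "entry %4d: %s\n", i,
			       "some_symbol_name+0x1a0/0x2c0 [module]");
	}

	printf("wrote %d bytes before running out of headroom\n", len);
	return 0;
}
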
diff --git a/mm/swap.c b/mm/swap.c
index 2881987603eb..b135ec90cdeb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -299,7 +299,6 @@ void lru_add_drain(void)
put_cpu();
}
-#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
@@ -313,18 +312,6 @@ int lru_add_drain_all(void)
return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
- lru_add_drain();
- return 0;
-}
-#endif
-
/*
* Batched page_cache_release(). Decrement the reference count on all the
* passed pages. If it fell to zero then remove the page from the LRU and
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f3f6e0758562..1ddb77ba3995 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1717,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p)
v->addr, v->addr + v->size, v->size);
if (v->caller) {
- char buff[2 * KSYM_NAME_LEN];
+ char buff[KSYM_SYMBOL_LEN];
seq_putc(m, ' ');
sprint_symbol(buff, (unsigned long)v->caller);