Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        80
-rw-r--r--  lib/Kconfig.kgdb         11
-rw-r--r--  lib/Makefile              7
-rw-r--r--  lib/bitmap.c             11
-rw-r--r--  lib/cmdline.c             2
-rw-r--r--  lib/debug_locks.c         2
-rw-r--r--  lib/debugobjects.c       46
-rw-r--r--  lib/idr.c                 2
-rw-r--r--  lib/iomap.c               3
-rw-r--r--  lib/iommu-helper.c        5
-rw-r--r--  lib/klist.c              96
-rw-r--r--  lib/kobject.c             3
-rw-r--r--  lib/kobject_uevent.c      6
-rw-r--r--  lib/lmb.c                 2
-rw-r--r--  lib/percpu_counter.c      8
-rw-r--r--  lib/plist.c              13
-rw-r--r--  lib/radix-tree.c        180
-rw-r--r--  lib/random32.c           48
-rw-r--r--  lib/ratelimit.c           3
-rw-r--r--  lib/scatterlist.c         4
-rw-r--r--  lib/show_mem.c           63
-rw-r--r--  lib/smp_processor_id.c    5
-rw-r--r--  lib/string_helpers.c     64
-rw-r--r--  lib/swiotlb.c            51
-rw-r--r--  lib/syscall.c            75
-rw-r--r--  lib/vsprintf.c           13
26 files changed, 634 insertions, 169 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e1d4764435ed..aa81d2848448 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -394,7 +394,7 @@ config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
select STACKTRACE
- select FRAME_POINTER if !X86 && !MIPS
+ select FRAME_POINTER if !X86 && !MIPS && !PPC
select KALLSYMS
select KALLSYMS_ALL
@@ -495,6 +495,15 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_VIRTUAL
+ bool "Debug VM translations"
+ depends on DEBUG_KERNEL && X86
+ help
+ Enable some costly sanity checks in virtual to page code. This can
+ catch mistakes with virt_to_page() and friends.
+
+ If unsure, say N.
+
config DEBUG_WRITECOUNT
bool "Debug filesystem writers count"
depends on DEBUG_KERNEL
@@ -597,6 +606,19 @@ config RCU_TORTURE_TEST_RUNNABLE
Say N here if you want the RCU torture tests to start only
after being manually enabled via /proc.
+config RCU_CPU_STALL_DETECTOR
+ bool "Check for stalled CPUs delaying RCU grace periods"
+ depends on CLASSIC_RCU
+ default n
+ help
+ This option causes RCU to printk information on which
+ CPUs are delaying the current grace period, but only when
+ the grace period extends for excessive time periods.
+
+ Say Y if you want RCU to perform such checks.
+
+ Say N if you are unsure.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -624,6 +646,28 @@ config BACKTRACE_SELF_TEST
Say N if you are unsure.
+config DEBUG_BLOCK_EXT_DEVT
+ bool "Force extended block device numbers and spread them"
+ depends on DEBUG_KERNEL
+ depends on BLOCK
+ default n
+ help
+ Conventionally, block device numbers are allocated from a
+ predetermined contiguous area. However, the extended block area
+ may introduce non-contiguous block device numbers. This
+ option forces most block device numbers to be allocated from
+ the extended space and spreads them to discover kernel or
+ userland code paths which assume predetermined contiguous
+ device number allocation.
+
+ Note that turning on this debug option shuffles all the
+ device numbers for all IDE and SCSI devices including libata
+ ones, so a root partition specified using a device number
+ directly (via rdev or root=MAJ:MIN) won't work anymore.
+ Textual device names (root=/dev/sdXn) will continue to work.
+
+ Say N if you are unsure.
+
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
@@ -661,10 +705,21 @@ config FAIL_PAGE_ALLOC
config FAIL_MAKE_REQUEST
bool "Fault-injection capability for disk IO"
- depends on FAULT_INJECTION
+ depends on FAULT_INJECTION && BLOCK
help
Provide fault-injection capability for disk IO.
+config FAIL_IO_TIMEOUT
+ bool "Faul-injection capability for faking disk interrupts"
+ depends on FAULT_INJECTION && BLOCK
+ help
+ Provide fault-injection capability on end IO handling. This
+ will make the block layer "forget" an interrupt as configured,
+ thus exercising the error handling.
+
+ Only works with drivers that use the generic timeout handling,
+ for others it won't do anything.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -676,13 +731,13 @@ config FAULT_INJECTION_STACKTRACE_FILTER
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
depends on !X86_64
select STACKTRACE
- select FRAME_POINTER
+ select FRAME_POINTER if !PPC
help
Provide stacktrace filter for fault-injection capabilities
config LATENCYTOP
bool "Latency measuring infrastructure"
- select FRAME_POINTER if !MIPS
+ select FRAME_POINTER if !MIPS && !PPC
select KALLSYMS
select KALLSYMS_ALL
select STACKTRACE
@@ -693,6 +748,14 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config SYSCTL_SYSCALL_CHECK
+ bool "Sysctl checks"
+ depends on SYSCTL_SYSCALL
+ ---help---
+ sys_sysctl uses binary paths that have been found challenging
+ to properly maintain and use. This enables checks that help
+ you to keep things correct.
+
source kernel/trace/Kconfig
config PROVIDE_OHCI1394_DMA_INIT
@@ -735,6 +798,15 @@ config FIREWIRE_OHCI_REMOTE_DMA
If unsure, say N.
+menuconfig BUILD_DOCSRC
+ bool "Build targets in Documentation/ tree"
+ depends on HEADERS_CHECK
+ help
+ This option attempts to build objects from the source files in the
+ kernel Documentation/ tree.
+
+ Say N if you are unsure.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 2cfd2721f7ed..9b5d1d7f2ef7 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -4,14 +4,17 @@ config HAVE_ARCH_KGDB
menuconfig KGDB
bool "KGDB: kernel debugging with remote gdb"
- select FRAME_POINTER
depends on HAVE_ARCH_KGDB
depends on DEBUG_KERNEL && EXPERIMENTAL
help
If you say Y here, it will be possible to remotely debug the
- kernel using gdb. Documentation of kernel debugger is available
- at http://kgdb.sourceforge.net as well as in DocBook form
- in Documentation/DocBook/. If unsure, say N.
+ kernel using gdb. It is recommended but not required, that
+ you also turn on the kernel config option
+ CONFIG_FRAME_POINTER to aid in producing more reliable stack
+ backtraces in the external debugger. Documentation of
+ kernel debugger is available at http://kgdb.sourceforge.net
+ as well as in DocBook form in Documentation/DocBook/. If
+ unsure, say N.
if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 9085ad6fa53d..44001af76a7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o ratelimit.o
+ proportions.o prio_heap.o ratelimit.o show_mem.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -19,7 +19,8 @@ lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
+ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
+ string_helpers.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -78,6 +79,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_LMB) += lmb.o
+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 482df94ea21e..06fb57c86de0 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,6 +316,17 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
+ * bitmap_scnprintf_len - return buffer length needed to convert
+ * bitmap to an ASCII hex string
+ * @nr_bits: number of bits to be converted
+ */
+int bitmap_scnprintf_len(unsigned int nr_bits)
+{
+ unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;
+ return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
+}
+
+/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
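The new length helper pairs with bitmap_scnprintf(), which prints the bitmap
as comma-separated groups of CHUNKSZ bits (CHUNKSZ is 32 in lib/bitmap.c,
i.e. 8 hex nibbles per group). A minimal userspace sketch of the same
arithmetic, assuming that CHUNKSZ value:

#include <stdio.h>

#define CHUNKSZ 32
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

/* nibbles needed, plus one ',' between each full group of CHUNKSZ/4 */
static unsigned int scnprintf_len(unsigned int nr_bits)
{
	unsigned int nr_nibbles = ALIGN(nr_bits, 4) / 4;

	return nr_nibbles + ALIGN(nr_nibbles, CHUNKSZ / 4) / (CHUNKSZ / 4) - 1;
}

int main(void)
{
	/* 64 bits -> "ffffffff,ffffffff" -> 16 nibbles + 1 comma = 17 */
	printf("%u\n", scnprintf_len(64));
	return 0;
}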
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 5ba8a942a478..f5f3ad8b62ff 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -126,7 +126,7 @@ char *get_options(const char *str, int nints, int *ints)
* megabyte, or one gigabyte, respectively.
*/
-unsigned long long memparse(char *ptr, char **retptr)
+unsigned long long memparse(const char *ptr, char **retptr)
{
char *endptr; /* local pointer to end of parsed string */
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 0ef01d14727c..0218b4693dd8 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,6 +8,7 @@
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
+#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -37,6 +38,7 @@ int debug_locks_off(void)
{
if (xchg(&debug_locks, 0)) {
if (!debug_locks_silent) {
+ oops_in_progress = 1;
console_verbose();
return 1;
}
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index f86196390cfd..e3ab374e1334 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -112,6 +112,7 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
/*
* Allocate a new object. If the pool is empty, switch off the debugger.
+ * Must be called with interrupts disabled.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
@@ -148,17 +149,18 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
static void free_object(struct debug_obj *obj)
{
unsigned long idx = (unsigned long)(obj - obj_static_pool);
+ unsigned long flags;
if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
} else {
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
obj_pool_used--;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
kmem_cache_free(obj_cache, obj);
}
}
@@ -171,6 +173,7 @@ static void debug_objects_oom(void)
{
struct debug_bucket *db = obj_hash;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj *obj;
unsigned long flags;
int i;
@@ -179,11 +182,14 @@ static void debug_objects_oom(void)
for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
spin_lock_irqsave(&db->lock, flags);
- hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_move_list(&db->list, &freelist);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
hlist_del(&obj->node);
free_object(obj);
}
- spin_unlock_irqrestore(&db->lock, flags);
}
}
@@ -205,9 +211,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
if (limit < 5 && obj->descr != descr_test) {
limit++;
- printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+ WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
obj_states[obj->state], obj->descr->name);
- WARN_ON(1);
}
debug_objects_warnings++;
}
@@ -499,8 +504,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
return;
default:
hlist_del(&obj->node);
+ spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
- break;
+ return;
}
out_unlock:
spin_unlock_irqrestore(&db->lock, flags);
@@ -511,6 +517,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
struct hlist_node *node, *tmp;
+ HLIST_HEAD(freelist);
struct debug_obj_descr *descr;
enum debug_obj_state state;
struct debug_bucket *db;
@@ -546,11 +553,18 @@ repeat:
goto repeat;
default:
hlist_del(&obj->node);
- free_object(obj);
+ hlist_add_head(&obj->node, &freelist);
break;
}
}
spin_unlock_irqrestore(&db->lock, flags);
+
+ /* Now free them */
+ hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+
if (cnt > debug_objects_maxchain)
debug_objects_maxchain = cnt;
}
@@ -733,26 +747,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
- printk(KERN_ERR "ODEBUG: selftest object not found\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
goto out;
}
if (obj && obj->state != state) {
- printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
obj->state, state);
- WARN_ON(1);
goto out;
}
if (fixups != debug_objects_fixups) {
- printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
fixups, debug_objects_fixups);
- WARN_ON(1);
goto out;
}
if (warnings != debug_objects_warnings) {
- printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
warnings, debug_objects_warnings);
- WARN_ON(1);
goto out;
}
res = 0;
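Two themes run through the debugobjects changes: free_object() now takes
pool_lock with interrupts disabled, and the bulk-free paths first detach
everything onto a local freelist under the bucket lock, then free with the
lock dropped. A userspace sketch of that detach-then-free pattern (the names
are illustrative, not the kernel's):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *bucket;

static void drain_bucket(void)
{
	struct node *freelist, *n;

	/* analogue of hlist_move_list(): steal the whole list under the lock */
	pthread_mutex_lock(&bucket_lock);
	freelist = bucket;
	bucket = NULL;
	pthread_mutex_unlock(&bucket_lock);

	/* now free them, without holding the bucket lock */
	while ((n = freelist) != NULL) {
		freelist = n->next;
		free(n);
	}
}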
diff --git a/lib/idr.c b/lib/idr.c
index 3476f8203e97..e728c7fccc4d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -607,7 +607,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
}
EXPORT_SYMBOL(idr_replace);
-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
{
memset(idr_layer, 0, sizeof(struct idr_layer));
}
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9f..d32229385151 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
static int count = 10;
if (count) {
count--;
- printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
- WARN_ON(1);
+ WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
}
}
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..5d90074dca75 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -30,8 +30,7 @@ again:
return index;
}
-static inline void set_bit_area(unsigned long *map, unsigned long i,
- int len)
+void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
{
unsigned long end = i + len;
while (i < end) {
@@ -64,7 +63,7 @@ again:
start = index + 1;
goto again;
}
- set_bit_area(map, index, nr);
+ iommu_area_reserve(map, index, nr);
}
return index;
}
diff --git a/lib/klist.c b/lib/klist.c
index cca37f96faa2..bbdd3015c2c7 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -37,6 +37,37 @@
#include <linux/klist.h>
#include <linux/module.h>
+/*
+ * Use the lowest bit of n_klist to mark deleted nodes and exclude
+ * dead ones from iteration.
+ */
+#define KNODE_DEAD 1LU
+#define KNODE_KLIST_MASK ~KNODE_DEAD
+
+static struct klist *knode_klist(struct klist_node *knode)
+{
+ return (struct klist *)
+ ((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+}
+
+static bool knode_dead(struct klist_node *knode)
+{
+ return (unsigned long)knode->n_klist & KNODE_DEAD;
+}
+
+static void knode_set_klist(struct klist_node *knode, struct klist *klist)
+{
+ knode->n_klist = klist;
+ /* no knode deserves to start its life dead */
+ WARN_ON(knode_dead(knode));
+}
+
+static void knode_kill(struct klist_node *knode)
+{
+ /* and no knode should die twice ever either, see we're very humane */
+ WARN_ON(knode_dead(knode));
+ *(unsigned long *)&knode->n_klist |= KNODE_DEAD;
+}
/**
* klist_init - Initialize a klist structure.
@@ -79,7 +110,7 @@ static void klist_node_init(struct klist *k, struct klist_node *n)
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
kref_init(&n->n_ref);
- n->n_klist = k;
+ knode_set_klist(n, k);
if (k->get)
k->get(n);
}
@@ -115,7 +146,7 @@ EXPORT_SYMBOL_GPL(klist_add_tail);
*/
void klist_add_after(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -131,7 +162,7 @@ EXPORT_SYMBOL_GPL(klist_add_after);
*/
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
- struct klist *k = pos->n_klist;
+ struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
@@ -144,9 +175,10 @@ static void klist_release(struct kref *kref)
{
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
+ WARN_ON(!knode_dead(n));
list_del(&n->n_node);
complete(&n->n_removed);
- n->n_klist = NULL;
+ knode_set_klist(n, NULL);
}
static int klist_dec_and_del(struct klist_node *n)
@@ -154,22 +186,29 @@ static int klist_dec_and_del(struct klist_node *n)
return kref_put(&n->n_ref, klist_release);
}
-/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
- */
-void klist_del(struct klist_node *n)
+static void klist_put(struct klist_node *n, bool kill)
{
- struct klist *k = n->n_klist;
+ struct klist *k = knode_klist(n);
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
+ if (kill)
+ knode_kill(n);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
+
+/**
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
+ */
+void klist_del(struct klist_node *n)
+{
+ klist_put(n, true);
+}
EXPORT_SYMBOL_GPL(klist_del);
/**
@@ -206,7 +245,6 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
- i->i_head = &k->k_list;
i->i_cur = n;
if (n)
kref_get(&n->n_ref);
@@ -237,7 +275,7 @@ EXPORT_SYMBOL_GPL(klist_iter_init);
void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
- klist_del(i->i_cur);
+ klist_put(i->i_cur, false);
i->i_cur = NULL;
}
}
@@ -258,27 +296,33 @@ static struct klist_node *to_klist_node(struct list_head *n)
*/
struct klist_node *klist_next(struct klist_iter *i)
{
- struct list_head *next;
- struct klist_node *lnode = i->i_cur;
- struct klist_node *knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
+ struct klist_node *last = i->i_cur;
+ struct klist_node *next;
spin_lock(&i->i_klist->k_lock);
- if (lnode) {
- next = lnode->n_node.next;
- if (!klist_dec_and_del(lnode))
+
+ if (last) {
+ next = to_klist_node(last->n_node.next);
+ if (!klist_dec_and_del(last))
put = NULL;
} else
- next = i->i_head->next;
+ next = to_klist_node(i->i_klist->k_list.next);
- if (next != i->i_head) {
- knode = to_klist_node(next);
- kref_get(&knode->n_ref);
+ i->i_cur = NULL;
+ while (next != to_klist_node(&i->i_klist->k_list)) {
+ if (likely(!knode_dead(next))) {
+ kref_get(&next->n_ref);
+ i->i_cur = next;
+ break;
+ }
+ next = to_klist_node(next->n_node.next);
}
- i->i_cur = knode;
+
spin_unlock(&i->i_klist->k_lock);
- if (put && lnode)
- put(lnode);
- return knode;
+
+ if (put && last)
+ put(last);
+ return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
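The klist rework hides a "dead" flag in bit 0 of the node's n_klist
back-pointer; struct klist is at least word-aligned, so that bit is otherwise
always zero. A standalone sketch of the tagging trick (helper names are
illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define NODE_DEAD 1UL

static inline bool node_dead(void *n_klist)
{
	return (uintptr_t)n_klist & NODE_DEAD;
}

static inline void *node_owner(void *n_klist)
{
	/* mask the flag off to recover the real list pointer */
	return (void *)((uintptr_t)n_klist & ~NODE_DEAD);
}

static inline void *node_kill(void *n_klist)
{
	return (void *)((uintptr_t)n_klist | NODE_DEAD);
}

klist_next() can then skip dead nodes during iteration, while klist_put()
still recovers the owning list through the masked pointer to finish the
removal.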
diff --git a/lib/kobject.c b/lib/kobject.c
index bd732ffebc85..fbf0ae282376 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -223,8 +223,7 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
return -ENOMEM;
/* ewww... some of these buggers have '/' in the name ... */
- s = strchr(kobj->name, '/');
- if (s)
+ while ((s = strchr(kobj->name, '/')))
s[0] = '!';
kfree(old_name);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9f8d599459d1..3f914725bda8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -285,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
int len;
if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
- printk(KERN_ERR "add_uevent_var: too many keys\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
return -ENOMEM;
}
@@ -297,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
va_end(args);
if (len >= (sizeof(env->buf) - env->buflen)) {
- printk(KERN_ERR "add_uevent_var: buffer size too small\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
return -ENOMEM;
}
diff --git a/lib/lmb.c b/lib/lmb.c
index 5d7b9286503e..97e547037084 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -462,6 +462,8 @@ void __init lmb_enforce_memory_limit(u64 memory_limit)
if (lmb.memory.region[0].size < lmb.rmo_size)
lmb.rmo_size = lmb.memory.region[0].size;
+ memory_limit = lmb_end_of_DRAM();
+
/* And truncate any reserves above the limit also. */
for (i = 0; i < lmb.reserved.cnt; i++) {
p = &lmb.reserved.region[i];
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6f..a8663890a88c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
- if (set)
- *pcount = 0;
+ *pcount = 0;
}
- if (set)
- fbc->count = ret;
+ fbc->count = ret;
spin_unlock(&fbc->lock);
return ret;
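With the set parameter gone, __percpu_counter_sum() always folds the per-cpu
deltas back into fbc->count. A userspace analogue of the simplified function,
assuming a fixed CPU count and a mutex in place of the kernel spinlock:

#include <pthread.h>

#define NR_CPUS 4

struct pcpu_counter {
	pthread_mutex_t lock;
	long long count;
	long counters[NR_CPUS];
};

static long long pcpu_sum(struct pcpu_counter *fbc)
{
	long long ret;
	int cpu;

	pthread_mutex_lock(&fbc->lock);
	ret = fbc->count;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ret += fbc->counters[cpu];
		fbc->counters[cpu] = 0;   /* unconditional now; no "set" flag */
	}
	fbc->count = ret;
	pthread_mutex_unlock(&fbc->lock);
	return ret;
}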
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f3..d6c64a824e1d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
- if (n->prev != p || p->next != n) {
- printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
- printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
- printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
- WARN_ON(1);
- }
+ WARN(n->prev != p || p->next != n,
+ "top: %p, n: %p, p: %p\n"
+ "prev: %p, n: %p, p: %p\n"
+ "next: %p, n: %p, p: %p\n",
+ t, t->next, t->prev,
+ p, p->next, p->prev,
+ n, n->next, n->prev);
}
static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 56ec21a7f73d..be86b32bc874 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -359,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
* Returns: the slot corresponding to the position @index in the
* radix tree @root. This is useful for update-if-exists operations.
*
- * This function cannot be called under rcu_read_lock, it must be
- * excluded from writers, as must the returned slot for subsequent
- * use by radix_tree_deref_slot() and radix_tree_replace slot.
- * Caller must hold tree write locked across slot lookup and
- * replace.
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
unsigned int height, shift;
struct radix_tree_node *node, **slot;
- node = root->rnode;
+ node = rcu_dereference(root->rnode);
if (node == NULL)
return NULL;
@@ -390,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = *slot;
+ node = rcu_dereference(*slot);
if (node == NULL)
return NULL;
@@ -667,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
EXPORT_SYMBOL(radix_tree_next_hole);
static unsigned int
-__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
@@ -701,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,
/* Bottom level: grab some items */
for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
- struct radix_tree_node *node;
index++;
- node = slot->slots[i];
- if (node) {
- results[nr_found++] = rcu_dereference(node);
+ if (slot->slots[i]) {
+ results[nr_found++] = &(slot->slots[i]);
if (nr_found == max_items)
goto out;
}
@@ -759,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup(node, results + ret, cur_index,
+ slots_found = __lookup(node, (void ***)results + ret, cur_index,
max_items - ret, &next_index);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -776,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
+/**
+ * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ *
+ * Performs an index-ascending scan of the tree for present items. Places
+ * their slots at *@results and returns the number of items which were
+ * placed at *@results.
+ *
+ * The implementation is naive.
+ *
+ * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
+ * be dereferenced with radix_tree_deref_slot, and if using only RCU
+ * protection, radix_tree_deref_slot may fail requiring a retry.
+ */
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items)
+{
+ unsigned long max_index;
+ struct radix_tree_node *node;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup(node, results + ret, cur_index,
+ max_items - ret, &next_index);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
+
/*
* FIXME: the two tag_get()s here should use find_next_bit() instead of
* open-coding the search.
*/
static unsigned int
-__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index, unsigned int tag)
{
unsigned int nr_found = 0;
@@ -811,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
unsigned long j = index & RADIX_TREE_MAP_MASK;
for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
- struct radix_tree_node *node;
index++;
if (!tag_get(slot, tag, j))
continue;
- node = slot->slots[j];
/*
* Even though the tag was found set, we need to
* recheck that we have a non-NULL node, because
@@ -826,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
* lookup ->slots[x] without a lock (ie. can't
* rely on its value remaining the same).
*/
- if (node) {
- node = rcu_dereference(node);
- results[nr_found++] = node;
+ if (slot->slots[j]) {
+ results[nr_found++] = &(slot->slots[j]);
if (nr_found == max_items)
goto out;
}
@@ -887,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup_tag(node, results + ret, cur_index,
- max_items - ret, &next_index, tag);
+ slots_found = __lookup_tag(node, (void ***)results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -905,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
+ * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
+ * radix tree based on a tag
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Performs an index-ascending scan of the tree for present items which
+ * have the tag indexed by @tag set. Places the slots at *@results and
+ * returns the number of slots which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag)
+{
+ struct radix_tree_node *node;
+ unsigned long max_index;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup_tag(node, results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
+
+
+/**
* radix_tree_shrink - shrink height of a radix tree to minimal
* @root radix tree root
*/
@@ -1051,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
EXPORT_SYMBOL(radix_tree_tagged);
static void
-radix_tree_node_ctor(struct kmem_cache *cachep, void *node)
+radix_tree_node_ctor(void *node)
{
memset(node, 0, sizeof(struct radix_tree_node));
}
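The common thread in the radix-tree changes: __lookup() and __lookup_tag()
now return slot addresses (void **) rather than dereferenced items, which
lets the new *_slot gang-lookup variants hand slots to RCU-aware callers.
A sketch of how a pagecache-style caller might use the new API (the
surrounding mapping, index and page types are assumptions, not part of this
patch):

void **slots[16];
struct page *pages[16];
unsigned int i, n, found = 0;

rcu_read_lock();
n = radix_tree_gang_lookup_slot(&mapping->page_tree, slots, index, 16);
for (i = 0; i < n; i++) {
	/* the slot may have been emptied since the lookup; skip if so */
	struct page *page = radix_tree_deref_slot(slots[i]);
	if (!page)
		continue;
	pages[found++] = page;
}
rcu_read_unlock();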
diff --git a/lib/random32.c b/lib/random32.c
index ca87d86992bd..217d5c4b666d 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -56,23 +56,12 @@ static u32 __random32(struct rnd_state *state)
return (state->s1 ^ state->s2 ^ state->s3);
}
-static void __set_random32(struct rnd_state *state, unsigned long s)
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
{
- if (s == 0)
- s = 1; /* default seed is 1 */
-
-#define LCG(n) (69069 * n)
- state->s1 = LCG(s);
- state->s2 = LCG(state->s1);
- state->s3 = LCG(state->s2);
-
- /* "warm it up" */
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
- __random32(state);
+ return (x < m) ? x + m : x;
}
/**
@@ -107,7 +96,7 @@ void srandom32(u32 entropy)
*/
for_each_possible_cpu (i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
- __set_random32(state, state->s1 ^ entropy);
+ state->s1 = __seed(state->s1 ^ entropy, 1);
}
}
EXPORT_SYMBOL(srandom32);
@@ -122,7 +111,19 @@ static int __init random32_init(void)
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
- __set_random32(state, i + jiffies);
+
+#define LCG(x) ((x) * 69069) /* super-duper LCG */
+ state->s1 = __seed(LCG(i + jiffies), 1);
+ state->s2 = __seed(LCG(state->s1), 7);
+ state->s3 = __seed(LCG(state->s2), 15);
+
+ /* "warm it up" */
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
}
return 0;
}
@@ -135,13 +136,18 @@ core_initcall(random32_init);
static int __init random32_reseed(void)
{
int i;
- unsigned long seed;
for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state,i);
+ u32 seeds[3];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+ state->s1 = __seed(seeds[0], 1);
+ state->s2 = __seed(seeds[1], 7);
+ state->s3 = __seed(seeds[2], 15);
- get_random_bytes(&seed, sizeof(seed));
- __set_random32(state, seed);
+ /* mix it in */
+ __random32(state);
}
return 0;
}
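The random32 rework replaces __set_random32() with explicit per-component
seeding. Each Tausworthe component feeds back only its upper bits, so a
too-small seed would collapse that component; __seed() bumps such values
past a per-component minimum instead. A compact userspace sketch of the
seeding chain (function names are illustrative):

#include <stdint.h>

static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;   /* enforce the per-component minimum */
}

static void seed_state(uint32_t s[3], uint32_t entropy)
{
#define LCG(x) ((x) * 69069u)         /* "super-duper" LCG, as in the patch */
	s[0] = seed_min(LCG(entropy), 1);
	s[1] = seed_min(LCG(s[0]), 7);
	s[2] = seed_min(LCG(s[1]), 15);
#undef LCG
}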
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 35136671b215..26187edcc7ea 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
static DEFINE_SPINLOCK(ratelimit_lock);
-static unsigned long flags;
/*
* __ratelimit - rate limiting
@@ -26,6 +25,8 @@ static unsigned long flags;
*/
int __ratelimit(struct ratelimit_state *rs)
{
+ unsigned long flags;
+
if (!rs->interval)
return 1;
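The ratelimit fix deserves a note: spin_lock_irqsave() writes the caller's
saved interrupt state into flags, so a file-scope static flags shared by all
CPUs let one caller's spin_unlock_irqrestore() restore another caller's
state. The saved word must be per-invocation, i.e. on the stack:

/* schematic kernel-style fragment, not a compilable unit */
static DEFINE_SPINLOCK(lock);

static unsigned long flags;          /* WRONG: shared, clobbered by races */

void good_path(void)
{
	unsigned long flags;         /* RIGHT: private to this call */

	spin_lock_irqsave(&lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&lock, flags);
}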
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 876ba6d5b670..8d2688ff1352 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -422,9 +422,12 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
+ unsigned long flags;
sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+ local_irq_save(flags);
+
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
@@ -442,6 +445,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
sg_miter_stop(&miter);
+ local_irq_restore(flags);
return offset;
}
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 000000000000..238e72a18ce1
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
+/*
+ * Generic show_mem() implementation
+ *
+ * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
+ * All code subject to the GPL version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/nmi.h>
+#include <linux/quicklist.h>
+
+void show_mem(void)
+{
+ pg_data_t *pgdat;
+ unsigned long total = 0, reserved = 0, shared = 0,
+ nonshared = 0, highmem = 0;
+
+ printk(KERN_INFO "Mem-Info:\n");
+ show_free_areas();
+
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+ pgdat_resize_lock(pgdat, &flags);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ unsigned long pfn = pgdat->node_start_pfn + i;
+
+ if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+ touch_nmi_watchdog();
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+
+ if (PageHighMem(page))
+ highmem++;
+
+ if (PageReserved(page))
+ reserved++;
+ else if (page_count(page) == 1)
+ nonshared++;
+ else if (page_count(page) > 1)
+ shared += page_count(page) - 1;
+
+ total++;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk(KERN_INFO "%lu pages RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+ printk(KERN_INFO "%lu pages HighMem\n", highmem);
+#endif
+ printk(KERN_INFO "%lu pages reserved\n", reserved);
+ printk(KERN_INFO "%lu pages shared\n", shared);
+ printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+#ifdef CONFIG_QUICKLIST
+ printk(KERN_INFO "%lu pages in pagetable cache\n",
+ quicklist_total_size());
+#endif
+}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index c4381d9516f6..0f8fc22ed103 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,6 @@ notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- cpumask_of_cpu_ptr_declare(this_mask);
if (likely(preempt_count))
goto out;
@@ -23,9 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- cpumask_of_cpu_ptr_next(this_mask, this_cpu);
-
- if (cpus_equal(current->cpus_allowed, *this_mask))
+ if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
goto out;
/*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
new file mode 100644
index 000000000000..8347925030ff
--- /dev/null
+++ b/lib/string_helpers.c
@@ -0,0 +1,64 @@
+/*
+ * Helpers for formatting and printing strings
+ *
+ * Copyright 31 August 2008 James Bottomley
+ */
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/string_helpers.h>
+
+/**
+ * string_get_size - get the size in the specified units
+ * @size: The size to be converted
+ * @units: units to use (powers of 1000 or 1024)
+ * @buf: buffer to format to
+ * @len: length of buffer
+ *
+ * This function returns a string formatted to 3 significant figures
+ * giving the size in the required units. Returns 0 on success or
+ * error on failure. @buf is always zero terminated.
+ *
+ */
+int string_get_size(u64 size, const enum string_size_units units,
+ char *buf, int len)
+{
+ const char *units_10[] = { "B", "KB", "MB", "GB", "TB", "PB",
+ "EB", "ZB", "YB", NULL};
+ const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+ "EiB", "ZiB", "YiB", NULL };
+ const char **units_str[] = {
+ [STRING_UNITS_10] = units_10,
+ [STRING_UNITS_2] = units_2,
+ };
+ const int divisor[] = {
+ [STRING_UNITS_10] = 1000,
+ [STRING_UNITS_2] = 1024,
+ };
+ int i, j;
+ u64 remainder = 0, sf_cap;
+ char tmp[8];
+
+ tmp[0] = '\0';
+
+ for (i = 0; size > divisor[units] && units_str[units][i]; i++)
+ remainder = do_div(size, divisor[units]);
+
+ sf_cap = size;
+ for (j = 0; sf_cap*10 < 1000; j++)
+ sf_cap *= 10;
+
+ if (j) {
+ remainder *= 1000;
+ do_div(remainder, divisor[units]);
+ snprintf(tmp, sizeof(tmp), ".%03lld",
+ (unsigned long long)remainder);
+ tmp[j+1] = '\0';
+ }
+
+ snprintf(buf, len, "%lld%s%s", (unsigned long long)size,
+ tmp, units_str[units][i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(string_get_size);
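Expected output of the new helper, worked by hand from the code above (note
that the committed format string "%lld%s%s" puts no space before the unit):

char buf[16];

string_get_size(500107862016ULL, STRING_UNITS_10, buf, sizeof(buf));
/* buf == "500GB":  500107862016 / 1000^3, three significant figures */

string_get_size(500107862016ULL, STRING_UNITS_2, buf, sizeof(buf));
/* buf == "465GiB": 500107862016 / 1024^3 */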
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8cc..f8eebd489149 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -274,13 +274,14 @@ cleanup1:
}
static int
-address_needs_mapping(struct device *hwdev, dma_addr_t addr)
+address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
{
- dma_addr_t mask = 0xffffffff;
- /* If the device has a mask, use it, otherwise default to 32 bits */
- if (hwdev && hwdev->dma_mask)
- mask = *hwdev->dma_mask;
- return (addr & ~mask) != 0;
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+static int is_swiotlb_buffer(char *addr)
+{
+ return addr >= io_tlb_start && addr < io_tlb_end;
}
/*
@@ -467,15 +468,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
- /*
- * XXX fix me: the DMA API should pass us an explicit DMA mask
- * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
- * bit range instead of a 16MB one).
- */
- flags |= GFP_DMA;
-
ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -490,19 +484,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
* swiotlb_map_single(), which will grab memory from
* the lowest available address range.
*/
- dma_addr_t handle;
- handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(handle))
+ ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
+ if (!ret)
return NULL;
-
- ret = bus_to_virt(handle);
}
memset(ret, 0, size);
dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
- if (address_needs_mapping(hwdev, dev_addr)) {
+ if (address_needs_mapping(hwdev, dev_addr, size)) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)*hwdev->dma_mask,
(unsigned long long)dev_addr);
@@ -518,12 +509,11 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
WARN_ON(irqs_disabled());
- if (!(vaddr >= (void *)io_tlb_start
- && vaddr < (void *)io_tlb_end))
+ if (!is_swiotlb_buffer(vaddr))
free_pages((unsigned long) vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
- swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+ unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
static void
@@ -567,7 +557,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
return dev_addr;
/*
@@ -584,7 +574,7 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
/*
* Ensure that the address returned is DMA'ble
*/
- if (address_needs_mapping(hwdev, dev_addr))
+ if (address_needs_mapping(hwdev, dev_addr, size))
panic("map_single: bounce buffer is not DMA'ble");
return dev_addr;
@@ -612,7 +602,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -642,7 +632,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -673,7 +663,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
- if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ if (is_swiotlb_buffer(dma_addr))
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
dma_mark_clean(dma_addr, size);
@@ -727,7 +717,8 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
for_each_sg(sgl, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
dev_addr = virt_to_bus(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
+ if (swiotlb_force ||
+ address_needs_mapping(hwdev, dev_addr, sg->length)) {
void *map = map_single(hwdev, addr, sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
@@ -824,7 +815,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
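The recurring swiotlb change is passing the mapping size down so the
capability test covers the whole buffer. A sketch of what the size-aware
check amounts to (the kernel expresses this through is_buffer_dma_capable()
and dma_get_mask(); the exact expression here is an approximation):

#include <stdbool.h>
#include <stdint.h>

static bool buffer_dma_capable(uint64_t mask, uint64_t addr, uint64_t size)
{
	/* the last byte, not just the first, must fall under the mask */
	return addr + size - 1 <= mask;
}

The old code tested only addr against a default 32-bit mask, so a buffer
that started below the limit but extended past it was wrongly treated as
reachable by the device.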
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 000000000000..a4f7067f72fa
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <asm/syscall.h>
+
+static int collect_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ if (unlikely(!regs))
+ return -EAGAIN;
+
+ *sp = user_stack_pointer(regs);
+ *pc = instruction_pointer(regs);
+
+ *callno = syscall_get_nr(target, regs);
+ if (*callno != -1L && maxargs > 0)
+ syscall_get_arguments(target, regs, 0, maxargs, args);
+
+ return 0;
+}
+
+/**
+ * task_current_syscall - Discover what a blocked task is doing.
+ * @target: thread to examine
+ * @callno: filled with system call number or -1
+ * @args: filled with @maxargs system call arguments
+ * @maxargs: number of elements in @args to fill
+ * @sp: filled with user stack pointer
+ * @pc: filled with user PC
+ *
+ * If @target is blocked in a system call, returns zero with *@callno
+ * set to the call's number and @args filled in with its arguments.
+ * Registers not used for system call arguments may not be available and
+ * it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress. Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
+ *
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with *@callno set to -1 and does not fill in @args.
+ * If so, it's now safe to examine @target using &struct user_regset
+ * get() calls as long as we're sure @target won't return to user mode.
+ *
+ * Returns -%EAGAIN if @target does not remain blocked.
+ *
+ * Returns -%EINVAL if @maxargs is too large (maximum is six).
+ */
+int task_current_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ long state;
+ unsigned long ncsw;
+
+ if (unlikely(maxargs > 6))
+ return -EINVAL;
+
+ if (target == current)
+ return collect_syscall(target, callno, args, maxargs, sp, pc);
+
+ state = target->state;
+ if (unlikely(!state))
+ return -EAGAIN;
+
+ ncsw = wait_task_inactive(target, state);
+ if (unlikely(!ncsw) ||
+ unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(wait_task_inactive(target, state) != ncsw))
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(task_current_syscall);
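A sketch of a consumer, roughly the shape of what a /proc/<pid>/syscall
style reader would do with this helper (the printk formatting is
illustrative only):

long nr;
unsigned long args[6], sp, pc;

if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0) {
	if (nr != -1L)
		printk("blocked in syscall %ld(0x%lx, 0x%lx, ...)\n",
		       nr, args[0], args[1]);
	else
		printk("blocked in kernel, sp=0x%lx pc=0x%lx\n", sp, pc);
}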
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 1dc2d1d18fa8..c399bc1093cb 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -27,6 +27,7 @@
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
+#include <asm/sections.h> /* for dereference_function_descriptor() */
/* Works only for digits and letters, but small and fast */
#define TOLOWER(x) ((x) | 0x20)
@@ -220,7 +221,7 @@ int strict_strtou##type(const char *cp, unsigned int base, valtype *res)\
if (len == 0) \
return -EINVAL; \
\
- val = simple_strtoul(cp, &tail, base); \
+ val = simple_strtou##type(cp, &tail, base); \
if ((*tail == '\0') || \
((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {\
*res = val; \
@@ -513,16 +514,6 @@ static char *string(char *buf, char *end, char *s, int field_width, int precisio
return buf;
}
-static inline void *dereference_function_descriptor(void *ptr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- void *p;
- if (!probe_kernel_address(ptr, p))
- ptr = p;
-#endif
- return ptr;
-}
-
static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
{
unsigned long value = (unsigned long) ptr;
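The strict_strtou##type hunk fixes a real bug: every generated variant
parsed with simple_strtoul(), so on 32-bit kernels strict_strtoull()
silently truncated values above ULONG_MAX. With the token paste fixed, the
macro expanded for type = ll reads roughly as follows (abbreviated from the
full template):

int strict_strtoull(const char *cp, unsigned int base, unsigned long long *res)
{
	char *tail;
	unsigned long long val;
	size_t len = strlen(cp);

	if (len == 0)
		return -EINVAL;

	val = simple_strtoull(cp, &tail, base);	/* was: simple_strtoul() */
	if ((*tail == '\0') ||
	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
		*res = val;
		return 0;
	}
	return -EINVAL;
}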