author     David S. Miller <davem@davemloft.net>    2009-09-24 15:13:11 -0700
committer  David S. Miller <davem@davemloft.net>    2009-09-24 15:13:11 -0700
commit     8b3f6af86378d0a10ca2f1ded1da124aef13b62c (patch)
tree       de6ca90295730343c495be8d98be8efa322140ef /lib
parent     139d6065c83071d5f66cd013a274a43699f8e2c1 (diff)
parent     94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff)
download   lwn-8b3f6af86378d0a10ca2f1ded1da124aef13b62c.tar.gz
           lwn-8b3f6af86378d0a10ca2f1ded1da124aef13b62c.zip
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Conflicts:
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/cpc-usb/TODO
drivers/staging/cpc-usb/cpc-usb_drv.c
drivers/staging/cpc-usb/cpc.h
drivers/staging/cpc-usb/cpc_int.h
drivers/staging/cpc-usb/cpcusb.h
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug             25
-rw-r--r--   lib/Kconfig.kmemcheck          3
-rw-r--r--   lib/decompress_inflate.c       8
-rw-r--r--   lib/decompress_unlzma.c       10
-rw-r--r--   lib/flex_array.c             121
-rw-r--r--   lib/inflate.c                  2
-rw-r--r--   lib/vsprintf.c                30
-rw-r--r--   lib/zlib_deflate/deflate.c     4
8 files changed, 147 insertions, 56 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7dbd5d9c29a4..891155817bc6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -50,6 +50,14 @@ config MAGIC_SYSRQ
 	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
 	  unless you really know what this hack does.
 
+config STRIP_ASM_SYMS
+	bool "Strip assembler-generated symbols during link"
+	default n
+	help
+	  Strip internal assembler-generated symbols during a link (symbols
+	  that look like '.Lxxx') so they don't pollute the output of
+	  get_wchan() and suchlike.
+
 config UNUSED_SYMBOLS
 	bool "Enable unused/obsolete exported symbols"
 	default y if X86
@@ -338,7 +346,7 @@ config SLUB_STATS
 
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
-	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \
+	depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \
 		!MEMORY_HOTPLUG
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -805,6 +813,21 @@ config DEBUG_BLOCK_EXT_DEVT
 
 	  Say N if you are unsure.
 
+config DEBUG_FORCE_WEAK_PER_CPU
+	bool "Force weak per-cpu definitions"
+	depends on DEBUG_KERNEL
+	help
+	  s390 and alpha require percpu variables in modules to be
+	  defined weak to work around addressing range issue which
+	  puts the following two restrictions on percpu variable
+	  definitions.
+
+	  1. percpu symbols must be unique whether static or not
+	  2. percpu variables can't be defined inside a function
+
+	  To ensure that generic code follows the above rules, this
+	  option forces all percpu variables to be defined as weak.
+
 config LKDTM
 	tristate "Linux Kernel Dump Test Tool Module"
 	depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck
index 603c81b66549..846e039a86b4 100644
--- a/lib/Kconfig.kmemcheck
+++ b/lib/Kconfig.kmemcheck
@@ -1,6 +1,8 @@
 config HAVE_ARCH_KMEMCHECK
 	bool
 
+if HAVE_ARCH_KMEMCHECK
+
 menuconfig KMEMCHECK
 	bool "kmemcheck: trap use of uninitialized memory"
 	depends on DEBUG_KERNEL
@@ -89,3 +91,4 @@ config KMEMCHECK_BITOPS_OK
 	  accesses where not all the bits are initialized at the same time.
 	  This may also hide some real bugs.
 
+endif
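The DEBUG_FORCE_WEAK_PER_CPU help text above amounts to two coding rules for per-cpu variables in generic code; enabling the option makes violations visible on every architecture instead of only on s390 and alpha. A minimal sketch of code that follows both rules (the variable and function names here are hypothetical, not part of the patch):

	#include <linux/percpu.h>

	/* Rule 1: per-cpu symbols must be unique even when static, so pick
	 * names that cannot collide with other translation units. */
	static DEFINE_PER_CPU(unsigned long, my_driver_event_count);

	/* Rule 2: per-cpu variables may not be defined inside a function;
	 * functions only reference the file-scope definition above. */
	static void my_driver_count_event(void)
	{
		get_cpu_var(my_driver_event_count)++;
		put_cpu_var(my_driver_event_count);
	}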
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 68dfce59c1b8..fc686c7a0a0d 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,6 +27,11 @@
 
 #define GZIP_IOBUF_SIZE (16*1024)
 
+static int nofill(void *buffer, unsigned int len)
+{
+	return -1;
+}
+
 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
 		       int(*fill)(void*, unsigned int),
@@ -76,6 +81,9 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
 		goto gunzip_nomem4;
 	}
 
+	if (!fill)
+		fill = nofill;
+
 	if (len == 0)
 		len = fill(zbuf, GZIP_IOBUF_SIZE);
 
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0b954e04bd30..ca82fde81c8f 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -82,6 +82,11 @@ struct rc {
 
 #define RC_MODEL_TOTAL_BITS 11
 
+static int nofill(void *buffer, unsigned int len)
+{
+	return -1;
+}
+
 /* Called twice: once at startup and once in rc_normalize() */
 static void INIT rc_read(struct rc *rc)
 {
@@ -97,7 +102,10 @@ static inline void INIT rc_init(struct rc *rc,
 				  int (*fill)(void*, unsigned int),
 				  char *buffer, int buffer_size)
 {
-	rc->fill = fill;
+	if (fill)
+		rc->fill = fill;
+	else
+		rc->fill = nofill;
 	rc->buffer = (uint8_t *)buffer;
 	rc->buffer_size = buffer_size;
 	rc->buffer_end = rc->buffer + rc->buffer_size;
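Both decompressors gain the same fallback: a NULL fill callback is replaced by nofill(), which simply reports that no further input can be produced. Pulled out of the diff context and shown on its own, the pattern looks like this (struct byte_source and byte_source_init() are invented names for illustration, not kernel API):

	/* If the caller supplies no fill callback, install a stub that just
	 * says "no more data" instead of leaving a NULL pointer to be called. */
	struct byte_source {
		int (*fill)(void *buffer, unsigned int len);
	};

	static int nofill(void *buffer, unsigned int len)
	{
		return -1;	/* nothing further can be read */
	}

	static void byte_source_init(struct byte_source *src,
				     int (*fill)(void *, unsigned int))
	{
		src->fill = fill ? fill : nofill;
	}

This is what lets callers that already hold the whole compressed image in memory pass NULL rather than writing a dummy callback of their own.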
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 7baed2fc3bc8..66eef2e4483e 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -28,23 +28,6 @@ struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
 };
 
-static inline int __elements_per_part(int element_size)
-{
-	return FLEX_ARRAY_PART_SIZE / element_size;
-}
-
-static inline int bytes_left_in_base(void)
-{
-	int element_offset = offsetof(struct flex_array, parts);
-	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
-	return bytes_left;
-}
-
-static inline int nr_base_part_ptrs(void)
-{
-	return bytes_left_in_base() / sizeof(struct flex_array_part *);
-}
-
 /*
  * If a user requests an allocation which is small
  * enough, we may simply use the space in the
@@ -54,7 +37,7 @@ static inline int nr_base_part_ptrs(void)
 static inline int elements_fit_in_base(struct flex_array *fa)
 {
 	int data_size = fa->element_size * fa->total_nr_elements;
-	if (data_size <= bytes_left_in_base())
+	if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
 		return 1;
 	return 0;
 }
@@ -63,6 +46,7 @@ static inline int elements_fit_in_base(struct flex_array *fa)
 * flex_array_alloc - allocate a new flexible array
 * @element_size:	the size of individual elements in the array
 * @total:		total number of elements that this should hold
+ * @flags:		page allocation flags to use for base array
 *
 * Note: all locking must be provided by the caller.
 *
@@ -103,7 +87,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 					gfp_t flags)
 {
 	struct flex_array *ret;
-	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+	int max_size = FLEX_ARRAY_NR_BASE_PTRS *
+			FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
 
 	/* max_size will end up 0 if element_size > PAGE_SIZE */
 	if (total > max_size)
@@ -113,17 +98,21 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 		return NULL;
 	ret->element_size = element_size;
 	ret->total_nr_elements = total;
+	if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
+		memset(ret->parts[0], FLEX_ARRAY_FREE,
+			FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
 
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
 {
-	return element_nr / __elements_per_part(fa->element_size);
+	return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 }
 
 /**
 * flex_array_free_parts - just free the second-level pages
+ * @fa:		the flex array from which to free parts
 *
 * This is to be used in cases where the base 'struct flex_array'
 * has been statically allocated and should not be free.
@@ -131,11 +120,10 @@ static int fa_element_to_part_nr(struct flex_array *fa,
 void flex_array_free_parts(struct flex_array *fa)
 {
 	int part_nr;
-	int max_part = nr_base_part_ptrs();
 
 	if (elements_fit_in_base(fa))
 		return;
-	for (part_nr = 0; part_nr < max_part; part_nr++)
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
 
@@ -150,7 +138,8 @@ static unsigned int index_inside_part(struct flex_array *fa,
 {
 	unsigned int part_offset;
 
-	part_offset = element_nr % __elements_per_part(fa->element_size);
+	part_offset = element_nr %
+				FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
 	return part_offset * fa->element_size;
 }
 
@@ -159,15 +148,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 {
 	struct flex_array_part *part = fa->parts[part_nr];
 	if (!part) {
-		/*
-		 * This leaves the part pages uninitialized
-		 * and with potentially random data, just
-		 * as if the user had kmalloc()'d the whole.
-		 * __GFP_ZERO can be used to zero it.
-		 */
-		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		part = kmalloc(sizeof(struct flex_array_part), flags);
 		if (!part)
 			return NULL;
+		if (!(flags & __GFP_ZERO))
+			memset(part, FLEX_ARRAY_FREE,
+				sizeof(struct flex_array_part));
 		fa->parts[part_nr] = part;
 	}
 	return part;
@@ -175,9 +161,12 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 
 /**
 * flex_array_put - copy data into the array at @element_nr
- * @src:	address of data to copy into the array
+ * @fa:		the flex array to copy data into
 * @element_nr:	index of the position in which to insert
 *		the new element.
+ * @src:	address of data to copy into the array
+ * @flags:	page allocation flags to use for array expansion
+ *
 *
 * Note that this *copies* the contents of @src into
 * the array. If you are trying to store an array of
@@ -207,9 +196,38 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
 }
 
 /**
+ * flex_array_clear - clear element in array at @element_nr
+ * @fa:		the flex array of the element.
+ * @element_nr:	index of the position to clear.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else {
+		part = fa->parts[part_nr];
+		if (!part)
+			return -EINVAL;
+	}
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
+	return 0;
+}
+
+/**
 * flex_array_prealloc - guarantee that array space exists
+ * @fa:		the flex array for which to preallocate parts
 * @start:	index of first array element for which space is allocated
 * @end:	index of last (inclusive) element for which space is allocated
+ * @flags:	page allocation flags
 *
 * This will guarantee that no future calls to flex_array_put()
 * will allocate memory. It can be used if you are expecting to
@@ -242,6 +260,7 @@ int flex_array_prealloc(struct flex_array *fa, unsigned int start,
 
 /**
 * flex_array_get - pull data back out of the array
+ * @fa:		the flex array from which to extract data
 * @element_nr:	index of the element to fetch from the array
 *
 * Returns a pointer to the data at index @element_nr. Note
@@ -266,3 +285,43 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+
+static int part_is_free(struct flex_array_part *part)
+{
+	int i;
+
+	for (i = 0; i < sizeof(struct flex_array_part); i++)
+		if (part->elements[i] != FLEX_ARRAY_FREE)
+			return 0;
+	return 1;
+}
+
+/**
+ * flex_array_shrink - free unused second-level pages
+ * @fa:		the flex array to shrink
+ *
+ * Frees all second-level pages that consist solely of unused
+ * elements. Returns the number of pages freed.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_shrink(struct flex_array *fa)
+{
+	struct flex_array_part *part;
+	int part_nr;
+	int ret = 0;
+
+	if (elements_fit_in_base(fa))
+		return ret;
+	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
+		part = fa->parts[part_nr];
+		if (!part)
+			continue;
+		if (part_is_free(part)) {
+			fa->parts[part_nr] = NULL;
+			kfree(part);
+			ret++;
+		}
+	}
+	return ret;
+}
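The flex_array changes above replace the small helper functions with header-side macros and, more importantly, poison unused slots with FLEX_ARRAY_FREE so that the new flex_array_clear() and flex_array_shrink() can tell which elements and parts are unused. A rough usage sketch of the resulting API (the element type, sizes and index are made up; locking is omitted because, per the kerneldoc, it is the caller's responsibility):

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/flex_array.h>

	struct sample {			/* hypothetical element type */
		int a;
		int b;
	};

	static int flex_array_demo(void)
	{
		struct flex_array *fa;
		struct sample s = { .a = 1, .b = 2 };
		struct sample *p;
		int err;

		/* Bookkeeping for 1024 elements; second-level parts are
		 * allocated lazily by flex_array_put(). */
		fa = flex_array_alloc(sizeof(struct sample), 1024, GFP_KERNEL);
		if (!fa)
			return -ENOMEM;

		err = flex_array_put(fa, 42, &s, GFP_KERNEL);	/* copies s in */
		if (!err) {
			p = flex_array_get(fa, 42);	/* pointer into the array */
			pr_debug("stored %d/%d\n", p->a, p->b);

			flex_array_clear(fa, 42);	/* re-poison the slot */
			flex_array_shrink(fa);		/* free now-empty parts */
		}

		flex_array_free(fa);
		return err;
	}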
diff --git a/lib/inflate.c b/lib/inflate.c
index 1a8e8a978128..d10255973a9f 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -7,7 +7,7 @@
 * Adapted for booting Linux by Hannu Savolainen 1993
 * based on gzip-1.0.3 
 *
- * Nicolas Pitre <nico@cam.org>, 1999/04/14 :
+ * Nicolas Pitre <nico@fluxnic.net>, 1999/04/14 :
 *   Little mods for all variable to reside either into rodata or bss segments
 *   by marking constant variables with 'const' and initializing all the others
 *   at run-time only. This allows for the kernel uncompressor to run
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index a1941f8d205f..b91839e9e892 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -581,7 +581,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
 	unsigned long value = (unsigned long) ptr;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	if (ext != 'f')
+	if (ext != 'f' && ext != 's')
 		sprint_symbol(sym, value);
 	else
 		kallsyms_lookup(value, NULL, NULL, NULL, sym);
@@ -799,7 +799,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
 *
 * - 'F' For symbolic function descriptor pointers with offset
 * - 'f' For simple symbolic function names without offset
- * - 'S' For symbolic direct pointers
+ * - 'S' For symbolic direct pointers with offset
+ * - 's' For symbolic direct pointers without offset
 * - 'R' For a struct resource pointer, it prints the range of
 *       addresses (not the name nor the flags)
 * - 'M' For a 6-byte MAC address, it prints the address in the
@@ -827,6 +828,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	case 'F':
 	case 'f':
 		ptr = dereference_function_descriptor(ptr);
+	case 's':
 		/* Fallthrough */
 	case 'S':
 		return symbol_string(buf, end, ptr, spec, *fmt);
@@ -1068,10 +1070,12 @@ qualifier:
 * @args: Arguments for the format string
 *
 * This function follows C99 vsnprintf, but has some extensions:
- * %pS output the name of a text symbol
+ * %pS output the name of a text symbol with offset
+ * %ps output the name of a text symbol without offset
 * %pF output the name of a function pointer with its offset
 * %pf output the name of a function pointer without its offset
 * %pR output the address range in a struct resource
+ * %n is ignored
 *
 * The return value is the number of characters which would
 * be generated for the given input, excluding the trailing
@@ -1093,13 +1097,8 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
 
 	/* Reject out-of-range values early. Large positive sizes are
 	   used for unknown buffer sizes. */
-	if (unlikely((int) size < 0)) {
-		/* There can be only one.. */
-		static char warn = 1;
-		WARN_ON(warn);
-		warn = 0;
+	if (WARN_ON_ONCE((int) size < 0))
 		return 0;
-	}
 
 	str = buf;
 	end = buf + size;
@@ -1527,11 +1526,7 @@ EXPORT_SYMBOL_GPL(vbin_printf);
 * a binary buffer that generated by vbin_printf.
 *
 * The format follows C99 vsnprintf, but has some extensions:
- * %pS output the name of a text symbol
- * %pF output the name of a function pointer with its offset
- * %pf output the name of a function pointer without its offset
- * %pR output the address range in a struct resource
- * %n is ignored
+ *  see vsnprintf comment for details.
 *
 * The return value is the number of characters which would
 * be generated for the given input, excluding the trailing
@@ -1549,13 +1544,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
 
 	struct printf_spec spec = {0};
 
-	if (unlikely((int) size < 0)) {
-		/* There can be only one.. */
-		static char warn = 1;
-		WARN_ON(warn);
-		warn = 0;
+	if (WARN_ON_ONCE((int) size < 0))
 		return 0;
-	}
 
 	str = buf;
 	end = buf + size;
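With this change the pointer() dispatcher treats 's' like 'S' except for the offset, mirroring the existing 'f'/'F' pair. A small illustrative use (the helper and its arguments are hypothetical; the strings in the comments only indicate the general shape of the output):

	#include <linux/kernel.h>

	static void show_symbol_formats(void *text_addr, void (*fn)(void))
	{
		pr_info("%pS\n", text_addr);	/* symbol plus offset, e.g. "foo+0x1c/0x90" */
		pr_info("%ps\n", text_addr);	/* symbol name only, e.g. "foo" */
		pr_info("%pF\n", fn);		/* as %pS, but dereferences a function
						 * descriptor first on arches that use them */
		pr_info("%pf\n", fn);		/* as %ps for function descriptor pointers */
	}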
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c
index c3e4a2baf835..46a31e5f49c3 100644
--- a/lib/zlib_deflate/deflate.c
+++ b/lib/zlib_deflate/deflate.c
@@ -135,7 +135,7 @@ static const config configuration_table[10] = {
 
 /* ===========================================================================
 * Update a hash value with the given input byte
- * IN assertion: all calls to to UPDATE_HASH are made with consecutive
+ * IN assertion: all calls to UPDATE_HASH are made with consecutive
 *    input characters, so that a running hash key can be computed from the
 *    previous key instead of complete recalculation each time.
 */
@@ -146,7 +146,7 @@ static const config configuration_table[10] = {
 * Insert string str in the dictionary and set match_head to the previous head
 * of the hash chain (the most recent string with same hash key). Return
 * the previous length of the hash chain.
- * IN assertion: all calls to to INSERT_STRING are made with consecutive
+ * IN assertion: all calls to INSERT_STRING are made with consecutive
 *    input characters and the first MIN_MATCH bytes of str are valid
 *    (except for the last MIN_MATCH-1 bytes of the input file).
 */