Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug       |  34
-rw-r--r--  lib/assoc_array.c       |   2
-rw-r--r--  lib/debugobjects.c      |   3
-rw-r--r--  lib/fault-inject.c      |   8
-rw-r--r--  lib/locking-selftest.c  | 123
-rw-r--r--  lib/mpi/mpicoder.c      |   4
-rw-r--r--  lib/radix-tree.c        |   1
-rw-r--r--  lib/swiotlb.c           |  57
-rw-r--r--  lib/test_kmod.c         |  16
9 files changed, 222 insertions(+), 26 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 98fe715522e8..7396f5044397 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -374,6 +374,9 @@ config STACK_VALIDATION
 	  pointers (if CONFIG_FRAME_POINTER is enabled).  This helps ensure
 	  that runtime stack traces are more reliable.
 
+	  This is also a prerequisite for generation of ORC unwind data, which
+	  is needed for CONFIG_ORC_UNWINDER.
+
 	  For more information, see
 	  tools/objtool/Documentation/stack-validation.txt.
 
@@ -798,6 +801,13 @@ config HARDLOCKUP_DETECTOR_PERF
 	select SOFTLOCKUP_DETECTOR
 
 #
+# Enables a timestamp based low pass filter to compensate for perf based
+# hard lockup detection which runs too fast due to turbo modes.
+#
+config HARDLOCKUP_CHECK_TIMESTAMP
+	bool
+
+#
 # arch/ can define HAVE_HARDLOCKUP_DETECTOR_ARCH to provide their own hard
 # lockup detector rather than the perf based detector.
 #
@@ -1081,6 +1091,8 @@ config PROVE_LOCKING
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
+	select LOCKDEP_CROSSRELEASE
+	select LOCKDEP_COMPLETIONS
 	select TRACE_IRQFLAGS
 	default n
 	help
@@ -1121,7 +1133,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 
@@ -1150,6 +1162,22 @@ config LOCK_STAT
 	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
 	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
+config LOCKDEP_CROSSRELEASE
+	bool
+	help
+	 This makes lockdep work for crosslocks: locks that are allowed to
+	 be released in a different context from the acquisition context.
+	 Normally a lock must be released in the context that acquired it.
+	 However, relaxing this constraint lets synchronization primitives
+	 such as page locks or completions use the lock correctness
+	 detector, lockdep.
+
+config LOCKDEP_COMPLETIONS
+	bool
+	help
+	 A deadlock caused by wait_for_completion() and complete() can be
+	 detected by lockdep using the crossrelease feature.
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -1540,7 +1568,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1549,7 +1577,7 @@ config LATENCYTOP
 	depends on DEBUG_KERNEL
 	depends on STACKTRACE_SUPPORT
 	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 	select STACKTRACE
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 59fd7c0b119c..155c55d8db5f 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1,6 +1,6 @@
 /* Generic associative array implementation.
  *
- * See Documentation/assoc_array.txt for information.
+ * See Documentation/core-api/assoc_array.rst for information.
  *
  * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
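The LOCKDEP_CROSSRELEASE/LOCKDEP_COMPLETIONS entries above cover locks that are released in a different context than they were acquired. As a minimal illustration (not code from this diff; the demo_* names are hypothetical), this is the deadlock shape a completion can form, which plain lockdep cannot connect but crossrelease can:

#include <linux/completion.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static DECLARE_COMPLETION(demo_done);

/* Context A: blocks on the completion while holding demo_lock. */
static void demo_waiter(void)
{
	mutex_lock(&demo_lock);
	wait_for_completion(&demo_done);	/* sleeps forever below */
	mutex_unlock(&demo_lock);
}

/*
 * Context B: must take demo_lock before it can complete() -- deadlock.
 * Crossrelease records complete() as a release happening in a foreign
 * context, which lets lockdep link the two dependency chains.
 */
static void demo_completer(void)
{
	mutex_lock(&demo_lock);
	complete(&demo_done);
	mutex_unlock(&demo_lock);
}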
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 17afb0430161..2f5349c6e81a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -18,6 +18,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/hash.h>
+#include <linux/kmemleak.h>
 
 #define ODEBUG_HASH_BITS	14
 #define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
@@ -110,6 +111,7 @@ static void fill_pool(void)
 		if (!new)
 			return;
 
+		kmemleak_ignore(new);
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		debug_objects_allocated++;
@@ -1080,6 +1082,7 @@ static int __init debug_objects_replace_static_objects(void)
 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
 		if (!obj)
 			goto free;
+		kmemleak_ignore(obj);
 		hlist_add_head(&obj->node, &objects);
 	}
 
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7d315fdb9f13..cf7b129b0b2b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 	if (in_task()) {
 		unsigned int fail_nth = READ_ONCE(current->fail_nth);
 
-		if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1))
-			goto fail;
+		if (fail_nth) {
+			if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
+				goto fail;
 
-		return false;
+			return false;
+		}
 	}
 
 	/* No need to check any other properties if the probability is 0 */
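The restructured should_fail() makes the task-filtered path self-contained: while a task's fail_nth is non-zero, each candidate site decrements it, a failure is injected only when it reaches zero, and the probability-based attributes are skipped entirely. From userspace this is driven via /proc/self/fail-nth; a rough usage sketch (assumes a kernel with CONFIG_FAULT_INJECTION and an enabled injector such as failslab; whether the mmap path actually hits an injection site depends on configuration):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/proc/self/fail-nth", O_RDWR);

	if (fd < 0)
		return 1;

	/* Arm the filter: the first injection site this task hits fails. */
	write(fd, "1", 1);

	/* Exercise a kernel path that may allocate. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Read back the counter: 0 means the failure was consumed. */
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	buf[n > 0 ? n : 0] = '\0';
	printf("fail-nth now: %s (mmap: %p)\n", buf, p);

	close(fd);
	return 0;
}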
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 6f2b135dc5e8..cd0b5c964bd0 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -363,6 +363,103 @@ static void rsem_AA3(void)
 }
 
 /*
+ * read_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		write_lock(A)
+ */
+static void rlock_ABBA1(void)
+{
+	RL(X1);
+	L(Y1);
+	U(Y1);
+	RU(X1);
+
+	L(Y1);
+	WL(X1);
+	WU(X1);
+	U(Y1); // should fail
+}
+
+static void rwsem_ABBA1(void)
+{
+	RSL(X1);
+	ML(Y1);
+	MU(Y1);
+	RSU(X1);
+
+	ML(Y1);
+	WSL(X1);
+	WSU(X1);
+	MU(Y1); // should fail
+}
+
+/*
+ * read_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		read_lock(A)
+ */
+static void rlock_ABBA2(void)
+{
+	RL(X1);
+	L(Y1);
+	U(Y1);
+	RU(X1);
+
+	L(Y1);
+	RL(X1);
+	RU(X1);
+	U(Y1); // should NOT fail
+}
+
+static void rwsem_ABBA2(void)
+{
+	RSL(X1);
+	ML(Y1);
+	MU(Y1);
+	RSU(X1);
+
+	ML(Y1);
+	RSL(X1);
+	RSU(X1);
+	MU(Y1); // should fail
+}
+
+
+/*
+ * write_lock(A)
+ * spin_lock(B)
+ *		spin_lock(B)
+ *		write_lock(A)
+ */
+static void rlock_ABBA3(void)
+{
+	WL(X1);
+	L(Y1);
+	U(Y1);
+	WU(X1);
+
+	L(Y1);
+	WL(X1);
+	WU(X1);
+	U(Y1); // should fail
+}
+
+static void rwsem_ABBA3(void)
+{
+	WSL(X1);
+	ML(Y1);
+	MU(Y1);
+	WSU(X1);
+
+	ML(Y1);
+	WSL(X1);
+	WSU(X1);
+	MU(Y1); // should fail
+}
+
+/*
  * ABBA deadlock:
  */
 
@@ -1056,8 +1153,6 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	if (debug_locks != expected) {
 		unexpected_testcase_failures++;
 		pr_cont("FAILED|");
-
-		dump_stack();
 	} else {
 		testcase_successes++;
 		pr_cont("  ok  |");
 	}
@@ -1933,6 +2028,30 @@ void locking_selftest(void)
 	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
 	pr_cont("\n");
 
+	print_testname("mixed read-lock/lock-write ABBA");
+	pr_cont("             |");
+	dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+	/*
+	 * Lockdep does indeed fail here, but there's nothing we can do about
+	 * that now.  Don't kill lockdep for it.
+	 */
+	unexpected_testcase_failures--;
+
+	pr_cont("             |");
+	dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
+
+	print_testname("mixed read-lock/lock-read ABBA");
+	pr_cont("             |");
+	dotest(rlock_ABBA2, SUCCESS, LOCKTYPE_RWLOCK);
+	pr_cont("             |");
+	dotest(rwsem_ABBA2, FAILURE, LOCKTYPE_RWSEM);
+
+	print_testname("mixed write-lock/lock-write ABBA");
+	pr_cont("             |");
+	dotest(rlock_ABBA3, FAILURE, LOCKTYPE_RWLOCK);
+	pr_cont("             |");
+	dotest(rwsem_ABBA3, FAILURE, LOCKTYPE_RWSEM);
+
 	printk("  --------------------------------------------------------------------------\n");
 
 	/*
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5a0f75a3bf01..eead4b339466 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -364,11 +364,11 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	}
 
 	miter.consumed = lzeros;
-	sg_miter_stop(&miter);
 
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
+		sg_miter_stop(&miter);
 		pr_info("MPI: mpi too large (%u bits)\n", nbits);
 		return NULL;
 	}
@@ -376,6 +376,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	if (nbytes > 0)
 		nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
 
+	sg_miter_stop(&miter);
+
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
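The mpicoder fix enforces the sg mapping iterator contract: miter.addr is only valid between sg_miter_next() and sg_miter_stop(), so the iterator must not be stopped before the last dereference of the mapped data (here the count_leading_zeros(*buff) read). A sketch of the canonical pattern, with a hypothetical consume() callback:

#include <linux/scatterlist.h>

static void demo_walk_sgl(struct scatterlist *sgl, unsigned int nents,
			  void (*consume)(const void *buf, size_t len))
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter)) {
		/* miter.addr/miter.length are valid only inside this
		 * window; with SG_MITER_ATOMIC, consume() must not sleep. */
		consume(miter.addr, miter.length);
	}

	sg_miter_stop(&miter);	/* only after the final access */
}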
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 898e87998417..3527eb364964 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2022,6 +2022,7 @@ void radix_tree_iter_delete(struct radix_tree_root *root,
 	if (__radix_tree_delete(root, iter->node, slot))
 		iter->index = iter->next_index;
 }
+EXPORT_SYMBOL(radix_tree_iter_delete);
 
 /**
  * radix_tree_delete_item - delete an item from a radix tree
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index a8d74a733a38..8c6c83ef57a4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -30,6 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/scatterlist.h>
+#include <linux/mem_encrypt.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -155,6 +156,15 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
+void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
+
+/* For swiotlb, clear memory encryption mask from dma addresses */
+static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
+				      phys_addr_t address)
+{
+	return __sme_clr(phys_to_dma(hwdev, address));
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -183,6 +193,31 @@ void swiotlb_print_info(void)
 	       bytes >> 20, vstart, vend - 1);
 }
 
+/*
+ * Early SWIOTLB allocation may be too early to allow an architecture to
+ * perform the desired operations.  This function allows the architecture to
+ * call SWIOTLB when the operations are possible.  It needs to be called
+ * before the SWIOTLB memory is used.
+ */
+void __init swiotlb_update_mem_attributes(void)
+{
+	void *vaddr;
+	unsigned long bytes;
+
+	if (no_iotlb_memory || late_alloc)
+		return;
+
+	vaddr = phys_to_virt(io_tlb_start);
+	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
+	swiotlb_set_mem_attributes(vaddr, bytes);
+	memset(vaddr, 0, bytes);
+
+	vaddr = phys_to_virt(io_tlb_overflow_buffer);
+	bytes = PAGE_ALIGN(io_tlb_overflow);
+	swiotlb_set_mem_attributes(vaddr, bytes);
+	memset(vaddr, 0, bytes);
+}
+
 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	void *v_overflow_buffer;
@@ -320,6 +355,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	io_tlb_start = virt_to_phys(tlb);
 	io_tlb_end = io_tlb_start + bytes;
 
+	swiotlb_set_mem_attributes(tlb, bytes);
 	memset(tlb, 0, bytes);
 
 	/*
@@ -330,6 +366,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!v_overflow_buffer)
 		goto cleanup2;
 
+	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
+	memset(v_overflow_buffer, 0, io_tlb_overflow);
 	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
 
 	/*
@@ -469,6 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	if (no_iotlb_memory)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
+	if (sme_active())
+		pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+
 	mask = dma_get_seg_boundary(hwdev);
 
 	tbl_dma_addr &= mask;
@@ -581,7 +622,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 		return SWIOTLB_MAP_ERROR;
 	}
 
-	start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
 	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
 				      dir, attrs);
 }
@@ -702,7 +743,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		goto err_warn;
 
 	ret = phys_to_virt(paddr);
-	dev_addr = phys_to_dma(hwdev, paddr);
+	dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
 
 	/* Confirm address can be DMA'd by device */
 	if (dev_addr + size - 1 > dma_mask) {
@@ -812,10 +853,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
-		return phys_to_dma(dev, io_tlb_overflow_buffer);
+		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
 	}
 
-	dev_addr = phys_to_dma(dev, map);
+	dev_addr = swiotlb_phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
 	if (dma_capable(dev, dev_addr, size))
@@ -824,7 +865,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
 
-	return phys_to_dma(dev, io_tlb_overflow_buffer);
+	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
@@ -958,7 +999,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
-			sg->dma_address = phys_to_dma(hwdev, map);
+			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg_dma_len(sg) = sg->length;
@@ -1026,7 +1067,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
@@ -1039,6 +1080,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
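All the phys_to_dma() call sites above now funnel through swiotlb_phys_to_dma() so that, with AMD Secure Memory Encryption active, bounce-buffer addresses handed to devices never carry the encryption bit. Conceptually (illustrative sketch only; sme_me_mask is the real kernel symbol, the demo_ wrapper is not):

#include <linux/mem_encrypt.h>
#include <linux/types.h>

static inline dma_addr_t demo_clear_enc_bit(dma_addr_t addr)
{
	/* sme_me_mask holds the encryption bit (a high physical address
	 * bit) when SME is active and is 0 otherwise, so this is a no-op
	 * on unencrypted systems -- the same idea as __sme_clr(). */
	return addr & ~sme_me_mask;
}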
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 6c1d678bcf8b..ff9148969b92 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
 				config->test_driver);
 	else
 		len += snprintf(buf+len, PAGE_SIZE - len,
-				"driver:\tEMTPY\n");
+				"driver:\tEMPTY\n");
 
 	if (config->test_fs)
 		len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
 				config->test_fs);
 	else
 		len += snprintf(buf+len, PAGE_SIZE - len,
-				"fs:\tEMTPY\n");
+				"fs:\tEMPTY\n");
 
 	mutex_unlock(&test_dev->config_mutex);
 
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
 					     strlen(test_str));
 		break;
 	case TEST_KMOD_FS_TYPE:
-		break;
 		kfree_const(config->test_fs);
 		config->test_driver = NULL;
 		copied = config_copy_test_fs(config, test_str,
 					     strlen(test_str));
+		break;
 	default:
 		mutex_unlock(&test_dev->config_mutex);
 		return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
 					    int (*test_sync)(struct kmod_test_device *test_dev))
 {
 	int ret;
-	long new;
+	unsigned long new;
 	unsigned int old_val;
 
-	ret = kstrtol(buf, 10, &new);
+	ret = kstrtoul(buf, 10, &new);
 	if (ret)
 		return ret;
 
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
 					     unsigned int max)
 {
 	int ret;
-	long new;
+	unsigned long new;
 
-	ret = kstrtol(buf, 10, &new);
+	ret = kstrtoul(buf, 10, &new);
 	if (ret)
 		return ret;
 
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	struct kmod_test_device *test_dev = NULL;
 	int ret;
 
-	mutex_unlock(&reg_dev_mutex);
+	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
 	if (unlikely(num_test_devs + 1) < 0) {
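The kstrtol()-to-kstrtoul() switch in test_kmod matters because the stored values are unsigned: kstrtoul() rejects a leading minus sign outright, so input like "-1" can no longer slip past an upper-bound-only range check. The resulting parse-and-clamp pattern, sketched with hypothetical demo_ names:

#include <linux/errno.h>
#include <linux/kernel.h>

static int demo_update_uint_range(const char *buf, unsigned int *val,
				  unsigned int min, unsigned int max)
{
	unsigned long new;
	int ret;

	ret = kstrtoul(buf, 10, &new);	/* "-1" now fails with -EINVAL */
	if (ret)
		return ret;

	if (new < min || new > max)
		return -EINVAL;

	*val = new;
	return 0;
}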