Diffstat (limited to 'arch/powerpc')
100 files changed, 529 insertions, 1046 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a0ce777f9706..8a2a6e403eb1 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -127,6 +127,8 @@ config PPC select ARCH_ENABLE_MEMORY_HOTPLUG select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_COPY_MC if PPC64 + select ARCH_HAS_CRC32 if PPC64 && ALTIVEC + select ARCH_HAS_CRC_T10DIF if PPC64 && ALTIVEC select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE @@ -145,6 +147,7 @@ config PPC select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API + select ARCH_HAS_PREEMPT_LAZY select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 @@ -156,6 +159,7 @@ config PPC select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN + select ARCH_HAS_VDSO_ARCH_DATA select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAVE_EXTRA_ELF_NOTES if SPU_BASE select ARCH_KEEP_MEMBLOCK @@ -206,6 +210,7 @@ config PPC select GENERIC_PTDUMP select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL + select GENERIC_VDSO_DATA_STORE select GENERIC_VDSO_TIME_NS select HAS_IOPORT if PCI select HAVE_ARCH_AUDITSYSCALL @@ -240,6 +245,7 @@ config PPC select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_GUP_FAST + select HAVE_FTRACE_GRAPH_FUNC select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1 diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index da6fc203e2dc..6f58ee1edf1f 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -223,7 +223,6 @@ CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y CONFIG_CRC_CCITT=y CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=y CONFIG_MAGIC_SYSRQ=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_MD5=y diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig index 97f4d4851735..3c6445c98a85 100644 --- a/arch/powerpc/configs/adder875_defconfig +++ b/arch/powerpc/configs/adder875_defconfig @@ -44,7 +44,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 53f43a34e1a9..b33f0034990c 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -168,7 +168,6 @@ CONFIG_INFINIBAND_MTHCA=m CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y CONFIG_EDAC=y -CONFIG_EDAC_CELL=y CONFIG_UIO=m CONFIG_EXT2_FS=y CONFIG_EXT4_FS=y diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig index 50cc59eb36cf..354180ab94bc 100644 --- a/arch/powerpc/configs/ep88xc_defconfig +++ b/arch/powerpc/configs/ep88xc_defconfig @@ -47,7 +47,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config index 3009b0efaf34..d6d2a458847b 100644 --- a/arch/powerpc/configs/fsl-emb-nonhw.config +++ b/arch/powerpc/configs/fsl-emb-nonhw.config @@ -112,7 +112,6 @@ CONFIG_QNX4FS_FS=m CONFIG_RCU_TRACE=y 
CONFIG_RESET_CONTROLLER=y CONFIG_ROOT_NFS=y -CONFIG_SYSV_FS=m CONFIG_SYSVIPC=y CONFIG_TMPFS=y CONFIG_UBIFS_FS=y diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig index 6f449411abf7..a0d27c59ea78 100644 --- a/arch/powerpc/configs/mpc866_ads_defconfig +++ b/arch/powerpc/configs/mpc866_ads_defconfig @@ -39,4 +39,3 @@ CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_CRC_CCITT=y -CONFIG_CRC32_SLICEBY4=y diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 77306be62e9e..89da51d724fb 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -70,7 +70,6 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO=y CONFIG_CRYPTO_DEV_TALITOS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_FS=y diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index ee84ade7a033..6b6d7467fecf 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -320,8 +320,6 @@ CONFIG_XMON=y CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_CRC32C_VPMSUM=m -CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f39c0d000c43..465eb96c755e 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -389,9 +389,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_LZO=m -CONFIG_CRYPTO_CRC32C_VPMSUM=m -CONFIG_CRYPTO_CRCT10DIF_VPMSUM=m -CONFIG_CRYPTO_VPMSUM_TESTER=m CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_AES_GCM_P10=m diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index ca0c90e95837..a91a766b71a4 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -485,7 +485,6 @@ CONFIG_VIA_VELOCITY=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_FDDI=y CONFIG_SKFP=m -CONFIG_NET_SB1000=m CONFIG_BROADCOM_PHY=m CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m @@ -986,7 +985,6 @@ CONFIG_MINIX_FS=m CONFIG_OMFS_FS=m CONFIG_QNX4FS_FS=m CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=m CONFIG_NFS_V3_ACL=y diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index 9d44e6630908..1eb446452fc0 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -281,7 +281,6 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity" # CONFIG_CRYPTO_HW is not set CONFIG_CRC16=y CONFIG_CRC_ITU_T=y -CONFIG_LIBCRC32C=y # CONFIG_XZ_DEC_X86 is not set # CONFIG_XZ_DEC_IA64 is not set # CONFIG_XZ_DEC_ARM is not set diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig index 383c0966e92f..425f10837a18 100644 --- a/arch/powerpc/configs/tqm8xx_defconfig +++ b/arch/powerpc/configs/tqm8xx_defconfig @@ -54,7 +54,6 @@ CONFIG_TMPFS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y -CONFIG_CRC32_SLICEBY4=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_MAGIC_SYSRQ=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 951a43726461..5b315e9756b3 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -13,39 +13,6 @@ config CRYPTO_CURVE25519_PPC64 Architecture: 
PowerPC64 - Little-endian -config CRYPTO_CRC32C_VPMSUM - tristate "CRC32c" - depends on PPC64 && ALTIVEC - select CRYPTO_HASH - select CRC32 - help - CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720) - - Architecture: powerpc64 using - - AltiVec extensions - - Enable on POWER8 and newer processors for improved performance. - -config CRYPTO_CRCT10DIF_VPMSUM - tristate "CRC32T10DIF" - depends on PPC64 && ALTIVEC && CRC_T10DIF - select CRYPTO_HASH - help - CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF) - - Architecture: powerpc64 using - - AltiVec extensions - - Enable on POWER8 and newer processors for improved performance. - -config CRYPTO_VPMSUM_TESTER - tristate "CRC32c and CRC32T10DIF hardware acceleration tester" - depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM - help - Stress test for CRC32c and CRCT10DIF algorithms implemented with - powerpc64 AltiVec extensions (POWER8 vpmsum instructions). - Unless you are testing these algorithms, you don't need this. - config CRYPTO_MD5_PPC tristate "Digests: MD5" depends on PPC diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 59808592f0a1..9b38f4a7bc15 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -10,9 +10,6 @@ obj-$(CONFIG_CRYPTO_MD5_PPC) += md5-ppc.o obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o -obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o -obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o -obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o @@ -24,8 +21,6 @@ md5-ppc-y := md5-asm.o md5-glue.o sha1-powerpc-y := sha1-powerpc-asm.o sha1.o sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o -crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o -crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index f37b3d13fc53..679f52794baf 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -214,7 +214,6 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv, struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN); u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN]; struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN); - struct scatter_walk assoc_sg_walk; struct skcipher_walk walk; u8 *assocmem = NULL; u8 *assoc; @@ -234,8 +233,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv, /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length) { - scatterwalk_start(&assoc_sg_walk, req->src); - assoc = scatterwalk_map(&assoc_sg_walk); + assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */ } else { gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; @@ -253,10 +251,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv, gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen); vsx_end(); - if (!assocmem) - scatterwalk_unmap(assoc); - else - kfree(assocmem); + kfree(assocmem); if (enc) ret = skcipher_walk_aead_encrypt(&walk, req, false); diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c deleted file mode 100644 index c61a874a3a5c..000000000000 --- a/arch/powerpc/crypto/crc-vpmsum_test.c +++ /dev/null @@ -1,133 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * CRC vpmsum tester - * Copyright 2017 Daniel Axtens, IBM Corporation. - */ - -#include <linux/crc-t10dif.h> -#include <linux/crc32.h> -#include <crypto/internal/hash.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/random.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> -#include <asm/switch_to.h> - -static unsigned long iterations = 10000; - -#define MAX_CRC_LENGTH 65535 - - -static int __init crc_test_init(void) -{ - u16 crc16 = 0, verify16 = 0; - __le32 verify32le = 0; - unsigned char *data; - u32 verify32 = 0; - unsigned long i; - __le32 crc32; - int ret; - - struct crypto_shash *crct10dif_tfm; - struct crypto_shash *crc32c_tfm; - - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL); - if (!data) - return -ENOMEM; - - crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); - - if (IS_ERR(crct10dif_tfm)) { - pr_err("Error allocating crc-t10dif\n"); - goto free_buf; - } - - crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0); - - if (IS_ERR(crc32c_tfm)) { - pr_err("Error allocating crc32c\n"); - goto free_16; - } - - do { - SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm); - SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm); - - crct10dif_shash->tfm = crct10dif_tfm; - ret = crypto_shash_init(crct10dif_shash); - - if (ret) { - pr_err("Error initing crc-t10dif\n"); - goto free_32; - } - - - crc32c_shash->tfm = crc32c_tfm; - ret = crypto_shash_init(crc32c_shash); - - if (ret) { - pr_err("Error initing crc32c\n"); - goto free_32; - } - - pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations); - for (i=0; i<iterations; i++) { - size_t offset = get_random_u32_below(16); - size_t len = get_random_u32_below(MAX_CRC_LENGTH); - - if (len <= offset) - continue; - get_random_bytes(data, len); - len -= offset; - - crypto_shash_update(crct10dif_shash, data+offset, len); - crypto_shash_final(crct10dif_shash, (u8 *)(&crc16)); - verify16 = crc_t10dif_generic(verify16, data+offset, len); - - - if (crc16 != verify16) { - pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len %lu)\n", - crc16, verify16, len); - break; - } - - crypto_shash_update(crc32c_shash, data+offset, len); - crypto_shash_final(crc32c_shash, (u8 *)(&crc32)); - verify32 = le32_to_cpu(verify32le); - verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len)); - if (crc32 != verify32le) { - pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n", - crc32, verify32, len); - break; - } - cond_resched(); - } - pr_info("crc-vpmsum_test done, completed %lu iterations\n", i); - } while (0); - -free_32: - crypto_free_shash(crc32c_tfm); - -free_16: - crypto_free_shash(crct10dif_tfm); - -free_buf: - kfree(data); - - return 0; -} - -static void __exit crc_test_exit(void) {} - -module_init(crc_test_init); -module_exit(crc_test_exit); -module_param(iterations, long, 0400); - -MODULE_AUTHOR("Daniel 
Axtens <dja@axtens.net>"); -MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester"); -MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c deleted file mode 100644 index 63760b7dbb76..000000000000 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -#include <linux/crc32.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/simd.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/cpufeature.h> -#include <asm/simd.h> -#include <asm/switch_to.h> - -#define CHKSUM_BLOCK_SIZE 1 -#define CHKSUM_DIGEST_SIZE 4 - -#define VMX_ALIGN 16 -#define VMX_ALIGN_MASK (VMX_ALIGN-1) - -#define VECTOR_BREAKPOINT 512 - -u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len); - -static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) -{ - unsigned int prealign; - unsigned int tail; - - if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable()) - return __crc32c_le(crc, p, len); - - if ((unsigned long)p & VMX_ALIGN_MASK) { - prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); - crc = __crc32c_le(crc, p, prealign); - len -= prealign; - p += prealign; - } - - if (len & ~VMX_ALIGN_MASK) { - preempt_disable(); - pagefault_disable(); - enable_kernel_altivec(); - crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); - disable_kernel_altivec(); - pagefault_enable(); - preempt_enable(); - } - - tail = len & VMX_ALIGN_MASK; - if (tail) { - p += len & ~VMX_ALIGN_MASK; - crc = __crc32c_le(crc, p, tail); - } - - return crc; -} - -static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) -{ - u32 *key = crypto_tfm_ctx(tfm); - - *key = ~0; - - return 0; -} - -/* - * Setting the seed allows arbitrary accumulators and flexible XOR policy - * If your algorithm starts with ~0, then XOR with ~0 before you set - * the seed. 
- */ -static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key, - unsigned int keylen) -{ - u32 *mctx = crypto_shash_ctx(hash); - - if (keylen != sizeof(u32)) - return -EINVAL; - *mctx = le32_to_cpup((__le32 *)key); - return 0; -} - -static int crc32c_vpmsum_init(struct shash_desc *desc) -{ - u32 *mctx = crypto_shash_ctx(desc->tfm); - u32 *crcp = shash_desc_ctx(desc); - - *crcp = *mctx; - - return 0; -} - -static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - u32 *crcp = shash_desc_ctx(desc); - - *crcp = crc32c_vpmsum(*crcp, data, len); - - return 0; -} - -static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len, - u8 *out) -{ - *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len)); - - return 0; -} - -static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out); -} - -static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out) -{ - u32 *crcp = shash_desc_ctx(desc); - - *(__le32 *)out = ~cpu_to_le32p(crcp); - - return 0; -} - -static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *out) -{ - return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len, - out); -} - -static struct shash_alg alg = { - .setkey = crc32c_vpmsum_setkey, - .init = crc32c_vpmsum_init, - .update = crc32c_vpmsum_update, - .final = crc32c_vpmsum_final, - .finup = crc32c_vpmsum_finup, - .digest = crc32c_vpmsum_digest, - .descsize = sizeof(u32), - .digestsize = CHKSUM_DIGEST_SIZE, - .base = { - .cra_name = "crc32c", - .cra_driver_name = "crc32c-vpmsum", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, - .cra_blocksize = CHKSUM_BLOCK_SIZE, - .cra_ctxsize = sizeof(u32), - .cra_module = THIS_MODULE, - .cra_init = crc32c_vpmsum_cra_init, - } -}; - -static int __init crc32c_vpmsum_mod_init(void) -{ - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - return crypto_register_shash(&alg); -} - -static void __exit crc32c_vpmsum_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init); -module_exit(crc32c_vpmsum_mod_fini); - -MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); -MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crc32c"); -MODULE_ALIAS_CRYPTO("crc32c-vpmsum"); diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index c3efacab4b94..aa90a048f319 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -77,9 +77,17 @@ /* * With 4K page size the real_pte machinery is all nops. 
*/ -#define __real_pte(e, p, o) ((real_pte_t){(e)}) +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset) +{ + return (real_pte_t){pte}; +} + #define __rpte_to_pte(r) ((r).pte) -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT; +} #define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ do { \ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index db481b336bca..82da7c7a1d12 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -43,6 +43,13 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs * return arch_ftrace_regs(fregs)->regs.msr ? &arch_ftrace_regs(fregs)->regs : NULL; } +#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \ + (_regs)->result = 0; \ + (_regs)->nip = arch_ftrace_regs(fregs)->regs.nip; \ + (_regs)->gpr[1] = arch_ftrace_regs(fregs)->regs.gpr[1]; \ + asm volatile("mfmsr %0" : "=r" ((_regs)->msr)); \ + } while (0) + static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) @@ -50,6 +57,12 @@ ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, regs_set_return_ip(&arch_ftrace_regs(fregs)->regs, ip); } +static __always_inline unsigned long +ftrace_regs_get_return_address(struct ftrace_regs *fregs) +{ + return arch_ftrace_regs(fregs)->regs.link; +} + struct ftrace_ops; #define ftrace_graph_func ftrace_graph_func diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 18a3028ac3b6..86326587e58d 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -15,6 +15,15 @@ extern bool hugetlb_disabled; +static inline bool hugepages_supported(void) +{ + if (hugetlb_disabled) + return false; + + return HPAGE_SHIFT != 0; +} +#define hugepages_supported hugepages_supported + void __init hugetlbpage_init_defaultsize(void); int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, @@ -36,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); } @@ -46,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_hugetlb_page(vma, addr); return pte; } diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 270ee93a0f7d..601e569303e1 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -61,7 +61,6 @@ struct pt_regs; extern void kexec_smp_wait(void); /* get and clear naca physid, wait for master to copy new code to 0 */ extern void default_machine_kexec(struct kimage *image); -extern void machine_kexec_mask_interrupts(void); void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer, unsigned long start_address) __noreturn; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 
6e1108f8fce6..2d139c807577 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -902,7 +902,6 @@ struct kvm_vcpu_arch { #define __KVM_HAVE_ARCH_WQP #define __KVM_HAVE_CREATE_DEVICE -static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 6ebca2996f18..2785c7462ebf 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -103,6 +103,7 @@ void arch_setup_new_exec(void); #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ +#define TIF_NEED_RESCHED_LAZY 9 /* Scheduler driven lazy preemption */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -122,6 +123,7 @@ void arch_setup_new_exec(void); #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) #define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_32BIT (1<<TIF_32BIT) @@ -142,9 +144,10 @@ void arch_setup_new_exec(void); _TIF_SYSCALL_EMU) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NEED_RESCHED_LAZY | _TIF_NOTIFY_RESUME | \ + _TIF_UPROBE | _TIF_RESTORE_TM | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL) + #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) /* Bits in local_flags */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 221c8f8ff89b..9bdd8080299b 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -86,7 +86,7 @@ static inline unsigned long tb_ticks_since(unsigned long tstamp) #define mulhdu(x,y) \ ({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;}) #else -extern u64 mulhdu(u64, u64); +#define mulhdu(x, y) mul_u64_u64_shr(x, y, 64) #endif extern void div128_by_32(u64 dividend_high, u64 dividend_low, diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 1ca7d4c4b90d..2058e8d3e013 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -37,6 +37,7 @@ extern void tlb_flush(struct mmu_gather *tlb); */ #define tlb_needs_table_invalidate() radix_enabled() +#define __HAVE_ARCH_TLB_REMOVE_TABLE /* Get the generic bits... 
*/ #include <asm-generic/tlb.h> diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 16bacfe8c7a2..da15b5efe807 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -152,6 +152,7 @@ static inline bool topology_is_primary_thread(unsigned int cpu) { return cpu == cpu_first_thread_sibling(cpu); } +#define topology_is_primary_thread topology_is_primary_thread static inline bool topology_smt_thread_allowed(unsigned int cpu) { diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 8d972bc98b55..1ca23fbfe087 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -3,6 +3,7 @@ #define _ASM_POWERPC_VDSO_H #define VDSO_VERSION_STRING LINUX_2.6.15 +#define __VDSO_PAGES 4 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/vdso/arch_data.h b/arch/powerpc/include/asm/vdso/arch_data.h new file mode 100644 index 000000000000..c240a6b87518 --- /dev/null +++ b/arch/powerpc/include/asm/vdso/arch_data.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM + * Copyright (C) 2005 Benjamin Herrenschmidy <benh@kernel.crashing.org>, + * IBM Corp. + */ +#ifndef _ASM_POWERPC_VDSO_ARCH_DATA_H +#define _ASM_POWERPC_VDSO_ARCH_DATA_H + +#include <linux/unistd.h> +#include <linux/types.h> + +#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) + +#ifdef CONFIG_PPC64 + +struct vdso_arch_data { + __u64 tb_ticks_per_sec; /* Timebase tics / sec */ + __u32 dcache_block_size; /* L1 d-cache block size */ + __u32 icache_block_size; /* L1 i-cache block size */ + __u32 dcache_log_block_size; /* L1 d-cache log block size */ + __u32 icache_log_block_size; /* L1 i-cache log block size */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ +}; + +#else /* CONFIG_PPC64 */ + +struct vdso_arch_data { + __u64 tb_ticks_per_sec; /* Timebase tics / sec */ + __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ + __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ +}; + +#endif /* CONFIG_PPC64 */ + +#endif /* _ASM_POWERPC_VDSO_ARCH_DATA_H */ diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h index 80ce0709725e..067a5396aac6 100644 --- a/arch/powerpc/include/asm/vdso/getrandom.h +++ b/arch/powerpc/include/asm/vdso/getrandom.h @@ -43,20 +43,21 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig (unsigned long)len, (unsigned long)flags); } -static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void) +static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(void) { - struct vdso_arch_data *data; + struct vdso_rng_data *data; asm ( " bcl 20, 31, .+4 ;" "0: mflr %0 ;" - " addis %0, %0, (_vdso_datapage - 0b)@ha ;" - " addi %0, %0, (_vdso_datapage - 0b)@l ;" + " addis %0, %0, (vdso_u_rng_data - 0b)@ha ;" + " addi %0, %0, (vdso_u_rng_data - 0b)@l ;" : "=r" (data) : : "lr" ); - return &data->rng_data; + return data; } +#define __arch_get_vdso_u_rng_data __arch_get_vdso_u_rng_data ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state, size_t opaque_len); diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h index c6390890a60c..99c9d6f43fde 100644 --- a/arch/powerpc/include/asm/vdso/gettimeofday.h +++ 
b/arch/powerpc/include/asm/vdso/gettimeofday.h @@ -94,22 +94,12 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts) #endif static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return get_tb(); } -const struct vdso_data *__arch_get_vdso_data(void); - -#ifdef CONFIG_TIME_NS -static __always_inline -const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) -{ - return (void *)vd + (1U << CONFIG_PAGE_SHIFT); -} -#endif - -static inline bool vdso_clocksource_ok(const struct vdso_data *vd) +static inline bool vdso_clocksource_ok(const struct vdso_clock *vc) { return true; } @@ -135,21 +125,22 @@ static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift) #ifdef __powerpc64__ int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res, - const struct vdso_data *vd); + const struct vdso_time_data *vd); #else int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd); + const struct vdso_time_data *vd); int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res, - const struct vdso_data *vd); + const struct vdso_time_data *vd); #endif int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz, - const struct vdso_data *vd); + const struct vdso_time_data *vd); __kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, - const struct vdso_data *vd); + const struct vdso_time_data *vd); + #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h index 48560a119559..c2c9ae1b22e7 100644 --- a/arch/powerpc/include/asm/vdso/vsyscall.h +++ b/arch/powerpc/include/asm/vdso/vsyscall.h @@ -6,19 +6,6 @@ #include <asm/vdso_datapage.h> -static __always_inline -struct vdso_data *__arch_get_k_vdso_data(void) -{ - return vdso_data->data; -} -#define __arch_get_k_vdso_data __arch_get_k_vdso_data - -static __always_inline -struct vdso_rng_data *__arch_get_k_vdso_rng_data(void) -{ - return &vdso_data->rng_data; -} - /* The asm-generic header needs to be included after the definitions above */ #include <asm-generic/vdso/vsyscall.h> diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index a202f5b63479..95d45a50355d 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -11,56 +11,18 @@ #ifndef __ASSEMBLY__ -#include <linux/unistd.h> -#include <linux/time.h> #include <vdso/datapage.h> -#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32) - -#ifdef CONFIG_PPC64 - -struct vdso_arch_data { - __u64 tb_ticks_per_sec; /* Timebase tics / sec */ - __u32 dcache_block_size; /* L1 d-cache block size */ - __u32 icache_block_size; /* L1 i-cache block size */ - __u32 dcache_log_block_size; /* L1 d-cache log block size */ - __u32 icache_log_block_size; /* L1 i-cache log block size */ - __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ - __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */ - - struct vdso_rng_data rng_data; - - struct vdso_data data[CS_BASES] __aligned(1 << 
CONFIG_PAGE_SHIFT); -}; - -#else /* CONFIG_PPC64 */ - -struct vdso_arch_data { - __u64 tb_ticks_per_sec; /* Timebase tics / sec */ - __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */ - __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */ - struct vdso_rng_data rng_data; - - struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT); -}; - -#endif /* CONFIG_PPC64 */ - -extern struct vdso_arch_data *vdso_data; - #else /* __ASSEMBLY__ */ -.macro get_datapage ptr offset=0 +.macro get_datapage ptr symbol bcl 20, 31, .+4 999: mflr \ptr - addis \ptr, \ptr, (_vdso_datapage - 999b + \offset)@ha - addi \ptr, \ptr, (_vdso_datapage - 999b + \offset)@l + addis \ptr, \ptr, (\symbol - 999b)@ha + addi \ptr, \ptr, (\symbol - 999b)@l .endm -#include <asm/asm-offsets.h> -#include <asm/page.h> - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 7a390bd4f4af..b3048f6d3822 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -334,7 +334,6 @@ int main(void) #endif /* ! CONFIG_PPC64 */ /* datapage offsets for use by vdso */ - OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data); OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec); #ifdef CONFIG_PPC64 OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size); diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1bee15c013e7..3af6c06af02f 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -1087,12 +1087,10 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char /* Count and allocate space for cpu features */ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, &nr_dt_cpu_features); - dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE); - if (!dt_cpu_features) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, - sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, - PAGE_SIZE); + dt_cpu_features = + memblock_alloc_or_panic( + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, + PAGE_SIZE); cpufeatures_setup_start(isa); diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 4b371c738213..d44349fe8e2b 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -751,7 +751,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) * prstatus.pr_pid = ???? */ elf_core_copy_regs(&prstatus.pr_reg, regs); - buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); return buf; } diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 30b56c67fa61..e527cd3ef128 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -97,7 +97,7 @@ void power4_idle(void) /* * Register the sysctl to set/clear powersave_nap. 
*/ -static struct ctl_table powersave_nap_ctl_table[] = { +static const struct ctl_table powersave_nap_ctl_table[] = { { .procname = "powersave-nap", .data = &powersave_nap, diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index af62ec974b97..8f4acc55407b 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -185,7 +185,7 @@ again: ti_flags = read_thread_flags(); while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) { local_irq_enable(); - if (ti_flags & _TIF_NEED_RESCHED) { + if (ti_flags & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) { schedule(); } else { /* @@ -396,7 +396,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs) /* Returning to a kernel context with local irqs enabled. */ WARN_ON_ONCE(!(regs->msr & MSR_EE)); again: - if (IS_ENABLED(CONFIG_PREEMPT)) { + if (IS_ENABLED(CONFIG_PREEMPTION)) { /* Return to preemptible kernel context */ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) { if (preempt_count() == 0) diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 76381e14e800..0ebae6e4c19d 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -687,7 +687,7 @@ void iommu_table_clear(struct iommu_table *tbl) void iommu_table_reserve_pages(struct iommu_table *tbl, unsigned long res_start, unsigned long res_end) { - int i; + unsigned long i; WARN_ON_ONCE(res_end < res_start); /* diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 033cd00aa0fc..acb727f54e9d 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -28,32 +28,6 @@ .text /* - * This returns the high 64 bits of the product of two 64-bit numbers. - */ -_GLOBAL(mulhdu) - cmpwi r6,0 - cmpwi cr1,r3,0 - mr r10,r4 - mulhwu r4,r4,r5 - beq 1f - mulhwu r0,r10,r6 - mullw r7,r10,r5 - addc r7,r0,r7 - addze r4,r4 -1: beqlr cr1 /* all done if high part of A is 0 */ - mullw r9,r3,r5 - mulhwu r10,r3,r5 - beq 2f - mullw r0,r3,r6 - mulhwu r8,r3,r6 - addc r7,r0,r7 - adde r4,r4,r8 - addze r10,r10 -2: addc r4,r4,r9 - addze r3,r10 - blr - -/* * reloc_got2 runs through the .got2 section adding an offset * to each entry. */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 45dac7b46aa3..34a5aec4908f 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -369,6 +369,24 @@ static void dedotify_versions(struct modversion_info *vers, } } +/* Same as normal versions, remove a leading dot if present. */ +static void dedotify_ext_version_names(char *str_seq, unsigned long size) +{ + unsigned long out = 0; + unsigned long in; + char last = '\0'; + + for (in = 0; in < size; in++) { + /* Skip one leading dot */ + if (last == '\0' && str_seq[in] == '.') + in++; + last = str_seq[in]; + str_seq[out++] = last; + } + /* Zero the trailing portion of the names table for robustness */ + memset(&str_seq[out], 0, size - out); +} + /* * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC. * seem to be defined (value set later). 
@@ -438,10 +456,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, me->arch.toc_section = i; if (sechdrs[i].sh_addralign < 8) sechdrs[i].sh_addralign = 8; - } - else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0) + } else if (strcmp(secstrings + sechdrs[i].sh_name, "__versions") == 0) dedotify_versions((void *)hdr + sechdrs[i].sh_offset, sechdrs[i].sh_size); + else if (strcmp(secstrings + sechdrs[i].sh_name, "__version_ext_names") == 0) + dedotify_ext_version_names((void *)hdr + sechdrs[i].sh_offset, + sechdrs[i].sh_size); if (sechdrs[i].sh_type == SHT_SYMTAB) dedotify((void *)hdr + sechdrs[i].sh_offset, diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index ce0c8623e563..f8a3bd8cfae4 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -213,11 +213,8 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = memblock_alloc(sizeof(struct property) + 256, + of_prop = memblock_alloc_or_panic(sizeof(struct property) + 256, SMP_CACHE_BYTES); - if (!of_prop) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct property) + 256); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7b739b9a91ab..ef91f71e07c4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1960,8 +1960,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) * address of _start and the second entry is the TOC * value we need to use. */ - __get_user(entry, (unsigned long __user *)start); - __get_user(toc, (unsigned long __user *)start+1); + get_user(entry, (unsigned long __user *)start); + get_user(toc, (unsigned long __user *)start+1); /* Check whether the e_entry function descriptor entries * need to be relocated before we can use them. diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 8e776ba39497..57082fac4668 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2792,7 +2792,6 @@ static void __init flatten_device_tree(void) dt_struct_start, dt_struct_end); } -#ifdef CONFIG_PPC_CHRP /* * Pegasos and BriQ lacks the "ranges" property in the isa node * Pegasos needs decimal IRQ 14/15, not hexadecimal @@ -2843,11 +2842,7 @@ static void __init fixup_device_tree_chrp(void) } } } -#else -#define fixup_device_tree_chrp() -#endif -#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) static void __init fixup_device_tree_pmac64(void) { phandle u3, i2c, mpic; @@ -2887,11 +2882,7 @@ static void __init fixup_device_tree_pmac64(void) prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent", &parent, sizeof(parent)); } -#else -#define fixup_device_tree_pmac64() -#endif -#ifdef CONFIG_PPC_PMAC static void __init fixup_device_tree_pmac(void) { __be32 val = 1; @@ -2911,11 +2902,7 @@ static void __init fixup_device_tree_pmac(void) prom_setprop(node, NULL, "#size-cells", &val, sizeof(val)); } } -#else -static inline void fixup_device_tree_pmac(void) { } -#endif -#ifdef CONFIG_PPC_EFIKA /* * The MPC5200 FEC driver requires an phy-handle property to tell it how * to talk to the phy. 
If the phy-handle property is missing, then this @@ -3047,11 +3034,7 @@ static void __init fixup_device_tree_efika(void) /* Make sure ethernet phy-handle property exists */ fixup_device_tree_efika_add_phy(); } -#else -#define fixup_device_tree_efika() -#endif -#ifdef CONFIG_PPC_PASEMI_NEMO /* * CFE supplied on Nemo is broken in several ways, biggest * problem is that it reassigns ISA interrupts to unused mpic ints. @@ -3127,17 +3110,23 @@ static void __init fixup_device_tree_pasemi(void) prom_setprop(iob, name, "device_type", "isa", sizeof("isa")); } -#else /* !CONFIG_PPC_PASEMI_NEMO */ -static inline void fixup_device_tree_pasemi(void) { } -#endif static void __init fixup_device_tree(void) { - fixup_device_tree_chrp(); - fixup_device_tree_pmac(); - fixup_device_tree_pmac64(); - fixup_device_tree_efika(); - fixup_device_tree_pasemi(); + if (IS_ENABLED(CONFIG_PPC_CHRP)) + fixup_device_tree_chrp(); + + if (IS_ENABLED(CONFIG_PPC_PMAC)) + fixup_device_tree_pmac(); + + if (IS_ENABLED(CONFIG_PPC_PMAC) && IS_ENABLED(CONFIG_PPC64)) + fixup_device_tree_pmac64(); + + if (IS_ENABLED(CONFIG_PPC_EFIKA)) + fixup_device_tree_efika(); + + if (IS_ENABLED(CONFIG_PPC_PASEMI_NEMO)) + fixup_device_tree_pasemi(); } static void __init prom_find_boot_cpu(void) diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 727ed4a14545..c6997df63287 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -215,7 +215,7 @@ static int do_seccomp(struct pt_regs *regs) * have already loaded -ENOSYS into r3, or seccomp has put * something else in r3 (via SECCOMP_RET_ERRNO/TRACE). */ - if (__secure_computing(NULL)) + if (__secure_computing()) return -1; /* diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6fa179448c33..a08b0ede4e64 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -458,11 +458,8 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); - cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), + cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32), __alignof__(u32)); - if (!cpu_to_phys_id) - panic("%s: Failed to allocate %zu bytes align=0x%zx\n", - __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); for_each_node_by_type(dn, "cpu") { const __be32 *intserv; @@ -834,8 +831,8 @@ static int __init check_cache_coherency(void) if (devtree_coherency != KERNEL_COHERENCY) { printk(KERN_ERR "kernel coherency:%s != device tree_coherency:%s\n", - KERNEL_COHERENCY ? "on" : "off", - devtree_coherency ? 
"on" : "off"); + str_on_off(KERNEL_COHERENCY), + str_on_off(devtree_coherency)); BUG(); } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 75dbf3e0d9c4..5a1bf501fbe1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -140,13 +140,7 @@ arch_initcall(ppc_init); static void *__init alloc_stack(void) { - void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN); - - if (!ptr) - panic("cannot allocate %d bytes for stack at %pS\n", - THREAD_SIZE, (void *)_RET_IP_); - - return ptr; + return memblock_alloc_or_panic(THREAD_SIZE, THREAD_ALIGN); } void __init irqstack_early_init(void) diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index d8b4ab78bef0..9a084bdb8926 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -557,3 +557,4 @@ 464 common getxattrat sys_getxattrat 465 common listxattrat sys_listxattrat 466 common removexattrat sys_removexattrat +467 common open_tree_attr sys_open_tree_attr diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 0727332ad86f..15784c5c95c7 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -950,7 +950,7 @@ void __init time_init(void) sys_tz.tz_dsttime = 0; } - vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; + vdso_k_arch_data->tb_ticks_per_sec = tb_ticks_per_sec; #ifdef CONFIG_PPC64_PROC_SYSTEMCFG systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; #endif diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 5ccd791761e8..2f776f137a89 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c @@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1]; - int bit; if (unlikely(ftrace_graph_is_dead())) goto out; @@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; - bit = ftrace_test_recursion_trylock(ip, parent_ip); - if (bit < 0) - goto out; - - if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp)) + if (!function_graph_enter_regs(parent_ip, ip, 0, (unsigned long *)sp, fregs)) parent_ip = ppc_function_entry(return_to_handler); - ftrace_test_recursion_unlock(bit); out: arch_ftrace_regs(fregs)->regs.link = parent_ip; } diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c index 98787376eb87..ac35015f04c6 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_pg.c +++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c @@ -787,10 +787,10 @@ int ftrace_disable_ftrace_graph_caller(void) * in current thread info. Return the address we want to divert to. 
*/ static unsigned long -__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) +__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp, + struct ftrace_regs *fregs) { unsigned long return_hooker; - int bit; if (unlikely(ftrace_graph_is_dead())) goto out; @@ -798,16 +798,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; - bit = ftrace_test_recursion_trylock(ip, parent); - if (bit < 0) - goto out; - return_hooker = ppc_function_entry(return_to_handler); - if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp)) + if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs)) parent = return_hooker; - ftrace_test_recursion_unlock(bit); out: return parent; } @@ -816,13 +811,14 @@ out: void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { - arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, arch_ftrace_regs(fregs)->regs.gpr[1]); + arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, + arch_ftrace_regs(fregs)->regs.gpr[1], fregs); } #else unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) { - return __prepare_ftrace_return(parent, ip, sp); + return __prepare_ftrace_return(parent, ip, sp, NULL); } #endif #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index edf5cabe5dfd..cb8e9357383e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -263,10 +263,9 @@ static int __die(const char *str, struct pt_regs *regs, long err) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); - printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n", + printk("%s PAGE_SIZE=%luK%s %s%s%s%s %s\n", IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", PAGE_SIZE / 1024, get_mmu_str(), - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", IS_ENABLED(CONFIG_SMP) ? " SMP" : "", IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 43379365ce1b..219d67bcf747 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -17,7 +17,7 @@ #include <linux/elf.h> #include <linux/security.h> #include <linux/syscalls.h> -#include <linux/time_namespace.h> +#include <linux/vdso_datastore.h> #include <vdso/datapage.h> #include <asm/syscall.h> @@ -32,6 +32,8 @@ #include <asm/vdso_datapage.h> #include <asm/setup.h> +static_assert(__VDSO_PAGES == VDSO_NR_PAGES); + /* The alignment of the vDSO */ #define VDSO_ALIGNMENT (1 << 16) @@ -40,24 +42,6 @@ extern char vdso64_start, vdso64_end; long sys_ni_syscall(void); -/* - * The vdso data page (aka. systemcfg for old ppc64 fans) is here. 
- * Once the early boot kernel code no longer needs to muck around - * with it, it will become dynamically allocated - */ -static union { - struct vdso_arch_data data; - u8 page[2 * PAGE_SIZE]; -} vdso_data_store __page_aligned_data; -struct vdso_arch_data *vdso_data = &vdso_data_store.data; - -enum vvar_pages { - VVAR_BASE_PAGE_OFFSET, - VVAR_TIME_PAGE_OFFSET, - VVAR_TIMENS_PAGE_OFFSET, - VVAR_NR_PAGES, -}; - static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma, unsigned long text_size) { @@ -96,14 +80,6 @@ static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struc mm->context.vdso = NULL; } -static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf); - -static struct vm_special_mapping vvar_spec __ro_after_init = { - .name = "[vvar]", - .fault = vvar_fault, -}; - static struct vm_special_mapping vdso32_spec __ro_after_init = { .name = "[vdso]", .mremap = vdso32_mremap, @@ -116,73 +92,6 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = { .close = vdso_close, }; -#ifdef CONFIG_TIME_NS -struct vdso_data *arch_get_vdso_data(void *vvar_page) -{ - return vvar_page; -} - -/* - * The vvar mapping contains data for a specific time namespace, so when a task - * changes namespace we must unmap its vvar data for the old namespace. - * Subsequent faults will map in data for the new namespace. - * - * For more details see timens_setup_vdso_data(). - */ -int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) -{ - struct mm_struct *mm = task->mm; - VMA_ITERATOR(vmi, mm, 0); - struct vm_area_struct *vma; - - mmap_read_lock(mm); - for_each_vma(vmi, vma) { - if (vma_is_special_mapping(vma, &vvar_spec)) - zap_vma_pages(vma); - } - mmap_read_unlock(mm); - - return 0; -} -#endif - -static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, - struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct page *timens_page = find_timens_vvar_page(vma); - unsigned long pfn; - - switch (vmf->pgoff) { - case VVAR_BASE_PAGE_OFFSET: - pfn = virt_to_pfn(vdso_data); - break; - case VVAR_TIME_PAGE_OFFSET: - if (timens_page) - pfn = page_to_pfn(timens_page); - else - pfn = virt_to_pfn(vdso_data->data); - break; -#ifdef CONFIG_TIME_NS - case VVAR_TIMENS_PAGE_OFFSET: - /* - * If a task belongs to a time namespace then a namespace - * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and - * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET - * offset. - * See also the comment near timens_setup_vdso_data(). - */ - if (!timens_page) - return VM_FAULT_SIGBUS; - pfn = virt_to_pfn(vdso_data->data); - break; -#endif /* CONFIG_TIME_NS */ - default: - return VM_FAULT_SIGBUS; - } - - return vmf_insert_pfn(vma, vmf->address, pfn); -} - /* * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree @@ -191,7 +100,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int { unsigned long vdso_size, vdso_base, mappings_size; struct vm_special_mapping *vdso_spec; - unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE; + unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -217,9 +126,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int /* Add required alignment. 
*/ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT); - vma = _install_special_mapping(mm, vdso_base, vvar_size, - VM_READ | VM_MAYREAD | VM_IO | - VM_DONTDUMP | VM_PFNMAP, &vvar_spec); + vma = vdso_install_vvar_mapping(mm, vdso_base); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -299,10 +206,10 @@ static void __init vdso_setup_syscall_map(void) for (i = 0; i < NR_syscalls; i++) { if (sys_call_table[i] != (void *)&sys_ni_syscall) - vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); + vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); if (IS_ENABLED(CONFIG_COMPAT) && compat_sys_call_table[i] != (void *)&sys_ni_syscall) - vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); + vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); } } @@ -352,10 +259,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end) static int __init vdso_init(void) { #ifdef CONFIG_PPC64 - vdso_data->dcache_block_size = ppc64_caches.l1d.block_size; - vdso_data->icache_block_size = ppc64_caches.l1i.block_size; - vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size; - vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size; + vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size; + vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size; + vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size; + vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size; #endif /* CONFIG_PPC64 */ vdso_setup_syscall_map(); diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index 0e3ed6fb199f..e8824f933326 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -3,7 +3,7 @@ # List of files in the vdso, has to be asm only for now # Include the generic Makefile to check the built vdso. -include $(srctree)/lib/vdso/Makefile +include $(srctree)/lib/vdso/Makefile.include obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S index 0085ae464dac..488d3ade11e6 100644 --- a/arch/powerpc/kernel/vdso/cacheflush.S +++ b/arch/powerpc/kernel/vdso/cacheflush.S @@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) #ifdef CONFIG_PPC64 mflr r12 .cfi_register lr,r12 - get_datapage r10 + get_datapage r10 vdso_u_arch_data mtlr r12 .cfi_restore lr #endif diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S index db8e167f0166..d23b2e8e2a34 100644 --- a/arch/powerpc/kernel/vdso/datapage.S +++ b/arch/powerpc/kernel/vdso/datapage.S @@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) mflr r12 .cfi_register lr,r12 mr. 
r4,r3 - get_datapage r3 + get_datapage r3 vdso_u_arch_data mtlr r12 #ifdef __powerpc64__ addi r3,r3,CFG_SYSCALL_MAP64 @@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) .cfi_startproc mflr r12 .cfi_register lr,r12 - get_datapage r3 + get_datapage r3 vdso_u_arch_data #ifndef __powerpc64__ lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) #endif diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S index 5333848322ca..79c967212444 100644 --- a/arch/powerpc/kernel/vdso/gettimeofday.S +++ b/arch/powerpc/kernel/vdso/gettimeofday.S @@ -33,9 +33,9 @@ .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT #endif .ifeq \call_time - get_datapage r5 VDSO_DATA_OFFSET + get_datapage r5 vdso_u_time_data .else - get_datapage r4 VDSO_DATA_OFFSET + get_datapage r4 vdso_u_time_data .endif bl CFUNC(DOTSYM(\funct)) PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1) diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index 1a1b0b6d681a..72a1012b8a20 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -6,6 +6,7 @@ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> +#include <vdso/datapage.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle") @@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common) SECTIONS { - PROVIDE(_vdso_datapage = . - 3 * PAGE_SIZE); + VDSO_VVAR_SYMS + . = SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index e21b5506cad6..32102a05eaa7 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -6,6 +6,7 @@ #include <asm/vdso.h> #include <asm/page.h> #include <asm-generic/vmlinux.lds.h> +#include <vdso/datapage.h> #ifdef __LITTLE_ENDIAN__ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle") @@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common64) SECTIONS { - PROVIDE(_vdso_datapage = . - 3 * PAGE_SIZE); + VDSO_VVAR_SYMS + . 
= SIZEOF_HEADERS; .hash : { *(.hash) } :text diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c index 55a287c9a736..6f5167d81af5 100644 --- a/arch/powerpc/kernel/vdso/vgettimeofday.c +++ b/arch/powerpc/kernel/vdso/vgettimeofday.c @@ -7,43 +7,43 @@ #ifdef __powerpc64__ int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime_data(vd, clock, ts); } int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_getres_data(vd, clock_id, res); } #else int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime32_data(vd, clock, ts); } int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_gettime_data(vd, clock, ts); } int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_clock_getres_time32_data(vd, clock_id, res); } #endif int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz, - const struct vdso_data *vd) + const struct vdso_time_data *vd) { return __cvdso_gettimeofday_data(vd, tv, tz); } -__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd) +__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_time_data *vd) { return __cvdso_time_data(vd, time); } diff --git a/arch/powerpc/kernel/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S index 10f92f265d51..20bca3548b44 100644 --- a/arch/powerpc/kernel/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32_wrapper.S @@ -2,7 +2,7 @@ #include <linux/linkage.h> #include <asm/page.h> - __PAGE_ALIGNED_DATA + .section ".data..ro_after_init", "aw" .globl vdso32_start, vdso32_end .balign PAGE_SIZE diff --git a/arch/powerpc/kernel/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S index 839d1a61411d..1912936fa227 100644 --- a/arch/powerpc/kernel/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64_wrapper.S @@ -2,7 +2,7 @@ #include <linux/linkage.h> #include <asm/page.h> - __PAGE_ALIGNED_DATA + .section ".data..ro_after_init", "aw" .globl vdso64_start, vdso64_end .balign PAGE_SIZE diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 8c464a5d8246..2429cb1c7baa 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c @@ -495,8 +495,7 @@ static void start_watchdog(void *arg) *this_cpu_ptr(&wd_timer_tb) = get_tb(); - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = watchdog_timer_fn; + hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms), HRTIMER_MODE_REL_PINNED); } diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index b8333a49ea5d..58a930a47422 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -22,28 +22,6 @@ #include <asm/setup.h> #include <asm/firmware.h> -void machine_kexec_mask_interrupts(void) { - unsigned int i; - struct irq_desc *desc; - - for_each_irq_desc(i, desc) { - struct irq_chip *chip; - - chip = irq_desc_get_chip(desc); - if (!chip) - continue; - - if (chip->irq_eoi && 
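The watchdog.c hunk above (and the later kvm/powerpc.c one) replaces the two-step hrtimer_init() plus manual ->function assignment with a single hrtimer_setup() call that takes the callback directly. A minimal sketch of the new style, using an invented my_timer/my_timer_fn pair rather than anything from this patch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;         /* hypothetical example timer */

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
        /* do the periodic work, then re-arm 100ms out */
        hrtimer_forward_now(t, ms_to_ktime(100));
        return HRTIMER_RESTART;
}

static void my_timer_start(void)
{
        /* previously: hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         *             my_timer.function = my_timer_fn;
         * now one call sets clock, mode and callback together: */
        hrtimer_setup(&my_timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}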
irqd_irq_inprogress(&desc->irq_data)) - chip->irq_eoi(&desc->irq_data); - - if (chip->irq_mask) - chip->irq_mask(&desc->irq_data); - - if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) - chip->irq_disable(&desc->irq_data); - } -} - #ifdef CONFIG_CRASH_DUMP void machine_crash_shutdown(struct pt_regs *regs) { diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c index c95f96850c9e..deb28eb44f30 100644 --- a/arch/powerpc/kexec/core_32.c +++ b/arch/powerpc/kexec/core_32.c @@ -7,6 +7,7 @@ * Copyright (C) 2005 IBM Corporation. */ +#include <linux/irq.h> #include <linux/kexec.h> #include <linux/mm.h> #include <linux/string.h> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 25429905ae90..86bff159c51e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, * states are synchronized from L0 to L1. L1 needs to inform L0 about * MER=1 only when there are pending external interrupts. * In the above if check, MER bit is set if there are pending - * external interrupts. Hence, explicity mask off MER bit + * external interrupts. Hence, explicitly mask off MER bit * here as otherwise it may generate spurious interrupts in L2 KVM * causing an endless loop, which results in L2 guest getting hung. */ diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 6d0d329cbb35..f9acf866c709 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -34,6 +34,8 @@ enum vcpu_ftr { #define E500_TLB_BITMAP (1 << 30) /* TLB1 entry is mapped by host TLB0 */ #define E500_TLB_TLB0 (1 << 29) +/* entry is writable on the host */ +#define E500_TLB_WRITABLE (1 << 28) /* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ #define E500_TLB_MAS2_ATTR (0x7f) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index e5a145b578a4..06caf8bbbe2b 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -45,11 +45,14 @@ static inline unsigned int tlb1_max_shadow_size(void) return host_tlb_params[1].entries - tlbcam_index - 1; } -static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) +static inline u32 e500_shadow_mas3_attrib(u32 mas3, bool writable, int usermode) { /* Mask off reserved bits. 
*/ mas3 &= MAS3_ATTRIB_MASK; + if (!writable) + mas3 &= ~(MAS3_UW|MAS3_SW); + #ifndef CONFIG_KVM_BOOKE_HV if (!usermode) { /* Guest is in supervisor mode, @@ -242,17 +245,18 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) return tlbe->mas7_3 & (MAS3_SW|MAS3_UW); } -static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref, +static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, - kvm_pfn_t pfn, unsigned int wimg) + kvm_pfn_t pfn, unsigned int wimg, + bool writable) { ref->pfn = pfn; ref->flags = E500_TLB_VALID; + if (writable) + ref->flags |= E500_TLB_WRITABLE; /* Use guest supplied MAS2_G and MAS2_E */ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; - - return tlbe_is_writable(gtlbe); } static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) @@ -305,6 +309,7 @@ static void kvmppc_e500_setup_stlbe( { kvm_pfn_t pfn = ref->pfn; u32 pr = vcpu->arch.shared->msr & MSR_PR; + bool writable = !!(ref->flags & E500_TLB_WRITABLE); BUG_ON(!(ref->flags & E500_TLB_VALID)); @@ -312,7 +317,7 @@ static void kvmppc_e500_setup_stlbe( stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | - e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); + e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr); } static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, @@ -321,15 +326,14 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, struct tlbe_ref *ref) { struct kvm_memory_slot *slot; - unsigned long pfn = 0; /* silence GCC warning */ + unsigned int psize; + unsigned long pfn; struct page *page = NULL; unsigned long hva; - int pfnmap = 0; int tsize = BOOK3E_PAGESZ_4K; int ret = 0; unsigned long mmu_seq; struct kvm *kvm = vcpu_e500->vcpu.kvm; - unsigned long tsize_pages = 0; pte_t *ptep; unsigned int wimg = 0; pgd_t *pgdir; @@ -351,110 +355,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); hva = gfn_to_hva_memslot(slot, gfn); - if (tlbsel == 1) { - struct vm_area_struct *vma; - mmap_read_lock(kvm->mm); - - vma = find_vma(kvm->mm, hva); - if (vma && hva >= vma->vm_start && - (vma->vm_flags & VM_PFNMAP)) { - /* - * This VMA is a physically contiguous region (e.g. - * /dev/mem) that bypasses normal Linux page - * management. Find the overlap between the - * vma and the memslot. - */ - - unsigned long start, end; - unsigned long slot_start, slot_end; - - pfnmap = 1; - - start = vma->vm_pgoff; - end = start + - vma_pages(vma); - - pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT); - - slot_start = pfn - (gfn - slot->base_gfn); - slot_end = slot_start + slot->npages; - - if (start < slot_start) - start = slot_start; - if (end > slot_end) - end = slot_end; - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - - /* - * Now find the largest tsize (up to what the guest - * requested) that will cover gfn, stay within the - * range, and for which gfn and pfn are mutually - * aligned. 
- */ - - for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { - unsigned long gfn_start, gfn_end; - tsize_pages = 1UL << (tsize - 2); - - gfn_start = gfn & ~(tsize_pages - 1); - gfn_end = gfn_start + tsize_pages; - - if (gfn_start + pfn - gfn < start) - continue; - if (gfn_end + pfn - gfn > end) - continue; - if ((gfn & (tsize_pages - 1)) != - (pfn & (tsize_pages - 1))) - continue; - - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); - pfn &= ~(tsize_pages - 1); - break; - } - } else if (vma && hva >= vma->vm_start && - is_vm_hugetlb_page(vma)) { - unsigned long psize = vma_kernel_pagesize(vma); - - tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> - MAS1_TSIZE_SHIFT; - - /* - * Take the largest page size that satisfies both host - * and guest mapping - */ - tsize = min(__ilog2(psize) - 10, tsize); - - /* - * e500 doesn't implement the lowest tsize bit, - * or 1K pages. - */ - tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); - } - - mmap_read_unlock(kvm->mm); - } - - if (likely(!pfnmap)) { - tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT); - pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page); - if (is_error_noslot_pfn(pfn)) { - if (printk_ratelimit()) - pr_err("%s: real page not found for gfn %lx\n", - __func__, (long)gfn); - return -EINVAL; - } - - /* Align guest and physical address to page map boundaries */ - pfn &= ~(tsize_pages - 1); - gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, &writable, &page); + if (is_error_noslot_pfn(pfn)) { + if (printk_ratelimit()) + pr_err("%s: real page not found for gfn %lx\n", + __func__, (long)gfn); + return -EINVAL; } spin_lock(&kvm->mmu_lock); @@ -472,14 +378,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, * can't run hence pfn won't change. */ local_irq_save(flags); - ptep = find_linux_pte(pgdir, hva, NULL, NULL); + ptep = find_linux_pte(pgdir, hva, NULL, &psize); if (ptep) { pte_t pte = READ_ONCE(*ptep); if (pte_present(pte)) { wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; - local_irq_restore(flags); } else { local_irq_restore(flags); pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n", @@ -488,10 +393,72 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, goto out; } } - writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); + local_irq_restore(flags); + + if (psize && tlbsel == 1) { + unsigned long psize_pages, tsize_pages; + unsigned long start, end; + unsigned long slot_start, slot_end; + + psize_pages = 1UL << (psize - PAGE_SHIFT); + start = pfn & ~(psize_pages - 1); + end = start + psize_pages; + + slot_start = pfn - (gfn - slot->base_gfn); + slot_end = slot_start + slot->npages; + + if (start < slot_start) + start = slot_start; + if (end > slot_end) + end = slot_end; + + tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >> + MAS1_TSIZE_SHIFT; + + /* + * Any page size that doesn't satisfy the host mapping + * will fail the start and end tests. + */ + tsize = min(psize - PAGE_SHIFT + BOOK3E_PAGESZ_4K, tsize); + + /* + * e500 doesn't implement the lowest tsize bit, + * or 1K pages. + */ + tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1); + + /* + * Now find the largest tsize (up to what the guest + * requested) that will cover gfn, stay within the + * range, and for which gfn and pfn are mutually + * aligned. 
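The comment above is the core constraint in the reworked e500_mmu_host.c mapping path, now evaluated after the host PTE size is known: a single shadow TLB1 entry can only cover a naturally aligned block in which the guest frame number and host frame number share the same low bits. Ignoring the memslot and host-mapping range clamping that the real loop also performs, the size search amounts to something like the following sketch (names invented, not from the patch):

/*
 * Illustration only: find the largest power-of-four block of pages
 * (matching the e500's even-only TSIZE steps) that gfn and pfn can
 * share as one naturally aligned mapping.
 */
static unsigned long largest_aligned_block(unsigned long gfn, unsigned long pfn,
                                           unsigned long max_pages)
{
        unsigned long npages;

        for (npages = max_pages; npages > 1; npages >>= 2) {
                /* gfn and pfn must sit at the same offset inside the block */
                if ((gfn & (npages - 1)) == (pfn & (npages - 1)))
                        return npages;
        }
        return 1;
}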
+ */ + + for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { + unsigned long gfn_start, gfn_end; + tsize_pages = 1UL << (tsize - 2); + + gfn_start = gfn & ~(tsize_pages - 1); + gfn_end = gfn_start + tsize_pages; + + if (gfn_start + pfn - gfn < start) + continue; + if (gfn_end + pfn - gfn > end) + continue; + if ((gfn & (tsize_pages - 1)) != + (pfn & (tsize_pages - 1))) + continue; + + gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); + pfn &= ~(tsize_pages - 1); + break; + } + } + kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); + writable = tlbe_is_writable(stlbe); /* Clear i-cache for new pages */ kvmppc_mmu_flush_icache(pfn); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index ce1d91eed231..61f2b7e007fa 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -766,8 +766,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { int err; - hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); - vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; + hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME, + HRTIMER_MODE_ABS); #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index f14ecab674a3..dd8a4b52a0cc 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -78,4 +78,10 @@ CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) # Enable <altivec.h> CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) +obj-$(CONFIG_CRC32_ARCH) += crc32-powerpc.o +crc32-powerpc-y := crc32-glue.o crc32c-vpmsum_asm.o + +obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-powerpc.o +crc-t10dif-powerpc-y := crc-t10dif-glue.o crct10dif-vpmsum_asm.o + obj-$(CONFIG_PPC64) += $(obj64-y) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index af97fbb3c257..f84e0337cc02 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -108,7 +108,7 @@ static int text_area_cpu_up(unsigned int cpu) unsigned long addr; int err; - area = get_vm_area(PAGE_SIZE, VM_ALLOC); + area = get_vm_area(PAGE_SIZE, 0); if (!area) { WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu); @@ -493,7 +493,9 @@ static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool rep orig_mm = start_using_temp_mm(patching_mm); + kasan_disable_current(); err = __patch_instructions(patch_addr, code, len, repeat_instr); + kasan_enable_current(); /* context synchronisation performed by __patch_instructions */ stop_using_temp_mm(patching_mm, orig_mm); diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/lib/crc-t10dif-glue.c index 1dc8b6915178..f411b0120cc5 100644 --- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c +++ b/arch/powerpc/lib/crc-t10dif-glue.c @@ -7,7 +7,6 @@ */ #include <linux/crc-t10dif.h> -#include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <linux/init.h> #include <linux/module.h> @@ -22,15 +21,18 @@ #define VECTOR_BREAKPOINT 64 +static DEFINE_STATIC_KEY_FALSE(have_vec_crypto); + u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len); -static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len) +u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len) { unsigned int prealign; unsigned int tail; u32 crc = crci; - if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable()) + if (len < 
(VECTOR_BREAKPOINT + VMX_ALIGN) || + !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable()) return crc_t10dif_generic(crc, p, len); if ((unsigned long)p & VMX_ALIGN_MASK) { @@ -60,67 +62,22 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len) return crc & 0xffff; } +EXPORT_SYMBOL(crc_t10dif_arch); -static int crct10dif_vpmsum_init(struct shash_desc *desc) +static int __init crc_t10dif_powerpc_init(void) { - u16 *crc = shash_desc_ctx(desc); - - *crc = 0; + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + static_branch_enable(&have_vec_crypto); return 0; } +arch_initcall(crc_t10dif_powerpc_init); -static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data, - unsigned int length) +static void __exit crc_t10dif_powerpc_exit(void) { - u16 *crc = shash_desc_ctx(desc); - - *crc = crct10dif_vpmsum(*crc, data, length); - - return 0; -} - - -static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out) -{ - u16 *crcp = shash_desc_ctx(desc); - - *(u16 *)out = *crcp; - return 0; } - -static struct shash_alg alg = { - .init = crct10dif_vpmsum_init, - .update = crct10dif_vpmsum_update, - .final = crct10dif_vpmsum_final, - .descsize = CRC_T10DIF_DIGEST_SIZE, - .digestsize = CRC_T10DIF_DIGEST_SIZE, - .base = { - .cra_name = "crct10dif", - .cra_driver_name = "crct10dif-vpmsum", - .cra_priority = 200, - .cra_blocksize = CRC_T10DIF_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}; - -static int __init crct10dif_vpmsum_mod_init(void) -{ - if (!cpu_has_feature(CPU_FTR_ARCH_207S)) - return -ENODEV; - - return crypto_register_shash(&alg); -} - -static void __exit crct10dif_vpmsum_mod_fini(void) -{ - crypto_unregister_shash(&alg); -} - -module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init); -module_exit(crct10dif_vpmsum_mod_fini); +module_exit(crc_t10dif_powerpc_exit); MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>"); MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions"); MODULE_LICENSE("GPL"); -MODULE_ALIAS_CRYPTO("crct10dif"); -MODULE_ALIAS_CRYPTO("crct10dif-vpmsum"); diff --git a/arch/powerpc/lib/crc32-glue.c b/arch/powerpc/lib/crc32-glue.c new file mode 100644 index 000000000000..dbd10f339183 --- /dev/null +++ b/arch/powerpc/lib/crc32-glue.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/crc32.h> +#include <crypto/internal/simd.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/cpufeature.h> +#include <asm/simd.h> +#include <asm/switch_to.h> + +#define VMX_ALIGN 16 +#define VMX_ALIGN_MASK (VMX_ALIGN-1) + +#define VECTOR_BREAKPOINT 512 + +static DEFINE_STATIC_KEY_FALSE(have_vec_crypto); + +u32 __crc32c_vpmsum(u32 crc, const u8 *p, size_t len); + +u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) +{ + return crc32_le_base(crc, p, len); +} +EXPORT_SYMBOL(crc32_le_arch); + +u32 crc32c_arch(u32 crc, const u8 *p, size_t len) +{ + unsigned int prealign; + unsigned int tail; + + if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || + !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable()) + return crc32c_base(crc, p, len); + + if ((unsigned long)p & VMX_ALIGN_MASK) { + prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); + crc = crc32c_base(crc, p, prealign); + len -= prealign; + p += prealign; + } + + if (len & ~VMX_ALIGN_MASK) { + preempt_disable(); + pagefault_disable(); + enable_kernel_altivec(); + crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + 
disable_kernel_altivec(); + pagefault_enable(); + preempt_enable(); + } + + tail = len & VMX_ALIGN_MASK; + if (tail) { + p += len & ~VMX_ALIGN_MASK; + crc = crc32c_base(crc, p, tail); + } + + return crc; +} +EXPORT_SYMBOL(crc32c_arch); + +u32 crc32_be_arch(u32 crc, const u8 *p, size_t len) +{ + return crc32_be_base(crc, p, len); +} +EXPORT_SYMBOL(crc32_be_arch); + +static int __init crc32_powerpc_init(void) +{ + if (cpu_has_feature(CPU_FTR_ARCH_207S) && + (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO)) + static_branch_enable(&have_vec_crypto); + return 0; +} +arch_initcall(crc32_powerpc_init); + +static void __exit crc32_powerpc_exit(void) +{ +} +module_exit(crc32_powerpc_exit); + +u32 crc32_optimizations(void) +{ + if (static_key_enabled(&have_vec_crypto)) + return CRC32C_OPTIMIZATION; + return 0; +} +EXPORT_SYMBOL(crc32_optimizations); + +MODULE_AUTHOR("Anton Blanchard <anton@samba.org>"); +MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions"); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/lib/crc32-vpmsum_core.S index b0f87f595b26..b0f87f595b26 100644 --- a/arch/powerpc/crypto/crc32-vpmsum_core.S +++ b/arch/powerpc/lib/crc32-vpmsum_core.S diff --git a/arch/powerpc/crypto/crc32c-vpmsum_asm.S b/arch/powerpc/lib/crc32c-vpmsum_asm.S index bf442004ea1f..bf442004ea1f 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_asm.S +++ b/arch/powerpc/lib/crc32c-vpmsum_asm.S diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S b/arch/powerpc/lib/crct10dif-vpmsum_asm.S index f0b93a0fe168..f0b93a0fe168 100644 --- a/arch/powerpc/crypto/crct10dif-vpmsum_asm.S +++ b/arch/powerpc/lib/crct10dif-vpmsum_asm.S diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index d491da8d1838..58ed6bd613a6 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c @@ -45,7 +45,7 @@ int exit_vmx_usercopy(void) * set and we are preemptible. The hack here is to schedule a * decrementer to fire here and reschedule for us if necessary. */ - if (IS_ENABLED(CONFIG_PREEMPT) && need_resched()) + if (IS_ENABLED(CONFIG_PREEMPTION) && need_resched()) set_dec(1); return 0; } diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 6978344edcb4..be9c4106e22f 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -377,10 +377,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. 
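With the crypto-layer glue gone, the vpmsum implementations above are reached through the ordinary CRC library calls, and the static key makes the accelerated path kick in automatically once the CPU advertises vector crypto. A hedged sketch of what a caller sees (the buffer, seed and message are invented for the example):

#include <linux/crc32c.h>
#include <linux/crc-t10dif.h>
#include <linux/printk.h>
#include <linux/types.h>

static void crc_example(const u8 *buf, size_t len)
{
        /* Both helpers transparently fall back to the generic tables for
         * short buffers or when AltiVec is not usable in this context. */
        u32 c32 = crc32c(0, buf, len);
        u16 c16 = crc_t10dif(buf, len);

        pr_info("crc32c=%08x crc_t10dif=%04x\n", c32, c16);
}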
*/ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = memblock_alloc(Hash_size, Hash_size); - if (!Hash) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, Hash_size, Hash_size); + Hash = memblock_alloc_or_panic(Hash_size, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; pr_info("Total memory = %lldMB; using %ldkB for hash table\n", diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 1715b07c630c..4e1e45420bd4 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -253,7 +253,7 @@ static void pmd_frag_destroy(void *pmd_frag) count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 374542528080..ce64abea9e3e 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -330,11 +330,7 @@ void __init mmu_partition_table_init(void) unsigned long ptcr; /* Initialize the Partition Table with no entries */ - partition_tb = memblock_alloc(patb_size, patb_size); - if (!partition_tb) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, patb_size, patb_size); - + partition_tb = memblock_alloc_or_panic(patb_size, patb_size); ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); set_ptcr_when_no_uv(ptcr); powernv_set_nmmu_ptcr(ptcr); @@ -477,7 +473,7 @@ void pmd_fragment_free(unsigned long *pmd) BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) { - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c index 43c03b84ff32..60c78aac0f63 100644 --- a/arch/powerpc/mm/kasan/init_book3e_64.c +++ b/arch/powerpc/mm/kasan/init_book3e_64.c @@ -40,19 +40,19 @@ static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgpr pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (kasan_pud_table(*p4dp)) { - pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + pudp = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE); memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (kasan_pmd_table(*pudp)) { - pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); + pmdp = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE); memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (kasan_pte_table(*pmdp)) { - ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); + ptep = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE); memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } @@ -74,7 +74,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } diff --git 
a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c index 3fb5ce4f48f4..7d959544c077 100644 --- a/arch/powerpc/mm/kasan/init_book3s_64.c +++ b/arch/powerpc/mm/kasan/init_book3s_64.c @@ -32,7 +32,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c index 0b181da40ddb..a1a4e697251a 100644 --- a/arch/powerpc/mm/nohash/mmu_context.c +++ b/arch/powerpc/mm/nohash/mmu_context.c @@ -385,21 +385,11 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!context_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), + context_map = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); + context_mm = memblock_alloc_or_panic(sizeof(void *) * (LAST_CONTEXT + 1), SMP_CACHE_BYTES); - if (!context_mm) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(void *) * (LAST_CONTEXT + 1)); if (IS_ENABLED(CONFIG_SMP)) { - stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!stale_map[boot_cpuid]) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - + stale_map[boot_cpuid] = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead); diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index e89f64a0f24a..713268ccb1a0 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag) count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } @@ -111,7 +111,7 @@ static void pte_free_now(struct rcu_head *head) struct ptdesc *ptdesc; ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 787b22206386..15276068f657 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -50,13 +50,8 @@ notrace void __init early_ioremap_init(void) void __init *early_alloc_pgtable(unsigned long size) { - void *ptr = memblock_alloc(size, size); + return memblock_alloc_or_panic(size, size); - if (!ptr) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, size); - - return ptr; } pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 2b79171ee185..f4e03aaabb4c 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw) static inline 
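A recurring cleanup in the memory-management hunks above (and again later in the powermac, OPAL, PS3 and msi_bitmap code) replaces memblock_alloc() followed by an open-coded panic() with memblock_alloc_or_panic(), which reports the caller and size itself if the early allocation fails. A minimal sketch with an invented table size:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#define MY_TABLE_SIZE   SZ_64K          /* made-up size, example only */

static void *my_table __initdata;

static void __init my_early_table_alloc(void)
{
        /* Like memblock_alloc(), the memory comes back zeroed; unlike it,
         * failure panics with the caller and size, so no NULL check here. */
        my_table = memblock_alloc_or_panic(MY_TABLE_SIZE, MY_TABLE_SIZE);
}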
void power_pmu_bhrb_enable(struct perf_event *event) {} static inline void power_pmu_bhrb_disable(struct perf_event *event) {} -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {} +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) +{ +} static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} static void pmao_restore_workaround(bool ebb) { } #endif /* CONFIG_PPC32 */ @@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event) /* Called from ctxsw to prevent one process's branch entries to * mingle with the other process's entries during context switch. */ -static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) +static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, + struct task_struct *task, bool sched_in) { if (!ppmu->bhrb_nr) return; diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c index b24d4102fbf6..1dc095ad48fc 100644 --- a/arch/powerpc/platforms/8xx/cpm1.c +++ b/arch/powerpc/platforms/8xx/cpm1.c @@ -45,7 +45,7 @@ #include <sysdev/fsl_soc.h> #ifdef CONFIG_8xx_GPIO -#include <linux/gpio/legacy-of-mm-gpiochip.h> +#include <linux/gpio/driver.h> #endif #define CPM_MAP_SIZE (0x4000) @@ -376,7 +376,8 @@ int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode) #ifdef CONFIG_8xx_GPIO struct cpm1_gpio16_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ @@ -386,19 +387,17 @@ struct cpm1_gpio16_chip { int irq[16]; }; -static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio16_save_regs(struct cpm1_gpio16_chip *cpm1_gc) { - struct cpm1_gpio16_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be16(&iop->dat); } static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; u16 pin_mask; pin_mask = 1 << (15 - gpio); @@ -406,11 +405,9 @@ static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be16(&iop->dat) & pin_mask); } -static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, - int value) +static void __cpm1_gpio16_set(struct cpm1_gpio16_chip *cpm1_gc, u16 pin_mask, int value) { - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -422,38 +419,35 @@ static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask, static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio16_set(mm_gc, pin_mask, value); + __cpm1_gpio16_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); } static int 
cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); return cpm1_gc->irq[gpio] ? : -ENXIO; } static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits16(&iop->dir, pin_mask); - __cpm1_gpio16_set(mm_gc, pin_mask, val); + __cpm1_gpio16_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -462,9 +456,8 @@ static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport16 __iomem *iop = mm_gc->regs; + struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport16 __iomem *iop = cpm1_gc->regs; unsigned long flags; u16 pin_mask = 1 << (15 - gpio); @@ -481,11 +474,10 @@ int cpm1_gpiochip_add16(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio16_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; u16 mask; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; @@ -499,10 +491,8 @@ int cpm1_gpiochip_add16(struct device *dev) cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++); } - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio16_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 16; gc->direction_input = cpm1_gpio16_dir_in; gc->direction_output = cpm1_gpio16_dir_out; @@ -512,30 +502,39 @@ int cpm1_gpiochip_add16(struct device *dev) gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio16_save_regs(cpm1_gc); + + return devm_gpiochip_add_data(dev, gc, cpm1_gc); } struct cpm1_gpio32_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ u32 cpdata; }; -static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm1_gpio32_save_regs(struct cpm1_gpio32_chip *cpm1_gc) { - struct cpm1_gpio32_chip *cpm1_gc = - container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; cpm1_gc->cpdata = in_be32(&iop->dat); } static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; u32 pin_mask; pin_mask = 1 << (31 - gpio); @@ -543,11 +542,9 @@ static int cpm1_gpio32_get(struct 
gpio_chip *gc, unsigned int gpio) return !!(in_be32(&iop->dat) & pin_mask); } -static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, - int value) +static void __cpm1_gpio32_set(struct cpm1_gpio32_chip *cpm1_gc, u32 pin_mask, int value) { - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; if (value) cpm1_gc->cpdata |= pin_mask; @@ -559,30 +556,28 @@ static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); - __cpm1_gpio32_set(mm_gc, pin_mask, value); + __cpm1_gpio32_set(cpm1_gc, pin_mask, value); spin_unlock_irqrestore(&cpm1_gc->lock, flags); } static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm1_gc->lock, flags); setbits32(&iop->dir, pin_mask); - __cpm1_gpio32_set(mm_gc, pin_mask, val); + __cpm1_gpio32_set(cpm1_gc, pin_mask, val); spin_unlock_irqrestore(&cpm1_gc->lock, flags); @@ -591,9 +586,8 @@ static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm_ioport32b __iomem *iop = mm_gc->regs; + struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(gc); + struct cpm_ioport32b __iomem *iop = cpm1_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); @@ -610,19 +604,16 @@ int cpm1_gpiochip_add32(struct device *dev) { struct device_node *np = dev->of_node; struct cpm1_gpio32_chip *cpm1_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; - cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL); + cpm1_gc = devm_kzalloc(dev, sizeof(*cpm1_gc), GFP_KERNEL); if (!cpm1_gc) return -ENOMEM; spin_lock_init(&cpm1_gc->lock); - mm_gc = &cpm1_gc->mm_gc; - gc = &mm_gc->gc; - - mm_gc->save_regs = cpm1_gpio32_save_regs; + gc = &cpm1_gc->gc; + gc->base = -1; gc->ngpio = 32; gc->direction_input = cpm1_gpio32_dir_in; gc->direction_output = cpm1_gpio32_dir_out; @@ -631,7 +622,17 @@ int cpm1_gpiochip_add32(struct device *dev) gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm1_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm1_gc->regs)) + return PTR_ERR(cpm1_gc->regs); + + cpm1_gpio32_save_regs(cpm1_gc); + + return devm_gpiochip_add_data(dev, gc, cpm1_gc); } #endif /* CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index fe2e0249cbc2..a112d26185a0 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ 
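The cpm1.c conversion above drops the legacy of_mm_gpio_chip wrapper: the driver now embeds a plain struct gpio_chip, maps its registers with devm_of_iomap(), names the chip with devm_kasprintf(), and registers it with devm_gpiochip_add_data(), so per-chip state comes back via gpiochip_get_data() and no explicit teardown path is needed. A rough, hypothetical skeleton of that shape (the mychip_* names and the single .get callback are not from the patch):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>

struct mychip {
        struct gpio_chip gc;
        void __iomem *regs;
};

static int mychip_get(struct gpio_chip *gc, unsigned int offset)
{
        struct mychip *chip = gpiochip_get_data(gc);

        return !!(readl(chip->regs) & BIT(offset));
}

static int mychip_probe(struct device *dev)
{
        struct mychip *chip;

        chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->regs = devm_of_iomap(dev, dev->of_node, 0, NULL);
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);

        chip->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
        if (!chip->gc.label)
                return -ENOMEM;

        chip->gc.parent = dev;
        chip->gc.owner = THIS_MODULE;
        chip->gc.base = -1;             /* dynamic GPIO number allocation */
        chip->gc.ngpio = 16;
        chip->gc.get = mychip_get;

        /* devm-managed: unregistered and freed automatically on unbind */
        return devm_gpiochip_add_data(dev, &chip->gc, chip);
}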
-514,10 +514,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); - if (!nvram_image) - panic("%s: Failed to allocate %u bytes\n", __func__, - NVRAM_SIZE); + nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c64515..4ac9808e55a4 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -88,26 +88,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } } -static void memtrace_clear_range(unsigned long start_pfn, - unsigned long nr_pages) -{ - unsigned long pfn; - - /* As HIGHMEM does not apply, use clear_page() directly. */ - for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { - if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) - cond_resched(); - clear_page(__va(PFN_PHYS(pfn))); - } - /* - * Before we go ahead and use this range as cache inhibited range - * flush the cache. - */ - flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), - (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), - FLUSH_CHUNK_SIZE); -} - static u64 memtrace_alloc_node(u32 nid, u64 size) { const unsigned long nr_pages = PHYS_PFN(size); @@ -119,17 +99,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) * by alloc_contig_pages(). */ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | - __GFP_NOWARN, nid, NULL); + __GFP_NOWARN | __GFP_ZERO, nid, NULL); if (!page) return 0; start_pfn = page_to_pfn(page); /* - * Clear the range while we still have a linear mapping. - * - * TODO: use __GFP_ZERO with alloc_contig_pages() once supported. + * Before we go ahead and use this range as cache inhibited range + * flush the cache. */ - memtrace_clear_range(start_pfn, nr_pages); + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); /* * Set pages PageOffline(), to indicate that nobody (e.g., hibernation, diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index c9a9b759cc92..a379ff86c120 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf, /* end of vector */ bufp[idx++] = cpu_to_be64(AT_NULL); - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV, + buf = append_elf64_note(buf, NN_AUXV, NT_AUXV, oc_conf->auxv_buf, AUXV_DESC_SZ); return buf; } @@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) * crashing CPU's prstatus. 
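In the memtrace.c hunk above, the hand-rolled memtrace_clear_range() walk disappears because alloc_contig_pages() is simply asked for zeroed memory with __GFP_ZERO; only the dcache flush is kept, since the range is later used as cache-inhibited. The allocation side boils down to the following sketch (node id and page count are whatever the caller computed):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *grab_zeroed_chunk(int nid, unsigned long nr_pages)
{
        /* __GFP_ZERO: the pages arrive already cleared, so no
         * clear_page() loop over the range is needed afterwards. */
        return alloc_contig_pages(nr_pages,
                                  GFP_KERNEL | __GFP_THISNODE |
                                  __GFP_NOWARN | __GFP_ZERO,
                                  nid, NULL);
}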
*/ first_cpu_note = buf; - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) { @@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf) fill_prstatus(&prstatus, thread_pir, &regs); if (thread_pir != oc_conf->crashing_cpu) { - buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, + buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } else { @@ -287,7 +287,7 @@ * Add crashing CPU as the first NT_PRSTATUS note for * GDB to process the core file appropriately. */ - append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME, + append_elf64_note(first_cpu_note, NN_PRSTATUS, NT_PRSTATUS, &prstatus, sizeof(prstatus)); } diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 5d0f35bb917e..9ec265fcaff4 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. */ - mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); - if (!mc_recoverable_range) - panic("%s: Failed to allocate %u bytes align=0x%lx\n", - __func__, size, __alignof__(u64)); + mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = @@ -818,7 +815,7 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name, sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; - attr->read = sysfs_bin_attr_simple_read; + attr->read_new = sysfs_bin_attr_simple_read; attr->private = __va(vals[0]); attr->size = vals[1]; diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 5144f11359f7..150c09b58ae8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_alloc(p->size, p->align); - if (!p->address) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, p->size, p->align); + p->address = memblock_alloc_or_panic(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 1893f66371fa..b12ef382fec7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) switch(rets[0]) { case 0: - result = EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE; + result = EEH_STATE_MMIO_ACTIVE | + EEH_STATE_DMA_ACTIVE | + EEH_STATE_MMIO_ENABLED | + EEH_STATE_DMA_ENABLED; break; case 1: result = EEH_STATE_RESET_ACTIVE | diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 534cd159e9ab..ae6f7a235d8b 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -1650,7 +1650,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn) iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, dynamic_addr, dynamic_len, page_shift, NULL,
&iommu_table_lpar_multi_ops); - iommu_init_table(newtbl, pci->phb->node, start, end); + iommu_init_table(newtbl, pci->phb->node, + start >> page_shift, end >> page_shift); pci->table_group->tables[default_win_removed ? 0 : 1] = newtbl; @@ -2065,7 +2066,9 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu offset, 1UL << window_shift, IOMMU_PAGE_SHIFT_4K, NULL, &iommu_table_lpar_multi_ops); - iommu_init_table(tbl, pci->phb->node, start, end); + iommu_init_table(tbl, pci->phb->node, + start >> IOMMU_PAGE_SHIFT_4K, + end >> IOMMU_PAGE_SHIFT_4K); table_group->tables[0] = tbl; @@ -2136,7 +2139,7 @@ static long spapr_tce_create_table(struct iommu_table_group *table_group, int nu /* New table for using DDW instead of the default DMA window */ iommu_table_setparms_common(tbl, pci->phb->bus->number, create.liobn, win_addr, 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops); - iommu_init_table(tbl, pci->phb->node, start, end); + iommu_init_table(tbl, pci->phb->node, start >> page_shift, end >> page_shift); pci->table_group->tables[num] = tbl; set_iommu_table_base(&pdev->dev, tbl); @@ -2205,6 +2208,9 @@ static long spapr_tce_unset_window(struct iommu_table_group *table_group, int nu const char *win_name; int ret = -ENODEV; + if (!tbl) /* The table was never created OR window was never opened */ + return 0; + mutex_lock(&dma_win_init_mutex); if ((num == 0) && is_default_window_table(table_group, tbl)) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 1798f0f14d58..62bd8e2d5d4c 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -53,7 +53,7 @@ struct update_props_workarea { static unsigned int nmi_wd_lpm_factor = 200; #ifdef CONFIG_SYSCTL -static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { +static const struct ctl_table nmi_wd_lpm_factor_ctl_table[] = { { .procname = "nmi_wd_lpm_factor", .data = &nmi_wd_lpm_factor, diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c index 1574176e3ffc..c86950d7105a 100644 --- a/arch/powerpc/platforms/pseries/papr-vpd.c +++ b/arch/powerpc/platforms/pseries/papr-vpd.c @@ -482,14 +482,13 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc) goto free_blob; } - file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops, - (void *)blob, O_RDONLY); + file = anon_inode_getfile_fmode("[papr-vpd]", &papr_vpd_handle_ops, + (void *)blob, O_RDONLY, + FMODE_LSEEK | FMODE_PREAD); if (IS_ERR(file)) { err = PTR_ERR(file); goto put_fd; } - - file->f_mode |= FMODE_LSEEK | FMODE_PREAD; fd_install(fd, file); return fd; put_fd: diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f84ac9fbe203..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 1aa0cb097c9c..7b9a5ea9cad9 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -75,7 +75,7 @@ static void 
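The papr_scm.c hunk above swaps msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000) for secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL), dropping the manual seconds-to-milliseconds scaling. The pattern, with an invented interval name:

#include <linux/jiffies.h>

#define MY_POLL_INTERVAL_SEC    30      /* example value only */

static unsigned long my_next_deadline(void)
{
        /* equivalent to jiffies + msecs_to_jiffies(MY_POLL_INTERVAL_SEC * 1000),
         * without the detour through milliseconds */
        return jiffies + secs_to_jiffies(MY_POLL_INTERVAL_SEC);
}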
fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK; cascade_virq = msi_data->cascade_array[srs]->virq; - seq_printf(p, " fsl-msi-%d", cascade_virq); + seq_printf(p, "fsl-msi-%d", cascade_virq); } diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 0b6e37f3ffb8..456a4f64ae0a 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -124,10 +124,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); - if (!bmp->bitmap) - panic("%s: Failed to allocate %u bytes\n", __func__, - size); + bmp->bitmap = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index f4e841a36458..1acb53aab252 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -1271,11 +1271,7 @@ static int xmon_batch_next_cpu(void) { unsigned long cpu; - while (!cpumask_empty(&xmon_batch_cpus)) { - cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus, - xmon_batch_start_cpu, true); - if (cpu >= nr_cpu_ids) - break; + for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) { if (xmon_batch_start_cpu == -1) xmon_batch_start_cpu = cpu; if (xmon_switch_cpu(cpu)) @@ -2623,9 +2619,9 @@ static void dump_one_paca(int cpu) printf("paca for cpu 0x%x @ %px:\n", cpu, p); - printf(" %-*s = %s\n", 25, "possible", cpu_possible(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 25, "present", cpu_present(cpu) ? "yes" : "no"); - printf(" %-*s = %s\n", 25, "online", cpu_online(cpu) ? "yes" : "no"); + printf(" %-*s = %s\n", 25, "possible", str_yes_no(cpu_possible(cpu))); + printf(" %-*s = %s\n", 25, "present", str_yes_no(cpu_present(cpu))); + printf(" %-*s = %s\n", 25, "online", str_yes_no(cpu_online(cpu))); #define DUMP(paca, name, format) \ printf(" %-*s = "format"\t(0x%lx)\n", 25, #name, 18, paca->name, \ |