author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-20 10:01:38 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-20 10:01:38 -0700
commit     a1c28b75a95808161cacbb3531c418abe248994e (patch)
tree       d481754b548989457710cd07bdd66df3dedd54f9 /arch/arm/mm
parent     a05a70db34ba24ca009e1c9cedaef26fd17d5470 (diff)
parent     5632a9fbcd451892332d45553ce8b831d5143691 (diff)
download   lwn-a1c28b75a95808161cacbb3531c418abe248994e.tar.gz
           lwn-a1c28b75a95808161cacbb3531c418abe248994e.zip
Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
"Changes included in this pull request:
- revert pxa2xx-flash back to using ioremap_cached() and switch
memremap() to use arch_memremap_wb()
- remove pci=firmware command line argument handling
- remove unnecessary arm_dma_set_mask() implementation, the generic
implementation will do for ARM
- removal of the ARM kallsyms "hack" to work around mode switching
veneers and vectors located below PAGE_OFFSET
- tidy up build system output a little
- add L2 cache power management DT bindings
- remove duplicated local_irq_disable() in reboot paths
- handle AMBA primecell devices better at registration time with PM
domains (needed for Samsung SoCs)
- ARM specific preparation to support Keystone II kexec"
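A note on the dma_mask item above: the ARM-private set_dma_mask callback being removed (its body is visible in the dma-mapping.c hunks further down) performed exactly the check the generic code already makes, which is why dropping it is safe. The sketch below restates that common logic under an invented name; it is not the verbatim core-kernel implementation.

/*
 * Sketch only: the generic dma_set_mask() fallback logic that makes an
 * ARM-specific callback unnecessary. The function name is invented for
 * illustration; it simply mirrors the arm_dma_set_mask() body removed
 * in the dma-mapping.c diff below.
 */
static int generic_set_dma_mask_sketch(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}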
* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
ARM: 8570/2: Documentation: devicetree: Add PL310 PM bindings
ARM: 8569/1: pl2x0: Add OF control of cache power management
ARM: 8568/1: reboot: remove duplicated local_irq_disable()
ARM: 8566/1: drivers: amba: properly handle devices with power domains
ARM: provide arm_has_idmap_alias() helper
ARM: kexec: remove 512MB restriction on kexec crashdump
ARM: provide improved virt_to_idmap() functionality
ARM: kexec: fix crashkernel= handling
ARM: 8557/1: specify install, zinstall, and uinstall as PHONY targets
ARM: 8562/1: suppress "include/generated/mach-types.h is up to date."
ARM: 8553/1: kallsyms: remove --page-offset command line option
ARM: 8552/1: kallsyms: remove special lower address limit for CONFIG_ARM
ARM: 8555/1: kallsyms: ignore ARM mode switching veneers
ARM: 8548/1: dma-mapping: remove arm_dma_set_mask()
ARM: 8554/1: kernel: pci: remove pci=firmware command line parameter handling
ARM: memremap: implement arch_memremap_wb()
memremap: add arch specific hook for MEMREMAP_WB mappings
mtd: pxa2xx-flash: switch back from memremap to ioremap_cached
ARM: reintroduce ioremap_cached() for creating cached I/O mappings
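The memremap()/arch_memremap_wb() commits listed above add an architecture hook for MEMREMAP_WB mappings. The sketch below shows the override pattern as an assumption-laden illustration: the generic fallback and the #ifndef guard are paraphrased from memory rather than quoted, while the ARM-side arch_memremap_wb() signature matches the ioremap.c and nommu.c hunks at the bottom of this page.

/*
 * Illustrative sketch, not verbatim in-tree code. The generic
 * MEMREMAP_WB path falls back to a cached ioremap unless the
 * architecture supplies arch_memremap_wb() (ARM announces its override
 * from its asm/io.h; the exact mechanism there is assumed here).
 */
#ifndef arch_memremap_wb
static void *arch_memremap_wb(phys_addr_t offset, size_t size)
{
	/* default: build a cached mapping and strip the __iomem marker */
	return (__force void *)ioremap_cache(offset, size);
}
#endif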
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-l2x0.c      | 26
-rw-r--r--  arch/arm/mm/cache-uniphier.c  | 26
-rw-r--r--  arch/arm/mm/dma-mapping.c     | 16
-rw-r--r--  arch/arm/mm/idmap.c           |  2
-rw-r--r--  arch/arm/mm/ioremap.c         | 16
-rw-r--r--  arch/arm/mm/nommu.c           |  9
6 files changed, 69 insertions, 26 deletions
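Before the diff itself, the idmap.c entry above is the visible tip of the virt_to_idmap() rework: a per-platform function pointer is replaced by a signed offset. The sketch below shows how an offset-based translation can look; the helper names follow the commit titles in the merge ("provide arm_has_idmap_alias() helper", "provide improved virt_to_idmap() functionality") but are prefixed sketch_ because they are not copied verbatim from the tree.

/* Sketch, assumption-labelled: offset-based idmap translation. */
extern long long arch_phys_to_idmap_offset;	/* set by platform code, e.g. Keystone II */

static inline unsigned long sketch_phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr += arch_phys_to_idmap_offset;

	return addr;
}

static inline unsigned long sketch_virt_to_idmap(unsigned long x)
{
	return sketch_phys_to_idmap(__virt_to_phys(x));
}

static inline bool sketch_arm_has_idmap_alias(void)
{
	/* true when the idmap view of RAM differs from the linear map */
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}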
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9f9d54271aad..c61996c256cc 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -647,11 +647,6 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
 	}
 
-	/* r3p0 or later has power control register */
-	if (rev >= L310_CACHE_ID_RTL_R3P0)
-		l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
-					   L310_STNDBY_MODE_EN;
-
 	/*
 	 * Always enable non-secure access to the lockdown registers -
 	 * we write to them as part of the L2C enable sequence so they
@@ -1141,6 +1136,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	u32 filter[2] = { 0, 0 };
 	u32 assoc;
 	u32 prefetch;
+	u32 power;
 	u32 val;
 	int ret;
 
@@ -1271,6 +1267,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
 	}
 
 	l2x0_saved_regs.prefetch_ctrl = prefetch;
+
+	power = l2x0_saved_regs.pwr_ctrl |
+		L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
+
+	ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
+	if (!ret) {
+		if (!val)
+			power &= ~L310_DYNAMIC_CLK_GATING_EN;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
+	}
+	ret = of_property_read_u32(np, "arm,standby-mode", &val);
+	if (!ret) {
+		if (!val)
+			power &= ~L310_STNDBY_MODE_EN;
+	} else if (ret != -EINVAL) {
+		pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
+	}
+
+	l2x0_saved_regs.pwr_ctrl = power;
 }
 
 static const struct l2c_init_data of_l2c310_data __initconst = {
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index a6fa7b73fbe0..c8e2f4947223 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -96,6 +96,7 @@ struct uniphier_cache_data {
 	void __iomem *ctrl_base;
 	void __iomem *rev_base;
 	void __iomem *op_base;
+	void __iomem *way_ctrl_base;
 	u32 way_present_mask;
 	u32 way_locked_mask;
 	u32 nsets;
@@ -256,10 +257,13 @@ static void __init __uniphier_cache_set_locked_ways(
 					struct uniphier_cache_data *data,
 					u32 way_mask)
 {
+	unsigned int cpu;
+
 	data->way_locked_mask = way_mask & data->way_present_mask;
 
-	writel_relaxed(~data->way_locked_mask & data->way_present_mask,
-		       data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+	for_each_possible_cpu(cpu)
+		writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+			       data->way_ctrl_base + 4 * cpu);
 }
 
 static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -459,6 +463,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		goto err;
 	}
 
+	data->way_ctrl_base = data->ctrl_base + 0xc00;
+
 	if (*cache_level == 2) {
 		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
 		/*
@@ -467,6 +473,22 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		 */
 		if (revision <= 0x16)
 			data->range_op_max_size = (u32)1 << 22;
+
+		/*
+		 * Unfortunatly, the offset address of active way control base
+		 * varies from SoC to SoC.
+		 */
+		switch (revision) {
+		case 0x11:	/* sLD3 */
+			data->way_ctrl_base = data->ctrl_base + 0x870;
+			break;
+		case 0x12:	/* LD4 */
+		case 0x16:	/* sld8 */
+			data->way_ctrl_base = data->ctrl_base + 0x840;
+			break;
+		default:
+			break;
+		}
 	}
 
 	data->range_op_max_size -= data->line_size;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5c2ca062c3fa..ff7ed5697d3e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -190,7 +190,6 @@ struct dma_map_ops arm_dma_ops = {
 	.sync_single_for_device	= arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
-	.set_dma_mask		= arm_dma_set_mask,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -209,7 +208,6 @@ struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_coherent_dma_map_page,
 	.map_sg			= arm_dma_map_sg,
-	.set_dma_mask		= arm_dma_set_mask,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
@@ -1143,16 +1141,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
 static int __init dma_debug_do_init(void)
@@ -2006,8 +1994,6 @@ struct dma_map_ops iommu_ops = {
 	.unmap_sg		= arm_iommu_unmap_sg,
 	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.set_dma_mask		= arm_dma_set_mask,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -2021,8 +2007,6 @@ struct dma_map_ops iommu_coherent_ops = {
 	.map_page	= arm_coherent_iommu_map_page,
 	.map_sg		= arm_coherent_iommu_map_sg,
 	.unmap_sg	= arm_coherent_iommu_unmap_sg,
-
-	.set_dma_mask	= arm_dma_set_mask,
 };
 
 /**
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index bd274a05b8ff..c1a48f88764e 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -15,7 +15,7 @@
  * page tables.
  */
 pgd_t *idmap_pgd;
-unsigned long (*arch_virt_to_idmap)(unsigned long x);
+long long arch_phys_to_idmap_offset;
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 66a978d05958..ff0eed23ddf1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	}
 
 	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 * Don't allow RAM to be mapped with mismatched attributes - this
+	 * causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
+	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
 		return NULL;
 
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -380,11 +381,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				   __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -414,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 			__builtin_return_address(0));
 }
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (__force void *)arch_ioremap_caller(phys_addr, size,
+						   MT_MEMORY_RW,
+						   __builtin_return_address(0));
+}
+
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d5805e4bf2fc..2740967727e2 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -368,11 +368,15 @@ void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 EXPORT_SYMBOL(ioremap);
 
 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+	__alias(ioremap_cached);
+
+void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
 {
 	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
 				    __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+EXPORT_SYMBOL(ioremap_cached);
 
 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
@@ -381,6 +385,11 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+{
+	return (void *)phys_addr;
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
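Finally, the ioremap_cache()/ioremap_cached() pairing in the ioremap.c and nommu.c hunks above relies on the kernel's __alias() helper, which wraps GCC's alias attribute so that two exported names share one implementation. Below is a small, self-contained illustration of the same mechanism with invented names; it is not kernel code.

/*
 * Standalone illustration of the alias technique used above. The macro
 * mirrors what the kernel's __alias() expands to; function names are
 * made up for the example. Compiles with gcc or clang.
 */
#include <stdio.h>

#define my_alias(symbol) __attribute__((alias(#symbol)))

void real_impl(void)
{
	puts("one shared implementation");
}

/* second_name becomes another linker-visible name for real_impl */
void second_name(void) my_alias(real_impl);

int main(void)
{
	real_impl();
	second_name();	/* runs the same code */
	return 0;
}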