Diffstat (limited to 'arch/riscv/mm')
 arch/riscv/mm/fault.c       | 52 ++++++++++++++++++++++++++++++++++++++++
 arch/riscv/mm/hugetlbpage.c |  2 +-
 arch/riscv/mm/init.c        | 29 ++++++++++++++++-----
 arch/riscv/mm/kasan_init.c  | 14 +++++-------
 4 files changed, 84 insertions(+), 13 deletions(-)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index a9f2b4af8f3f..0194324a0c50 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -22,6 +22,57 @@
#include "../kernel/head.h"
+static void show_pte(unsigned long addr)
+{
+ pgd_t *pgdp, pgd;
+ p4d_t *p4dp, p4d;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ struct mm_struct *mm = current->mm;
+
+ if (!mm)
+ mm = &init_mm;
+
+ pr_alert("Current %s pgtable: %luK pagesize, %d-bit VAs, pgdp=0x%016llx\n",
+ current->comm, PAGE_SIZE / SZ_1K, VA_BITS,
+ mm == &init_mm ? (u64)__pa_symbol(mm->pgd) : virt_to_phys(mm->pgd));
+
+ pgdp = pgd_offset(mm, addr);
+ pgd = pgdp_get(pgdp);
+ pr_alert("[%016lx] pgd=%016lx", addr, pgd_val(pgd));
+ if (pgd_none(pgd) || pgd_bad(pgd) || pgd_leaf(pgd))
+ goto out;
+
+ p4dp = p4d_offset(pgdp, addr);
+ p4d = p4dp_get(p4dp);
+ pr_cont(", p4d=%016lx", p4d_val(p4d));
+ if (p4d_none(p4d) || p4d_bad(p4d) || p4d_leaf(p4d))
+ goto out;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ pr_cont(", pud=%016lx", pud_val(pud));
+ if (pud_none(pud) || pud_bad(pud) || pud_leaf(pud))
+ goto out;
+
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get(pmdp);
+ pr_cont(", pmd=%016lx", pmd_val(pmd));
+ if (pmd_none(pmd) || pmd_bad(pmd) || pmd_leaf(pmd))
+ goto out;
+
+ ptep = pte_offset_map(pmdp, addr);
+ if (!ptep)
+ goto out;
+
+ pte = ptep_get(ptep);
+ pr_cont(", pte=%016lx", pte_val(pte));
+ pte_unmap(ptep);
+out:
+ pr_cont("\n");
+}
+
static void die_kernel_fault(const char *msg, unsigned long addr,
struct pt_regs *regs)
{
@@ -31,6 +82,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
addr);
bust_spinlocks(0);
+ show_pte(addr);
die(regs, "Oops");
make_task_dead(SIGKILL);
}
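
Note: the walk above stops at the first absent, bad, or leaf entry, so a
fault report built from the pr_alert/pr_cont format strings in this hunk
would look roughly like the following (values are illustrative, not from a
real oops; shown here for an sv39 walk that finds no pud entry):

  Current swapper pgtable: 4K pagesize, 39-bit VAs, pgdp=0x00000000812e9000
  [ffffffd800000000] pgd=00000000812e9801, p4d=00000000812e9801, pud=0000000000000000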
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 42314f093922..b4a78a4b35cf 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;
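
Note: the extra sz argument follows a generic mm API change in which
huge_ptep_get_and_clear() callers pass the huge page size explicitly. An
illustrative call site, not part of this diff ('h' is assumed to be the
relevant hstate):

	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep, huge_page_size(h));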
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index fc53ce748c80..15b2eda4c364 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/soc.h>
+#include <asm/sparsemem.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
@@ -62,6 +63,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS)
+
+unsigned long vmemmap_start_pfn __ro_after_init;
+EXPORT_SYMBOL(vmemmap_start_pfn);
+#endif
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -240,8 +248,12 @@ static void __init setup_bootmem(void)
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
- if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL)) {
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
+ }
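
Note: a worked example of the rounding, assuming a hypothetical
SECTION_SIZE_BITS of 27 (128 MiB sections) and 4K pages:

	/*
	 * phys_ram_base      = 0x80200000 (already PMD-aligned)
	 * VMEMMAP_ADDR_ALIGN = 1ULL << 27 = 0x08000000
	 * vmemmap_start_pfn  = round_down(0x80200000, 0x08000000) >> 12
	 *                    = 0x80000000 >> 12 = 0x80000
	 */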
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we
@@ -256,8 +268,12 @@ static void __init setup_bootmem(void)
*/
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
- memblock_cap_memory_range(phys_ram_base,
- max_mapped_addr - phys_ram_base);
+ if (memblock_end_of_DRAM() > max_mapped_addr) {
+ memblock_cap_memory_range(phys_ram_base,
+ max_mapped_addr - phys_ram_base);
+ pr_warn("Physical memory overflows the linear mapping size: region above %pa removed",
+ &max_mapped_addr);
+ }
}
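
Note: the cap is now conditional, so systems whose DRAM fits inside the
linear map no longer trim memory or warn. When it does fire, a line built
from the pr_warn format above would read roughly like this (address
hypothetical):

  Physical memory overflows the linear mapping size: region above 0x0000001080000000 removed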
/*
@@ -1101,6 +1117,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
phys_ram_base = CONFIG_PHYS_RAM_BASE;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT;
+#endif
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
@@ -1558,7 +1577,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
return;
}
- pagetable_pte_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
@@ -1580,7 +1599,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemm
}
if (!is_vmemmap)
- pagetable_pmd_dtor(ptdesc);
+ pagetable_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
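
Note: pagetable_pte_dtor() and pagetable_pmd_dtor() were merged into a
single pagetable_dtor() that takes the ptdesc and handles every level. A
generic sketch of the resulting teardown pattern (not this file's exact
free path, which distinguishes reserved pages):

	struct ptdesc *ptdesc = virt_to_ptdesc(page_table);

	pagetable_dtor(ptdesc);	/* level-agnostic destructor */
	pagetable_free(ptdesc);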
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index c301c8d291d2..41c635d6aca4 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -32,7 +32,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
pte_t *ptep, *p;
if (pmd_none(pmdp_get(pmd))) {
- p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -54,7 +54,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
unsigned long next;
if (pud_none(pudp_get(pud))) {
- p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -85,7 +85,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
unsigned long next;
if (p4d_none(p4dp_get(p4d))) {
- p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -116,7 +116,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
unsigned long next;
if (pgd_none(pgdp_get(pgd))) {
- p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
+ p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -385,7 +385,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
next = pud_addr_end(vaddr, end);
if (pud_none(pudp_get(pud_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -405,7 +405,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
next = p4d_addr_end(vaddr, end);
if (p4d_none(p4dp_get(p4d_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
@@ -424,7 +424,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
next = pgd_addr_end(vaddr, end);
if (pgd_none(pgdp_get(pgd_k))) {
- p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
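
Note: memblock_alloc_or_panic() removes the NULL check each of these call
sites would otherwise need; it behaves roughly like the sketch below (the
real helper lives in the memblock code and its exact panic message may
differ):

	static inline void *memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align)
	{
		void *p = memblock_alloc(size, align);

		if (!p)
			panic("Failed to allocate %llu bytes\n",
			      (unsigned long long)size);
		return p;
	}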