Diffstat (limited to 'arch')

 -rw-r--r--  arch/openrisc/kernel/dma.c                22
 -rw-r--r--  arch/powerpc/mm/book3s64/subpage_prot.c   10
 -rw-r--r--  arch/s390/mm/gmap.c                       33

 3 files changed, 32 insertions(+), 33 deletions(-)
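
All three conversions below follow the same pattern: the callback pointers move out of the on-stack struct mm_walk and into a file-scope const struct mm_walk_ops, while the mm_struct and an opaque private cookie become explicit arguments of walk_page_range()/walk_page_vma(). The core API change lives outside arch/ and is not part of this extract. A minimal sketch of the new calling convention, with a hypothetical callback and ops table standing in for the real ones:

#include <linux/pagewalk.h>

/* Hypothetical callback; the real pte_entry handlers (page_set_nocache()
 * and friends) keep exactly this signature across the conversion. */
static int sketch_pte_entry(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
{
        return 0;
}

/* Callbacks now live in a shared, const ops table instead of being
 * re-initialised on the stack at every call site. */
static const struct mm_walk_ops sketch_walk_ops = {
        .pte_entry      = sketch_pte_entry,
};

static int sketch_walk(struct mm_struct *mm, unsigned long start,
                       unsigned long end)
{
        /* The mm and an optional private pointer are passed to the walk itself. */
        return walk_page_range(mm, start, end, &sketch_walk_ops, NULL);
}
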
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index c7812e6effa2..4d5b8bd1d795 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -44,6 +44,10 @@ page_set_nocache(pte_t *pte, unsigned long addr,
return 0;
}
+static const struct mm_walk_ops set_nocache_walk_ops = {
+ .pte_entry = page_set_nocache,
+};
+
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
@@ -59,6 +63,10 @@ page_clear_nocache(pte_t *pte, unsigned long addr,
return 0;
}
+static const struct mm_walk_ops clear_nocache_walk_ops = {
+ .pte_entry = page_clear_nocache,
+};
+
/*
* Alloc "coherent" memory, which for OpenRISC means simply uncached.
*
@@ -81,10 +89,6 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
{
unsigned long va;
void *page;
- struct mm_walk walk = {
- .pte_entry = page_set_nocache,
- .mm = &init_mm
- };
page = alloc_pages_exact(size, gfp | __GFP_ZERO);
if (!page)
@@ -99,7 +103,8 @@ arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
- if (walk_page_range(va, va + size, &walk)) {
+ if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+ NULL)) {
free_pages_exact(page, size);
return NULL;
}
@@ -112,13 +117,10 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long va = (unsigned long)vaddr;
- struct mm_walk walk = {
- .pte_entry = page_clear_nocache,
- .mm = &init_mm
- };
/* walk_page_range shouldn't be able to fail here */
- WARN_ON(walk_page_range(va, va + size, &walk));
+ WARN_ON(walk_page_range(&init_mm, va, va + size,
+ &clear_nocache_walk_ops, NULL));
free_pages_exact(vaddr, size);
}
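
Every caller touched by this patch passes NULL as the new final argument because none of them needs per-walk state; when state is needed, it now travels through that private pointer and is read back via walk->private in the callback, rather than being stuffed into extra fields of struct mm_walk. A rough sketch of that pattern (the counting callback and helper below are hypothetical, not part of this patch):

#include <linux/pagewalk.h>

struct present_count {
        unsigned long ptes;             /* hypothetical per-walk accumulator */
};

static int count_present_pte(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        struct present_count *count = walk->private;    /* per-walk state */

        if (pte_present(*pte))
                count->ptes++;
        return 0;
}

static const struct mm_walk_ops count_present_walk_ops = {
        .pte_entry      = count_present_pte,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        struct present_count count = { 0 };

        /* For a user mm the caller holds mmap_sem, as the powerpc and s390
         * sites below do; the openrisc walks above cover init_mm instead. */
        walk_page_range(mm, start, end, &count_present_walk_ops, &count);
        return count.ptes;
}
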
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 236f0a861ecc..2ef24a53f4c9 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -139,14 +139,14 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
return 0;
}
+static const struct mm_walk_ops subpage_walk_ops = {
+ .pmd_entry = subpage_walk_pmd_entry,
+};
+
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct vm_area_struct *vma;
- struct mm_walk subpage_proto_walk = {
- .mm = mm,
- .pmd_entry = subpage_walk_pmd_entry,
- };
/*
* We don't try too hard, we just mark all the vma in that range
@@ -163,7 +163,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
if (vma->vm_start >= (addr + len))
break;
vma->vm_flags |= VM_NOHUGEPAGE;
- walk_page_vma(vma, &subpage_proto_walk);
+ walk_page_vma(vma, &subpage_walk_ops, NULL);
vma = vma->vm_next;
}
}
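
walk_page_vma() gets the same treatment: the VMA already carries its mm, so only the ops table and the private cookie are added to the call. A short sketch of the per-VMA variant with a hypothetical pmd-level callback (subpage_walk_pmd_entry itself is unchanged by this patch):

#include <linux/pagewalk.h>

/* Hypothetical pmd-level callback, invoked for each pmd in the range. */
static int note_pmd_entry(pmd_t *pmd, unsigned long addr,
                          unsigned long next, struct mm_walk *walk)
{
        /* walk->vma points at the VMA currently being walked. */
        return 0;
}

static const struct mm_walk_ops note_pmd_walk_ops = {
        .pmd_entry      = note_pmd_entry,
};

static void note_one_vma(struct vm_area_struct *vma)
{
        /* Called with mmap_sem held, like subpage_mark_vma_nohuge(). */
        walk_page_vma(vma, &note_pmd_walk_ops, NULL);
}
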
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index cf80feae970d..bd78d504fdad 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2521,13 +2521,9 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
return 0;
}
-static inline void zap_zero_pages(struct mm_struct *mm)
-{
- struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
-
- walk.mm = mm;
- walk_page_range(0, TASK_SIZE, &walk);
-}
+static const struct mm_walk_ops zap_zero_walk_ops = {
+ .pmd_entry = __zap_zero_pages,
+};
/*
* switch on pgstes for its userspace process (for kvm)
@@ -2546,7 +2542,7 @@ int s390_enable_sie(void)
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- zap_zero_pages(mm);
+ walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
up_write(&mm->mmap_sem);
return 0;
}
@@ -2589,12 +2585,13 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
return 0;
}
+static const struct mm_walk_ops enable_skey_walk_ops = {
+ .hugetlb_entry = __s390_enable_skey_hugetlb,
+ .pte_entry = __s390_enable_skey_pte,
+};
+
int s390_enable_skey(void)
{
- struct mm_walk walk = {
- .hugetlb_entry = __s390_enable_skey_hugetlb,
- .pte_entry = __s390_enable_skey_pte,
- };
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc = 0;
@@ -2614,8 +2611,7 @@ int s390_enable_skey(void)
}
mm->def_flags &= ~VM_MERGEABLE;
- walk.mm = mm;
- walk_page_range(0, TASK_SIZE, &walk);
+ walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
up_write(&mm->mmap_sem);
@@ -2633,13 +2629,14 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
return 0;
}
+static const struct mm_walk_ops reset_cmma_walk_ops = {
+ .pte_entry = __s390_reset_cmma,
+};
+
void s390_reset_cmma(struct mm_struct *mm)
{
- struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
-
down_write(&mm->mmap_sem);
- walk.mm = mm;
- walk_page_range(0, TASK_SIZE, &walk);
+ walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
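
The s390 conversion also shows that a single const ops table can carry handlers for more than one level at once (s390_enable_skey needs both a PTE and a hugetlb callback) and can be shared by every call site. A rough sketch of that shape, with hypothetical callbacks standing in for the real s390 ones:

#include <linux/mm.h>
#include <linux/pagewalk.h>

static int combined_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        return 0;
}

/* hugetlb entries carry an extra hmask argument describing the huge page size */
static int combined_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                  unsigned long addr, unsigned long next,
                                  struct mm_walk *walk)
{
        return 0;
}

static const struct mm_walk_ops combined_walk_ops = {
        .hugetlb_entry  = combined_hugetlb_entry,
        .pte_entry      = combined_pte_entry,
};

static void combined_walk_all(struct mm_struct *mm)
{
        /* Same locking as the s390 callers: write-lock mmap_sem around the walk. */
        down_write(&mm->mmap_sem);
        walk_page_range(mm, 0, TASK_SIZE, &combined_walk_ops, NULL);
        up_write(&mm->mmap_sem);
}
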