author    Jérôme Glisse <jglisse@redhat.com>    2018-04-10 16:28:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-11 10:28:30 -0700
commit    ff05c0c6bbe5043af6a1686522ed845f40ba49ee (patch)
tree      9799943dde9991379fc2569182f5ba5cf7accad7 /mm/hmm.c
parent    86586a41b8fe655e28be418a40e9bb2bb478cdd5 (diff)
mm/hmm: use uint64_t for HMM pfn instead of defining hmm_pfn_t to ulong
All the device drivers we care about use 64-bit page table entries. To match this, and to avoid a useless define, convert all HMM pfns to use uint64_t directly. This is a first step on the road to letting drivers use the pfn values returned by HMM directly (saving the memory and CPU cycles spent converting between the two).

Link: http://lkml.kernel.org/r/20180323005527.758-9-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
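The change itself is a mechanical type substitution, but the encoding it standardizes on is worth spelling out. Below is a minimal, self-contained sketch of the helper this patch renames from hmm_pfn_t_from_pfn() to hmm_pfn_from_pfn(); the flag bit positions and shift width are illustrative assumptions, not values copied from <linux/hmm.h>:

    #include <stdint.h>

    #define HMM_PFN_VALID  (1ULL << 0)  /* real flag name, assumed bit position */
    #define HMM_PFN_WRITE  (1ULL << 1)  /* real flag name, assumed bit position */
    #define HMM_PFN_SHIFT  4            /* assumed shift width */

    /* Encode a CPU pfn (an unsigned long) into a 64-bit HMM entry with the
     * permission/status flags packed into the low bits.  After this patch
     * the result is a plain uint64_t rather than the old hmm_pfn_t typedef,
     * so it lines up with 64-bit device page table entries directly. */
    static inline uint64_t hmm_pfn_from_pfn(unsigned long pfn)
    {
            return ((uint64_t)pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
    }

A caller then ORs in HMM_PFN_WRITE for writable mappings, exactly as the hunks below do with "pfns[i] = hmm_pfn_from_pfn(pfn) | flag;".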
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--    mm/hmm.c    26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 5da0f852a7aa..b69f30fc064b 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -304,7 +304,7 @@ struct hmm_vma_walk {
static int hmm_vma_do_fault(struct mm_walk *walk,
unsigned long addr,
- hmm_pfn_t *pfn)
+ uint64_t *pfn)
{
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
struct hmm_vma_walk *hmm_vma_walk = walk->private;
@@ -324,7 +324,7 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
return -EAGAIN;
}
-static void hmm_pfns_special(hmm_pfn_t *pfns,
+static void hmm_pfns_special(uint64_t *pfns,
unsigned long addr,
unsigned long end)
{
@@ -338,7 +338,7 @@ static int hmm_pfns_bad(unsigned long addr,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- hmm_pfn_t *pfns = range->pfns;
+ uint64_t *pfns = range->pfns;
unsigned long i;
i = (addr - range->start) >> PAGE_SHIFT;
@@ -348,7 +348,7 @@ static int hmm_pfns_bad(unsigned long addr,
return 0;
}
-static void hmm_pfns_clear(hmm_pfn_t *pfns,
+static void hmm_pfns_clear(uint64_t *pfns,
unsigned long addr,
unsigned long end)
{
@@ -362,7 +362,7 @@ static int hmm_vma_walk_hole(unsigned long addr,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- hmm_pfn_t *pfns = range->pfns;
+ uint64_t *pfns = range->pfns;
unsigned long i;
hmm_vma_walk->last = addr;
@@ -387,7 +387,7 @@ static int hmm_vma_walk_clear(unsigned long addr,
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
- hmm_pfn_t *pfns = range->pfns;
+ uint64_t *pfns = range->pfns;
unsigned long i;
hmm_vma_walk->last = addr;
@@ -414,7 +414,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
struct vm_area_struct *vma = walk->vma;
- hmm_pfn_t *pfns = range->pfns;
+ uint64_t *pfns = range->pfns;
unsigned long addr = start, i;
bool write_fault;
pte_t *ptep;
@@ -431,7 +431,7 @@ again:
if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
unsigned long pfn;
- hmm_pfn_t flag = 0;
+ uint64_t flag = 0;
pmd_t pmd;
/*
@@ -456,7 +456,7 @@ again:
pfn = pmd_pfn(pmd) + pte_index(addr);
flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
- pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
+ pfns[i] = hmm_pfn_from_pfn(pfn) | flag;
return 0;
}
@@ -490,7 +490,7 @@ again:
* device and report anything else as error.
*/
if (is_device_private_entry(entry)) {
- pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry));
+ pfns[i] = hmm_pfn_from_pfn(swp_offset(entry));
if (is_write_device_private_entry(entry)) {
pfns[i] |= HMM_PFN_WRITE;
} else if (write_fault)
@@ -515,7 +515,7 @@ again:
if (write_fault && !pte_write(pte))
goto fault;
- pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte));
+ pfns[i] = hmm_pfn_from_pfn(pte_pfn(pte));
pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
continue;
@@ -678,8 +678,8 @@ EXPORT_SYMBOL(hmm_vma_range_done);
* This is similar to a regular CPU page fault except that it will not trigger
* any memory migration if the memory being faulted is not accessible by CPUs.
*
- * On error, for one virtual address in the range, the function will set the
- * hmm_pfn_t error flag for the corresponding pfn entry.
+ * On error, for one virtual address in the range, the function will mark the
+ * corresponding HMM pfn entry with an error flag.
*
* Expected use pattern:
* retry:
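The doc comment breaks off at the start of its expected-use pattern, but its shape is the HMM retry loop: fault the range under mmap_sem, consume the pfns, and confirm with hmm_vma_range_done() that no invalidation raced with the walk. A hedged sketch follows; the hmm_vma_fault(range, write, block) and hmm_vma_range_done(range) signatures, the -EAGAIN convention, and the HMM_PFN_ERROR flag name are assumptions about this era of the API, not contents of this diff:

    /* Sketch of a driver-side retry loop around hmm_vma_fault(). */
    static int driver_fault_and_map(struct mm_struct *mm,
                                    struct hmm_range *range, bool write)
    {
            int ret;

    retry:
            down_read(&mm->mmap_sem);
            ret = hmm_vma_fault(range, write, true /* block */);
            if (ret == -EAGAIN)
                    goto retry;     /* assumed: mmap_sem was already dropped */
            if (ret) {
                    up_read(&mm->mmap_sem);
                    return ret;
            }

            /* range->pfns[] now holds uint64_t entries; any address that
             * could not be faulted carries the error flag the comment
             * above mentions (HMM_PFN_ERROR, assumed name).  Build device
             * page table entries from the others here. */

            if (!hmm_vma_range_done(range)) {
                    up_read(&mm->mmap_sem);
                    goto retry;     /* range was invalidated underneath us */
            }
            up_read(&mm->mmap_sem);
            return 0;
    }

The design point this patch serves is visible in the middle of that loop: because the entries are plain uint64_t, a driver with 64-bit device page table entries can consume range->pfns[] with no intermediate conversion.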