path: root/drivers/nvdimm/pmem.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-07-14 19:42:11 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-07-14 19:42:11 -0700
commit    fec88ab0af9706b2201e5daf377c5031c62d11f7 (patch)
tree      7206e8a3ff2dea87f912f4660d453a8c118248ac /drivers/nvdimm/pmem.c
parent    fa6e951a2a440babd7a7310d0f4713e618061767 (diff)
parent    cc5dfd59e375f4d0f2b64643723d16b38b2f2d78 (diff)
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull HMM updates from Jason Gunthorpe:
 "Improvements and bug fixes for the hmm interface in the kernel:

  - Improve clarity, locking and APIs related to the 'hmm mirror' feature
    merged last cycle. In linux-next we now see AMDGPU and nouveau to be
    using this API.

  - Remove old or transitional hmm APIs. These are hold overs from the
    past with no users, or APIs that existed only to manage cross tree
    conflicts. There are still a few more of these cleanups that didn't
    make the merge window cut off.

  - Improve some core mm APIs:
      - export alloc_pages_vma() for driver use
      - refactor into devm_request_free_mem_region() to manage
        DEVICE_PRIVATE resource reservations
      - refactor duplicative driver code into the core dev_pagemap struct

  - Remove hmm wrappers of improved core mm APIs, instead have drivers use
    the simplified API directly

  - Remove DEVICE_PUBLIC

  - Simplify the kconfig flow for the hmm users and core code"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (42 commits)
  mm: don't select MIGRATE_VMA_HELPER from HMM_MIRROR
  mm: remove the HMM config option
  mm: sort out the DEVICE_PRIVATE Kconfig mess
  mm: simplify ZONE_DEVICE page private data
  mm: remove hmm_devmem_add
  mm: remove hmm_vma_alloc_locked_page
  nouveau: use devm_memremap_pages directly
  nouveau: use alloc_page_vma directly
  PCI/P2PDMA: use the dev_pagemap internal refcount
  device-dax: use the dev_pagemap internal refcount
  memremap: provide an optional internal refcount in struct dev_pagemap
  memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
  memremap: remove the data field in struct dev_pagemap
  memremap: add a migrate_to_ram method to struct dev_pagemap_ops
  memremap: lift the devmap_enable manipulation into devm_memremap_pages
  memremap: pass a struct dev_pagemap to ->kill and ->cleanup
  memremap: move dev_pagemap callbacks into a separate structure
  memremap: validate the pagemap type passed to devm_memremap_pages
  mm: factor out a devm_request_free_mem_region helper
  mm: export alloc_pages_vma
  ...
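For context on the memremap changes this file picks up: after this merge a ZONE_DEVICE user describes its region with a struct dev_pagemap whose callbacks live in a separate const struct dev_pagemap_ops, and the new optional internal refcount means ->kill/->cleanup can be omitted when the driver does not pass in its own percpu_ref. The sketch below is illustrative only, showing the post-merge pattern for a hypothetical MEMORY_DEVICE_PRIVATE user; the mydrv_* names and the 64 MB size are invented for the example and are not part of this series.

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/ioport.h>
#include <linux/sizes.h>

struct mydrv {
	struct dev_pagemap pgmap;	/* one pagemap per device memory region */
};

static void mydrv_page_free(struct page *page)
{
	/* return the backing device page to the driver's own allocator */
}

static vm_fault_t mydrv_migrate_to_ram(struct vm_fault *vmf)
{
	/* copy the device-private page hit by this CPU fault back to RAM */
	return VM_FAULT_SIGBUS;		/* placeholder for the real migration */
}

static const struct dev_pagemap_ops mydrv_pagemap_ops = {
	.page_free	= mydrv_page_free,
	.migrate_to_ram	= mydrv_migrate_to_ram,
};

static int mydrv_init_pagemap(struct device *dev, struct mydrv *drv)
{
	struct resource *res;
	void *addr;

	/* carve an unused physical range out of iomem for the device pages */
	res = devm_request_free_mem_region(dev, &iomem_resource, SZ_64M);
	if (IS_ERR(res))
		return PTR_ERR(res);

	drv->pgmap.type = MEMORY_DEVICE_PRIVATE;
	drv->pgmap.res = *res;
	drv->pgmap.ops = &mydrv_pagemap_ops;
	/* no driver-owned percpu_ref: devm_memremap_pages() uses its internal one */

	addr = devm_memremap_pages(dev, &drv->pgmap);
	return PTR_ERR_OR_ZERO(addr);
}

pmem, by contrast, keeps pointing pgmap.ref at the request queue's q_usage_counter, which is why the diff below still wires up .kill and .cleanup for its fs-dax pagemap.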
Diffstat (limited to 'drivers/nvdimm/pmem.c')
-rw-r--r--  drivers/nvdimm/pmem.c | 51
1 file changed, 19 insertions(+), 32 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 24d7fe7c74ed..e7d8cc9f41e8 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -303,24 +303,24 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void __pmem_release_queue(struct percpu_ref *ref)
+static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
 {
-	struct request_queue *q;
+	struct request_queue *q =
+		container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_cleanup_queue(q);
 }
 
-static void pmem_release_queue(void *ref)
+static void pmem_release_queue(void *pgmap)
 {
-	__pmem_release_queue(ref);
+	pmem_pagemap_cleanup(pgmap);
 }
 
-static void pmem_freeze_queue(struct percpu_ref *ref)
+static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
 {
-	struct request_queue *q;
+	struct request_queue *q =
+		container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_freeze_queue_start(q);
 }
@@ -334,26 +334,16 @@ static void pmem_release_disk(void *__pmem)
 	put_disk(pmem->disk);
 }
 
-static void pmem_release_pgmap_ops(void *__pgmap)
-{
-	dev_pagemap_put_ops();
-}
-
-static void fsdax_pagefree(struct page *page, void *data)
+static void pmem_pagemap_page_free(struct page *page)
 {
 	wake_up_var(&page->_refcount);
 }
 
-static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
-{
-	dev_pagemap_get_ops();
-	if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
-		return -ENOMEM;
-	pgmap->type = MEMORY_DEVICE_FS_DAX;
-	pgmap->page_free = fsdax_pagefree;
-
-	return 0;
-}
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+	.page_free	= pmem_pagemap_page_free,
+	.kill		= pmem_pagemap_kill,
+	.cleanup	= pmem_pagemap_cleanup,
+};
 
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns)
@@ -409,11 +399,9 @@ static int pmem_attach_disk(struct device *dev,
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
-	pmem->pgmap.kill = pmem_freeze_queue;
-	pmem->pgmap.cleanup = __pmem_release_queue;
 	if (is_nd_pfn(dev)) {
-		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-			return -ENOMEM;
+		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -424,15 +412,14 @@ static int pmem_attach_disk(struct device *dev,
 		bb_res.start += pmem->data_offset;
 	} else if (pmem_should_map_pages(dev)) {
 		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
-		pmem->pgmap.altmap_valid = false;
-		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-			return -ENOMEM;
+		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+		pmem->pgmap.ops = &fsdax_pagemap_ops;
 		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
 		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 	} else {
 		if (devm_add_action_or_reset(dev, pmem_release_queue,
-					&q->q_usage_counter))
+					&pmem->pgmap))
 			return -ENOMEM;
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
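
Pulling just the added lines together, the converted helpers in pmem.c read roughly as follows after this merge (reassembled from the hunks above for readability; whitespace approximated):

static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_pagemap_page_free(struct page *page)
{
	wake_up_var(&page->_refcount);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.page_free	= pmem_pagemap_page_free,
	.kill		= pmem_pagemap_kill,
	.cleanup	= pmem_pagemap_cleanup,
};

The net effect is that the kill/cleanup/page_free callbacks move out of individual dev_pagemap fields into one shared const ops table, and each callback now receives the struct dev_pagemap itself rather than a bare percpu_ref.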