author		Ross Zwisler <ross.zwisler@linux.intel.com>	2015-06-25 03:08:39 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2015-06-26 11:23:38 -0400
commit		61031952f4c89dba1065f7a5b9419badb112554c (patch)
tree		70a8b29fa96b6222bd19bb604d364bce404f14ae /drivers/nvdimm
parent		74ae66c3b14ffa94c8d2dea201cdf8e6203d13d5 (diff)
arch, x86: pmem api for ensuring durability of persistent memory updates
Based on an original patch by Ross Zwisler [1].
Writes to persistent memory have the potential to be posted to the CPU
cache, CPU write buffers, and platform write buffers (memory controller)
before being committed to persistent media. Provide APIs,
memcpy_to_pmem(), wmb_pmem(), and memremap_pmem(), to write data to
pmem and assert that it is durable in PMEM (a persistent linear address
range). A '__pmem' attribute is added so sparse can track proper usage
of pointers to pmem.
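To illustrate the intended usage pattern (the pmem_rw_bytes() change in
the diff below follows the same sequence): map the range once, write
through memcpy_to_pmem(), then assert durability with wmb_pmem(). A
minimal sketch, assuming a caller that owns a physical pmem range; the
helper name and error code are illustrative, not part of this patch:

	#include <linux/pmem.h>

	/* Hypothetical helper: make 'len' bytes from 'buf' durable at 'phys'. */
	static int example_persist(phys_addr_t phys, const void *buf, size_t len)
	{
		void __pmem *addr = memremap_pmem(phys, len);

		if (!addr)
			return -ENXIO;

		memcpy_to_pmem(addr, buf, len);	/* may still sit in CPU caches */
		wmb_pmem();			/* flush posted writes to media */

		memunmap_pmem(addr);
		return 0;
	}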
This continues the status quo of pmem being x86-only for 4.2; the
reworks to ioremap and the wider implementation of memremap() will
enable other architectures in 4.3.
[1]: https://lists.01.org/pipermail/linux-nvdimm/2015-May/000932.html
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
[djbw: various reworks]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--	drivers/nvdimm/pmem.c | 33 ++++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 42b766f33e59..ade9eb917a4d 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
+#include <linux/pmem.h>
 #include <linux/nd.h>
 #include "nd.h"
 
@@ -32,7 +33,7 @@ struct pmem_device {
 
 	/* One contiguous memory region per device */
 	phys_addr_t		phys_addr;
-	void			*virt_addr;
+	void __pmem		*virt_addr;
 	size_t			size;
 };
 
@@ -44,13 +45,14 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 {
 	void *mem = kmap_atomic(page);
 	size_t pmem_off = sector << 9;
+	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
 
 	if (rw == READ) {
-		memcpy(mem + off, pmem->virt_addr + pmem_off, len);
+		memcpy_from_pmem(mem + off, pmem_addr, len);
 		flush_dcache_page(page);
 	} else {
 		flush_dcache_page(page);
-		memcpy(pmem->virt_addr + pmem_off, mem + off, len);
+		memcpy_to_pmem(pmem_addr, mem + off, len);
 	}
 
 	kunmap_atomic(mem);
@@ -71,6 +73,10 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
 			bio_data_dir(bio), iter.bi_sector);
 	if (do_acct)
 		nd_iostat_end(bio, start);
+
+	if (bio_data_dir(bio))
+		wmb_pmem();
+
 	bio_endio(bio, 0);
 }
 
@@ -94,7 +100,8 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
 	if (!pmem)
 		return -ENODEV;
 
-	*kaddr = pmem->virt_addr + offset;
+	/* FIXME convert DAX to comprehend that this mapping has a lifetime */
+	*kaddr = (void __force *) pmem->virt_addr + offset;
 	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
 
 	return pmem->size - offset;
@@ -118,6 +125,8 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
+	if (!arch_has_pmem_api())
+		dev_warn(dev, "unable to guarantee persistence of writes\n");
 
 	if (!request_mem_region(pmem->phys_addr, pmem->size, dev_name(dev))) {
 		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
@@ -126,11 +135,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		return ERR_PTR(-EBUSY);
 	}
 
-	/*
-	 * Map the memory as non-cachable, as we can't write back the contents
-	 * of the CPU caches in case of a crash.
-	 */
-	pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size);
+	pmem->virt_addr = memremap_pmem(pmem->phys_addr, pmem->size);
 	if (!pmem->virt_addr) {
 		release_mem_region(pmem->phys_addr, pmem->size);
 		kfree(pmem);
@@ -195,16 +200,18 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	if (rw == READ)
-		memcpy(buf, pmem->virt_addr + offset, size);
-	else
-		memcpy(pmem->virt_addr + offset, buf, size);
+		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
+	else {
+		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
+		wmb_pmem();
+	}
 
 	return 0;
 }
 
 static void pmem_free(struct pmem_device *pmem)
 {
-	iounmap(pmem->virt_addr);
+	memunmap_pmem(pmem->virt_addr);
 	release_mem_region(pmem->phys_addr, pmem->size);
 	kfree(pmem);
 }
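A note on the new '__pmem' annotation: like __iomem, it marks a separate
sparse address space, so mixing __pmem pointers with plain pointers must
be made explicit with __force, as the pmem_direct_access() hunk above
does. A minimal sketch of what sparse would flag (variable names are
illustrative, not from this patch):

	void __pmem *src = pmem->virt_addr;
	void *dst;

	dst = src;			/* sparse warns: different address spaces */
	dst = (void __force *)src;	/* explicit opt-out, as in pmem_direct_access() */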