author    Linus Walleij <linus.walleij@linaro.org>    2023-08-08 10:40:58 +0200
committer Michal Simek <michal.simek@amd.com>         2023-08-23 09:35:22 +0200
commit    0d2b49479bf91c857d83608da7b64328e556dff7 (patch)
tree      344c729e06a74ee0f166215611a5a8c5e393f950 /arch/microblaze
parent    b48edb8665fe7b90ff11b23bcc949fee95c035f0 (diff)
microblaze: Make virt_to_pfn() a static inline
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Move the function down in the file so __pa() exists in our scope and it compiles. This in turn requires moving __pa(), as it depends on __virt_to_phys() that was below. (Lazy macro evaluation conflicts with strict function ordering.)

Make a symmetric change to pfn_to_virt() so we have type checking both ways.

Because the <asm/page.h> file is included into some assembly files, some further inclusion guards are needed to keep the assembly compiling.

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/20230808-virt-to-phys-microblaze-v1-1-e6df710fe0a1@linaro.org
Signed-off-by: Michal Simek <michal.simek@amd.com>
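As a rough standalone sketch (not part of the patch; the PAGE_SHIFT value and the identity-map __pa() stand-in below are simplifying assumptions, not the MicroBlaze implementation), this shows the type-checking difference the commit message describes: the old-style macro silently accepts a bare unsigned long, while the typed static inline makes the compiler flag the integer-to-pointer conversion.

/* Minimal sketch: why a (const void *) static inline beats a polymorphic macro. */
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-in for __pa(): pretend physical == virtual for the demo. */
#define __pa(vaddr) ((unsigned long)(vaddr))

/* Old style: the macro accepts any integer-ish argument without complaint. */
#define virt_to_pfn_macro(vaddr) (__pa(vaddr) >> PAGE_SHIFT)

/* New style: the prototype documents and enforces the (const void *) contract. */
static inline unsigned long virt_to_pfn(const void *vaddr)
{
        return __pa(vaddr) >> PAGE_SHIFT;
}

int main(void)
{
        char buf[64];
        unsigned long addr_as_ulong = (unsigned long)buf;

        /* Both forms work for a real pointer. */
        printf("%lu\n", virt_to_pfn_macro(buf));
        printf("%lu\n", virt_to_pfn(buf));

        /* The macro also swallows a plain unsigned long without a warning... */
        printf("%lu\n", virt_to_pfn_macro(addr_as_ulong));

        /* ...whereas the static inline makes the compiler warn that the call
         * makes a pointer from an integer without a cast, surfacing the
         * questionable caller:
         *
         * printf("%lu\n", virt_to_pfn(addr_as_ulong));
         */
        return 0;
}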
Diffstat (limited to 'arch/microblaze')
-rw-r--r--   arch/microblaze/include/asm/page.h   27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 337f23eabc71..86a4ce07c192 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -99,9 +99,6 @@ extern int page_is_ram(unsigned long pfn);
# define phys_to_pfn(phys) (PFN_DOWN(phys))
# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
-# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
-# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
-
# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -109,11 +106,6 @@ extern int page_is_ram(unsigned long pfn);
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
# endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
-
-# define __pa(x) __virt_to_phys((unsigned long)(x))
-# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
-
/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
#define __virt_to_phys(addr) \
@@ -125,6 +117,25 @@ extern int page_is_ram(unsigned long pfn);
#define tovirt(rd, rs) \
addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#ifndef __ASSEMBLY__
+
+# define __pa(x) __virt_to_phys((unsigned long)(x))
+# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+
+static inline unsigned long virt_to_pfn(const void *vaddr)
+{
+ return phys_to_pfn(__pa(vaddr));
+}
+
+static inline const void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn_to_phys((pfn)));
+}
+
+#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+
+#endif /* __ASSEMBLY__ */
+
#define TOPHYS(addr) __virt_to_phys(addr)
#endif /* __KERNEL__ */