diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/prefetch.h | |
download | lwn-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz lwn-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.zip |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/linux/prefetch.h')
-rw-r--r-- | include/linux/prefetch.h | 69 |
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h new file mode 100644 index 000000000000..fc86f274147f --- /dev/null +++ b/include/linux/prefetch.h @@ -0,0 +1,69 @@ +/* + * Generic cache management functions. Everything is arch-specific, + * but this header exists to make sure the defines/functions can be + * used in a generic way. + * + * 2000-11-13 Arjan van de Ven <arjan@fenrus.demon.nl> + * + */ + +#ifndef _LINUX_PREFETCH_H +#define _LINUX_PREFETCH_H + +#include <linux/types.h> +#include <asm/processor.h> +#include <asm/cache.h> + +/* + prefetch(x) attempts to pre-emptively get the memory pointed to + by address "x" into the CPU L1 cache. + prefetch(x) should not cause any kind of exception, prefetch(0) is + specifically ok. + + prefetch() should be defined by the architecture, if not, the + #define below provides a no-op define. + + There are 3 prefetch() macros: + + prefetch(x) - prefetches the cacheline at "x" for read + prefetchw(x) - prefetches the cacheline at "x" for write + spin_lock_prefetch(x) - prefectches the spinlock *x for taking + + there is also PREFETCH_STRIDE which is the architecure-prefered + "lookahead" size for prefetching streamed operations. + +*/ + +/* + * These cannot be do{}while(0) macros. See the mental gymnastics in + * the loop macro. + */ + +#ifndef ARCH_HAS_PREFETCH +static inline void prefetch(const void *x) {;} +#endif + +#ifndef ARCH_HAS_PREFETCHW +static inline void prefetchw(const void *x) {;} +#endif + +#ifndef ARCH_HAS_SPINLOCK_PREFETCH +#define spin_lock_prefetch(x) prefetchw(x) +#endif + +#ifndef PREFETCH_STRIDE +#define PREFETCH_STRIDE (4*L1_CACHE_BYTES) +#endif + +static inline void prefetch_range(void *addr, size_t len) +{ +#ifdef ARCH_HAS_PREFETCH + char *cp; + char *end = addr + len; + + for (cp = addr; cp < end; cp += PREFETCH_STRIDE) + prefetch(cp); +#endif +} + +#endif |