| author | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 11:16:28 +0200 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 11:16:28 +0200 |
| commit | 2ec1df4130c60d1eb49dc0fa0ed15858fede6b05 (patch) | |
| tree | 97e578ba1546770eadbe84cff2dc44256f97b9d7 | /arch/i386/kernel |
| parent | ee580dc91efd83e6b55955e7261e8ad2a0e08d1a (diff) | |
| download | lwn-2ec1df4130c60d1eb49dc0fa0ed15858fede6b05.tar.gz lwn-2ec1df4130c60d1eb49dc0fa0ed15858fede6b05.zip | |
i386: move kernel/cpu/mtrr
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/i386/kernel')
| -rw-r--r-- | arch/i386/kernel/cpu/Makefile | 2 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/Makefile | 3 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/amd.c | 121 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/centaur.c | 224 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/cyrix.c | 380 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/generic.c | 509 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/if.c | 439 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/main.c | 768 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/mtrr.h | 98 |
| -rw-r--r-- | arch/i386/kernel/cpu/mtrr/state.c | 79 |
10 files changed, 1 insertion(+), 2622 deletions(-)
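The hunks below show only the i386 side of the move: the local mtrr/ directory and its sources are deleted, and the cpu Makefile is redirected to the copy now living under arch/x86, using the same relative-path pattern already applied to mcheck/ and cpufreq/. A minimal sketch of the resulting kbuild wiring follows; the arch/x86 Makefile contents are assumed to be carried over unchanged from the deleted i386 version, since that side of the move is outside this diffstat.

```make
# arch/i386/kernel/cpu/Makefile after this commit: instead of descending
# into a local mtrr/ directory, kbuild now builds the shared arch/x86 copy.
obj-$(CONFIG_MTRR)   += ../../../x86/kernel/cpu/mtrr/

# arch/x86/kernel/cpu/mtrr/Makefile (assumed identical to the i386 version
# deleted below): the generic driver is always built, while the AMD, Cyrix
# and Centaur helpers are only built for 32-bit kernels.
obj-y                := main.o if.o generic.o state.o
obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
```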
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile index 8d9ce0232ada..6687f6d5ad2f 100644 --- a/arch/i386/kernel/cpu/Makefile +++ b/arch/i386/kernel/cpu/Makefile @@ -14,7 +14,7 @@ obj-y += umc.o obj-$(CONFIG_X86_MCE) += ../../../x86/kernel/cpu/mcheck/ -obj-$(CONFIG_MTRR) += mtrr/ +obj-$(CONFIG_MTRR) += ../../../x86/kernel/cpu/mtrr/ obj-$(CONFIG_CPU_FREQ) += ../../../x86/kernel/cpu/cpufreq/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile deleted file mode 100644 index 191fc0533649..000000000000 --- a/arch/i386/kernel/cpu/mtrr/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -obj-y := main.o if.o generic.o state.o -obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o - diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c deleted file mode 100644 index 0949cdbf848a..000000000000 --- a/arch/i386/kernel/cpu/mtrr/amd.c +++ /dev/null @@ -1,121 +0,0 @@ -#include <linux/init.h> -#include <linux/mm.h> -#include <asm/mtrr.h> -#include <asm/msr.h> - -#include "mtrr.h" - -static void -amd_get_mtrr(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type * type) -{ - unsigned long low, high; - - rdmsr(MSR_K6_UWCCR, low, high); - /* Upper dword is region 1, lower is region 0 */ - if (reg == 1) - low = high; - /* The base masks off on the right alignment */ - *base = (low & 0xFFFE0000) >> PAGE_SHIFT; - *type = 0; - if (low & 1) - *type = MTRR_TYPE_UNCACHABLE; - if (low & 2) - *type = MTRR_TYPE_WRCOMB; - if (!(low & 3)) { - *size = 0; - return; - } - /* - * This needs a little explaining. The size is stored as an - * inverted mask of bits of 128K granularity 15 bits long offset - * 2 bits - * - * So to get a size we do invert the mask and add 1 to the lowest - * mask bit (4 as its 2 bits in). This gives us a size we then shift - * to turn into 128K blocks - * - * eg 111 1111 1111 1100 is 512K - * - * invert 000 0000 0000 0011 - * +1 000 0000 0000 0100 - * *128K ... - */ - low = (~low) & 0x1FFFC; - *size = (low + 4) << (15 - PAGE_SHIFT); - return; -} - -static void amd_set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -/* [SUMMARY] Set variable MTRR register on the local CPU. - <reg> The register to set. - <base> The base address of the region. - <size> The size of the region. If this is 0 the region is disabled. - <type> The type of the region. - <do_safe> If TRUE, do the change safely. If FALSE, safety measures should - be done externally. - [RETURNS] Nothing. -*/ -{ - u32 regs[2]; - - /* - * Low is MTRR0 , High MTRR 1 - */ - rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); - /* - * Blank to disable - */ - if (size == 0) - regs[reg] = 0; - else - /* Set the register to the base, the type (off by one) and an - inverted bitmask of the size The size is the only odd - bit. We are fed say 512K We invert this and we get 111 1111 - 1111 1011 but if you subtract one and invert you get the - desired 111 1111 1111 1100 mask - - But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ - regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) - | (base << PAGE_SHIFT) | (type + 1); - - /* - * The writeback rule is quite specific. See the manual. 
Its - * disable local interrupts, write back the cache, set the mtrr - */ - wbinvd(); - wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); -} - -static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) -{ - /* Apply the K6 block alignment and size rules - In order - o Uncached or gathering only - o 128K or bigger block - o Power of 2 block - o base suitably aligned to the power - */ - if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) - || (size & ~(size - 1)) - size || (base & (size - 1))) - return -EINVAL; - return 0; -} - -static struct mtrr_ops amd_mtrr_ops = { - .vendor = X86_VENDOR_AMD, - .set = amd_set_mtrr, - .get = amd_get_mtrr, - .get_free_region = generic_get_free_region, - .validate_add_page = amd_validate_add_page, - .have_wrcomb = positive_have_wrcomb, -}; - -int __init amd_init_mtrr(void) -{ - set_mtrr_ops(&amd_mtrr_ops); - return 0; -} - -//arch_initcall(amd_mtrr_init); diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c deleted file mode 100644 index cb9aa3a7a7ab..000000000000 --- a/arch/i386/kernel/cpu/mtrr/centaur.c +++ /dev/null @@ -1,224 +0,0 @@ -#include <linux/init.h> -#include <linux/mm.h> -#include <asm/mtrr.h> -#include <asm/msr.h> -#include "mtrr.h" - -static struct { - unsigned long high; - unsigned long low; -} centaur_mcr[8]; - -static u8 centaur_mcr_reserved; -static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ - -/* - * Report boot time MCR setups - */ - -static int -centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) -/* [SUMMARY] Get a free MTRR. - <base> The starting (base) address of the region. - <size> The size (in bytes) of the region. - [RETURNS] The index of the region on success, else -1 on error. -*/ -{ - int i, max; - mtrr_type ltype; - unsigned long lbase, lsize; - - max = num_var_ranges; - if (replace_reg >= 0 && replace_reg < max) - return replace_reg; - for (i = 0; i < max; ++i) { - if (centaur_mcr_reserved & (1 << i)) - continue; - mtrr_if->get(i, &lbase, &lsize, <ype); - if (lsize == 0) - return i; - } - return -ENOSPC; -} - -void -mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) -{ - centaur_mcr[mcr].low = lo; - centaur_mcr[mcr].high = hi; -} - -static void -centaur_get_mcr(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type * type) -{ - *base = centaur_mcr[reg].high >> PAGE_SHIFT; - *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; - *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ - if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) - *type = MTRR_TYPE_UNCACHABLE; - if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) - *type = MTRR_TYPE_WRBACK; - if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) - *type = MTRR_TYPE_WRBACK; - -} - -static void centaur_set_mcr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -{ - unsigned long low, high; - - if (size == 0) { - /* Disable */ - high = low = 0; - } else { - high = base << PAGE_SHIFT; - if (centaur_mcr_type == 0) - low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ - else { - if (type == MTRR_TYPE_UNCACHABLE) - low = -size << PAGE_SHIFT | 0x02; /* NC */ - else - low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ - } - } - centaur_mcr[reg].high = high; - centaur_mcr[reg].low = low; - wrmsr(MSR_IDT_MCR0 + reg, low, high); -} - -#if 0 -/* - * Initialise the later (saner) Winchip MCR variant. 
In this version - * the BIOS can pass us the registers it has used (but not their values) - * and the control register is read/write - */ - -static void __init -centaur_mcr1_init(void) -{ - unsigned i; - u32 lo, hi; - - /* Unfortunately, MCR's are read-only, so there is no way to - * find out what the bios might have done. - */ - - rdmsr(MSR_IDT_MCR_CTRL, lo, hi); - if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */ - lo &= ~0x1C0; /* clear key */ - lo |= 0x040; /* set key to 1 */ - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */ - } - - centaur_mcr_type = 1; - - /* - * Clear any unconfigured MCR's. - */ - - for (i = 0; i < 8; ++i) { - if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) { - if (!(lo & (1 << (9 + i)))) - wrmsr(MSR_IDT_MCR0 + i, 0, 0); - else - /* - * If the BIOS set up an MCR we cannot see it - * but we don't wish to obliterate it - */ - centaur_mcr_reserved |= (1 << i); - } - } - /* - * Throw the main write-combining switch... - * However if OOSTORE is enabled then people have already done far - * cleverer things and we should behave. - */ - - lo |= 15; /* Write combine enables */ - wrmsr(MSR_IDT_MCR_CTRL, lo, hi); -} - -/* - * Initialise the original winchip with read only MCR registers - * no used bitmask for the BIOS to pass on and write only control - */ - -static void __init -centaur_mcr0_init(void) -{ - unsigned i; - - /* Unfortunately, MCR's are read-only, so there is no way to - * find out what the bios might have done. - */ - - /* Clear any unconfigured MCR's. - * This way we are sure that the centaur_mcr array contains the actual - * values. The disadvantage is that any BIOS tweaks are thus undone. - * - */ - for (i = 0; i < 8; ++i) { - if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) - wrmsr(MSR_IDT_MCR0 + i, 0, 0); - } - - wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */ -} - -/* - * Initialise Winchip series MCR registers - */ - -static void __init -centaur_mcr_init(void) -{ - struct set_mtrr_context ctxt; - - set_mtrr_prepare_save(&ctxt); - set_mtrr_cache_disable(&ctxt); - - if (boot_cpu_data.x86_model == 4) - centaur_mcr0_init(); - else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9) - centaur_mcr1_init(); - - set_mtrr_done(&ctxt); -} -#endif - -static int centaur_validate_add_page(unsigned long base, - unsigned long size, unsigned int type) -{ - /* - * FIXME: Winchip2 supports uncached - */ - if (type != MTRR_TYPE_WRCOMB && - (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { - printk(KERN_WARNING - "mtrr: only write-combining%s supported\n", - centaur_mcr_type ? 
" and uncacheable are" - : " is"); - return -EINVAL; - } - return 0; -} - -static struct mtrr_ops centaur_mtrr_ops = { - .vendor = X86_VENDOR_CENTAUR, -// .init = centaur_mcr_init, - .set = centaur_set_mcr, - .get = centaur_get_mcr, - .get_free_region = centaur_get_free_region, - .validate_add_page = centaur_validate_add_page, - .have_wrcomb = positive_have_wrcomb, -}; - -int __init centaur_init_mtrr(void) -{ - set_mtrr_ops(¢aur_mtrr_ops); - return 0; -} - -//arch_initcall(centaur_init_mtrr); diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c deleted file mode 100644 index 2287d4863a8a..000000000000 --- a/arch/i386/kernel/cpu/mtrr/cyrix.c +++ /dev/null @@ -1,380 +0,0 @@ -#include <linux/init.h> -#include <linux/mm.h> -#include <asm/mtrr.h> -#include <asm/msr.h> -#include <asm/io.h> -#include <asm/processor-cyrix.h> -#include "mtrr.h" - -int arr3_protected; - -static void -cyrix_get_arr(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type * type) -{ - unsigned long flags; - unsigned char arr, ccr3, rcr, shift; - - arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ - - /* Save flags and disable interrupts */ - local_irq_save(flags); - - ccr3 = getCx86(CX86_CCR3); - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ - ((unsigned char *) base)[3] = getCx86(arr); - ((unsigned char *) base)[2] = getCx86(arr + 1); - ((unsigned char *) base)[1] = getCx86(arr + 2); - rcr = getCx86(CX86_RCR_BASE + reg); - setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ - - /* Enable interrupts if it was enabled previously */ - local_irq_restore(flags); - shift = ((unsigned char *) base)[1] & 0x0f; - *base >>= PAGE_SHIFT; - - /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 - * Note: shift==0xf means 4G, this is unsupported. - */ - if (shift) - *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1); - else - *size = 0; - - /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */ - if (reg < 7) { - switch (rcr) { - case 1: - *type = MTRR_TYPE_UNCACHABLE; - break; - case 8: - *type = MTRR_TYPE_WRBACK; - break; - case 9: - *type = MTRR_TYPE_WRCOMB; - break; - case 24: - default: - *type = MTRR_TYPE_WRTHROUGH; - break; - } - } else { - switch (rcr) { - case 0: - *type = MTRR_TYPE_UNCACHABLE; - break; - case 8: - *type = MTRR_TYPE_WRCOMB; - break; - case 9: - *type = MTRR_TYPE_WRBACK; - break; - case 25: - default: - *type = MTRR_TYPE_WRTHROUGH; - break; - } - } -} - -static int -cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) -/* [SUMMARY] Get a free ARR. - <base> The starting (base) address of the region. - <size> The size (in bytes) of the region. - [RETURNS] The index of the region on success, else -1 on error. 
-*/ -{ - int i; - mtrr_type ltype; - unsigned long lbase, lsize; - - switch (replace_reg) { - case 7: - if (size < 0x40) - break; - case 6: - case 5: - case 4: - return replace_reg; - case 3: - if (arr3_protected) - break; - case 2: - case 1: - case 0: - return replace_reg; - } - /* If we are to set up a region >32M then look at ARR7 immediately */ - if (size > 0x2000) { - cyrix_get_arr(7, &lbase, &lsize, <ype); - if (lsize == 0) - return 7; - /* Else try ARR0-ARR6 first */ - } else { - for (i = 0; i < 7; i++) { - cyrix_get_arr(i, &lbase, &lsize, <ype); - if ((i == 3) && arr3_protected) - continue; - if (lsize == 0) - return i; - } - /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ - cyrix_get_arr(i, &lbase, &lsize, <ype); - if ((lsize == 0) && (size >= 0x40)) - return i; - } - return -ENOSPC; -} - -static u32 cr4 = 0; -static u32 ccr3; - -static void prepare_set(void) -{ - u32 cr0; - - /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if ( cpu_has_pge ) { - cr4 = read_cr4(); - write_cr4(cr4 & ~X86_CR4_PGE); - } - - /* Disable and flush caches. Note that wbinvd flushes the TLBs as - a side-effect */ - cr0 = read_cr0() | 0x40000000; - wbinvd(); - write_cr0(cr0); - wbinvd(); - - /* Cyrix ARRs - everything else were excluded at the top */ - ccr3 = getCx86(CX86_CCR3); - - /* Cyrix ARRs - everything else were excluded at the top */ - setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); - -} - -static void post_set(void) -{ - /* Flush caches and TLBs */ - wbinvd(); - - /* Cyrix ARRs - everything else was excluded at the top */ - setCx86(CX86_CCR3, ccr3); - - /* Enable caches */ - write_cr0(read_cr0() & 0xbfffffff); - - /* Restore value of CR4 */ - if ( cpu_has_pge ) - write_cr4(cr4); -} - -static void cyrix_set_arr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -{ - unsigned char arr, arr_type, arr_size; - - arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ - - /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */ - if (reg >= 7) - size >>= 6; - - size &= 0x7fff; /* make sure arr_size <= 14 */ - for (arr_size = 0; size; arr_size++, size >>= 1) ; - - if (reg < 7) { - switch (type) { - case MTRR_TYPE_UNCACHABLE: - arr_type = 1; - break; - case MTRR_TYPE_WRCOMB: - arr_type = 9; - break; - case MTRR_TYPE_WRTHROUGH: - arr_type = 24; - break; - default: - arr_type = 8; - break; - } - } else { - switch (type) { - case MTRR_TYPE_UNCACHABLE: - arr_type = 0; - break; - case MTRR_TYPE_WRCOMB: - arr_type = 8; - break; - case MTRR_TYPE_WRTHROUGH: - arr_type = 25; - break; - default: - arr_type = 9; - break; - } - } - - prepare_set(); - - base <<= PAGE_SHIFT; - setCx86(arr, ((unsigned char *) &base)[3]); - setCx86(arr + 1, ((unsigned char *) &base)[2]); - setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); - setCx86(CX86_RCR_BASE + reg, arr_type); - - post_set(); -} - -typedef struct { - unsigned long base; - unsigned long size; - mtrr_type type; -} arr_state_t; - -static arr_state_t arr_state[8] = { - {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, - {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} -}; - -static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 }; - -static void cyrix_set_all(void) -{ - int i; - - prepare_set(); - - /* the CCRs are not contiguous */ - for (i = 0; i < 4; i++) - setCx86(CX86_CCR0 + i, ccr_state[i]); - for (; i < 7; i++) - setCx86(CX86_CCR4 + i, ccr_state[i]); - for (i = 0; i < 8; i++) - cyrix_set_arr(i, arr_state[i].base, - arr_state[i].size, 
arr_state[i].type); - - post_set(); -} - -#if 0 -/* - * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection - * with the SMM (System Management Mode) mode. So we need the following: - * Check whether SMI_LOCK (CCR3 bit 0) is set - * if it is set, write a warning message: ARR3 cannot be changed! - * (it cannot be changed until the next processor reset) - * if it is reset, then we can change it, set all the needed bits: - * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset) - * - disable access to SMM memory (CCR1 bit 2 reset) - * - disable SMM mode (CCR1 bit 1 reset) - * - disable write protection of ARR3 (CCR6 bit 1 reset) - * - (maybe) disable ARR3 - * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set) - */ -static void __init -cyrix_arr_init(void) -{ - struct set_mtrr_context ctxt; - unsigned char ccr[7]; - int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 }; -#ifdef CONFIG_SMP - int i; -#endif - - /* flush cache and enable MAPEN */ - set_mtrr_prepare_save(&ctxt); - set_mtrr_cache_disable(&ctxt); - - /* Save all CCRs locally */ - ccr[0] = getCx86(CX86_CCR0); - ccr[1] = getCx86(CX86_CCR1); - ccr[2] = getCx86(CX86_CCR2); - ccr[3] = ctxt.ccr3; - ccr[4] = getCx86(CX86_CCR4); - ccr[5] = getCx86(CX86_CCR5); - ccr[6] = getCx86(CX86_CCR6); - - if (ccr[3] & 1) { - ccrc[3] = 1; - arr3_protected = 1; - } else { - /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and - * access to SMM memory through ARR3 (bit 7). - */ - if (ccr[1] & 0x80) { - ccr[1] &= 0x7f; - ccrc[1] |= 0x80; - } - if (ccr[1] & 0x04) { - ccr[1] &= 0xfb; - ccrc[1] |= 0x04; - } - if (ccr[1] & 0x02) { - ccr[1] &= 0xfd; - ccrc[1] |= 0x02; - } - arr3_protected = 0; - if (ccr[6] & 0x02) { - ccr[6] &= 0xfd; - ccrc[6] = 1; /* Disable write protection of ARR3 */ - setCx86(CX86_CCR6, ccr[6]); - } - /* Disable ARR3. This is safe now that we disabled SMM. */ - /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */ - } - /* If we changed CCR1 in memory, change it in the processor, too. 
*/ - if (ccrc[1]) - setCx86(CX86_CCR1, ccr[1]); - - /* Enable ARR usage by the processor */ - if (!(ccr[5] & 0x20)) { - ccr[5] |= 0x20; - ccrc[5] = 1; - setCx86(CX86_CCR5, ccr[5]); - } -#ifdef CONFIG_SMP - for (i = 0; i < 7; i++) - ccr_state[i] = ccr[i]; - for (i = 0; i < 8; i++) - cyrix_get_arr(i, - &arr_state[i].base, &arr_state[i].size, - &arr_state[i].type); -#endif - - set_mtrr_done(&ctxt); /* flush cache and disable MAPEN */ - - if (ccrc[5]) - printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n"); - if (ccrc[3]) - printk(KERN_INFO "mtrr: ARR3 cannot be changed\n"); -/* - if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n"); - if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n"); - if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n"); -*/ - if (ccrc[6]) - printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n"); -} -#endif - -static struct mtrr_ops cyrix_mtrr_ops = { - .vendor = X86_VENDOR_CYRIX, -// .init = cyrix_arr_init, - .set_all = cyrix_set_all, - .set = cyrix_set_arr, - .get = cyrix_get_arr, - .get_free_region = cyrix_get_free_region, - .validate_add_page = generic_validate_add_page, - .have_wrcomb = positive_have_wrcomb, -}; - -int __init cyrix_init_mtrr(void) -{ - set_mtrr_ops(&cyrix_mtrr_ops); - return 0; -} - -//arch_initcall(cyrix_init_mtrr); diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c deleted file mode 100644 index 56f64e34829f..000000000000 --- a/arch/i386/kernel/cpu/mtrr/generic.c +++ /dev/null @@ -1,509 +0,0 @@ -/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong - because MTRRs can span upto 40 bits (36bits on most modern x86) */ -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <asm/io.h> -#include <asm/mtrr.h> -#include <asm/msr.h> -#include <asm/system.h> -#include <asm/cpufeature.h> -#include <asm/tlbflush.h> -#include "mtrr.h" - -struct mtrr_state { - struct mtrr_var_range *var_ranges; - mtrr_type fixed_ranges[NUM_FIXED_RANGES]; - unsigned char enabled; - unsigned char have_fixed; - mtrr_type def_type; -}; - -struct fixed_range_block { - int base_msr; /* start address of an MTRR block */ - int ranges; /* number of MTRRs in this block */ -}; - -static struct fixed_range_block fixed_range_blocks[] = { - { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */ - { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */ - { MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */ - {} -}; - -static unsigned long smp_changes_mask; -static struct mtrr_state mtrr_state = {}; - -#undef MODULE_PARAM_PREFIX -#define MODULE_PARAM_PREFIX "mtrr." 
- -static int mtrr_show; -module_param_named(show, mtrr_show, bool, 0); - -/* Get the MSR pair relating to a var range */ -static void -get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) -{ - rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); - rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); -} - -static void -get_fixed_ranges(mtrr_type * frs) -{ - unsigned int *p = (unsigned int *) frs; - int i; - - rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]); - - for (i = 0; i < 2; i++) - rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]); - for (i = 0; i < 8; i++) - rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); -} - -void mtrr_save_fixed_ranges(void *info) -{ - if (cpu_has_mtrr) - get_fixed_ranges(mtrr_state.fixed_ranges); -} - -static void print_fixed(unsigned base, unsigned step, const mtrr_type*types) -{ - unsigned i; - - for (i = 0; i < 8; ++i, ++types, base += step) - printk(KERN_INFO "MTRR %05X-%05X %s\n", - base, base + step - 1, mtrr_attrib_to_str(*types)); -} - -/* Grab all of the MTRR state for this CPU into *state */ -void __init get_mtrr_state(void) -{ - unsigned int i; - struct mtrr_var_range *vrs; - unsigned lo, dummy; - - if (!mtrr_state.var_ranges) { - mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range), - GFP_KERNEL); - if (!mtrr_state.var_ranges) - return; - } - vrs = mtrr_state.var_ranges; - - rdmsr(MTRRcap_MSR, lo, dummy); - mtrr_state.have_fixed = (lo >> 8) & 1; - - for (i = 0; i < num_var_ranges; i++) - get_mtrr_var_range(i, &vrs[i]); - if (mtrr_state.have_fixed) - get_fixed_ranges(mtrr_state.fixed_ranges); - - rdmsr(MTRRdefType_MSR, lo, dummy); - mtrr_state.def_type = (lo & 0xff); - mtrr_state.enabled = (lo & 0xc00) >> 10; - - if (mtrr_show) { - int high_width; - - printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type)); - if (mtrr_state.have_fixed) { - printk(KERN_INFO "MTRR fixed ranges %sabled:\n", - mtrr_state.enabled & 1 ? "en" : "dis"); - print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); - for (i = 0; i < 2; ++i) - print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); - for (i = 0; i < 8; ++i) - print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); - } - printk(KERN_INFO "MTRR variable ranges %sabled:\n", - mtrr_state.enabled & 2 ? "en" : "dis"); - high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4; - for (i = 0; i < num_var_ranges; ++i) { - if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) - printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n", - i, - high_width, - mtrr_state.var_ranges[i].base_hi, - mtrr_state.var_ranges[i].base_lo >> 12, - high_width, - mtrr_state.var_ranges[i].mask_hi, - mtrr_state.var_ranges[i].mask_lo >> 12, - mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); - else - printk(KERN_INFO "MTRR %u disabled\n", i); - } - } -} - -/* Some BIOS's are fucked and don't set all MTRRs the same! 
*/ -void __init mtrr_state_warn(void) -{ - unsigned long mask = smp_changes_mask; - - if (!mask) - return; - if (mask & MTRR_CHANGE_MASK_FIXED) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); - if (mask & MTRR_CHANGE_MASK_VARIABLE) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); - if (mask & MTRR_CHANGE_MASK_DEFTYPE) - printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); - printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); - printk(KERN_INFO "mtrr: corrected configuration.\n"); -} - -/* Doesn't attempt to pass an error out to MTRR users - because it's quite complicated in some cases and probably not - worth it because the best error handling is to ignore it. */ -void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) -{ - if (wrmsr_safe(msr, a, b) < 0) - printk(KERN_ERR - "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", - smp_processor_id(), msr, a, b); -} - -/** - * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs - * see AMD publication no. 24593, chapter 3.2.1 for more information - */ -static inline void k8_enable_fixed_iorrs(void) -{ - unsigned lo, hi; - - rdmsr(MSR_K8_SYSCFG, lo, hi); - mtrr_wrmsr(MSR_K8_SYSCFG, lo - | K8_MTRRFIXRANGE_DRAM_ENABLE - | K8_MTRRFIXRANGE_DRAM_MODIFY, hi); -} - -/** - * Checks and updates an fixed-range MTRR if it differs from the value it - * should have. If K8 extenstions are wanted, update the K8 SYSCFG MSR also. - * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information - * \param msr MSR address of the MTTR which should be checked and updated - * \param changed pointer which indicates whether the MTRR needed to be changed - * \param msrwords pointer to the MSR values which the MSR should have - */ -static void set_fixed_range(int msr, int * changed, unsigned int * msrwords) -{ - unsigned lo, hi; - - rdmsr(msr, lo, hi); - - if (lo != msrwords[0] || hi != msrwords[1]) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && - boot_cpu_data.x86 == 15 && - ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK)) - k8_enable_fixed_iorrs(); - mtrr_wrmsr(msr, msrwords[0], msrwords[1]); - *changed = TRUE; - } -} - -int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) -/* [SUMMARY] Get a free MTRR. - <base> The starting (base) address of the region. - <size> The size (in bytes) of the region. - [RETURNS] The index of the region on success, else -1 on error. -*/ -{ - int i, max; - mtrr_type ltype; - unsigned long lbase, lsize; - - max = num_var_ranges; - if (replace_reg >= 0 && replace_reg < max) - return replace_reg; - for (i = 0; i < max; ++i) { - mtrr_if->get(i, &lbase, &lsize, <ype); - if (lsize == 0) - return i; - } - return -ENOSPC; -} - -static void generic_get_mtrr(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type *type) -{ - unsigned int mask_lo, mask_hi, base_lo, base_hi; - - rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); - if ((mask_lo & 0x800) == 0) { - /* Invalid (i.e. free) range */ - *base = 0; - *size = 0; - *type = 0; - return; - } - - rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); - - /* Work out the shifted address mask. */ - mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) - | mask_lo >> PAGE_SHIFT; - - /* This works correctly if size is a power of two, i.e. a - contiguous range. 
*/ - *size = -mask_lo; - *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; - *type = base_lo & 0xff; -} - -/** - * Checks and updates the fixed-range MTRRs if they differ from the saved set - * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges() - */ -static int set_fixed_ranges(mtrr_type * frs) -{ - unsigned long long *saved = (unsigned long long *) frs; - int changed = FALSE; - int block=-1, range; - - while (fixed_range_blocks[++block].ranges) - for (range=0; range < fixed_range_blocks[block].ranges; range++) - set_fixed_range(fixed_range_blocks[block].base_msr + range, - &changed, (unsigned int *) saved++); - - return changed; -} - -/* Set the MSR pair relating to a var range. Returns TRUE if - changes are made */ -static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) -{ - unsigned int lo, hi; - int changed = FALSE; - - rdmsr(MTRRphysBase_MSR(index), lo, hi); - if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) - || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != - (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { - mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); - changed = TRUE; - } - - rdmsr(MTRRphysMask_MSR(index), lo, hi); - - if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) - || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != - (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { - mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); - changed = TRUE; - } - return changed; -} - -static u32 deftype_lo, deftype_hi; - -static unsigned long set_mtrr_state(void) -/* [SUMMARY] Set the MTRR state for this CPU. - <state> The MTRR state information to read. - <ctxt> Some relevant CPU context. - [NOTE] The CPU must already be in a safe state for MTRR changes. - [RETURNS] 0 if no changes made, else a mask indication what was changed. -*/ -{ - unsigned int i; - unsigned long change_mask = 0; - - for (i = 0; i < num_var_ranges; i++) - if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) - change_mask |= MTRR_CHANGE_MASK_VARIABLE; - - if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) - change_mask |= MTRR_CHANGE_MASK_FIXED; - - /* Set_mtrr_restore restores the old value of MTRRdefType, - so to set it we fiddle with the saved value */ - if ((deftype_lo & 0xff) != mtrr_state.def_type - || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { - deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); - change_mask |= MTRR_CHANGE_MASK_DEFTYPE; - } - - return change_mask; -} - - -static unsigned long cr4 = 0; -static DEFINE_SPINLOCK(set_atomicity_lock); - -/* - * Since we are disabling the cache don't allow any interrupts - they - * would run extremely slow and would only increase the pain. The caller must - * ensure that local interrupts are disabled and are reenabled after post_set() - * has been called. - */ - -static void prepare_set(void) __acquires(set_atomicity_lock) -{ - unsigned long cr0; - - /* Note that this is not ideal, since the cache is only flushed/disabled - for this CPU while the MTRRs are changed, but changing this requires - more invasive changes to the way the kernel boots */ - - spin_lock(&set_atomicity_lock); - - /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. 
*/ - cr0 = read_cr0() | 0x40000000; /* set CD flag */ - write_cr0(cr0); - wbinvd(); - - /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if ( cpu_has_pge ) { - cr4 = read_cr4(); - write_cr4(cr4 & ~X86_CR4_PGE); - } - - /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ - __flush_tlb(); - - /* Save MTRR state */ - rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); - - /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi); -} - -static void post_set(void) __releases(set_atomicity_lock) -{ - /* Flush TLBs (no need to flush caches - they are disabled) */ - __flush_tlb(); - - /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); - - /* Enable caches */ - write_cr0(read_cr0() & 0xbfffffff); - - /* Restore value of CR4 */ - if ( cpu_has_pge ) - write_cr4(cr4); - spin_unlock(&set_atomicity_lock); -} - -static void generic_set_all(void) -{ - unsigned long mask, count; - unsigned long flags; - - local_irq_save(flags); - prepare_set(); - - /* Actually set the state */ - mask = set_mtrr_state(); - - post_set(); - local_irq_restore(flags); - - /* Use the atomic bitops to update the global mask */ - for (count = 0; count < sizeof mask * 8; ++count) { - if (mask & 0x01) - set_bit(count, &smp_changes_mask); - mask >>= 1; - } - -} - -static void generic_set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -/* [SUMMARY] Set variable MTRR register on the local CPU. - <reg> The register to set. - <base> The base address of the region. - <size> The size of the region. If this is 0 the region is disabled. - <type> The type of the region. - <do_safe> If TRUE, do the change safely. If FALSE, safety measures should - be done externally. - [RETURNS] Nothing. -*/ -{ - unsigned long flags; - struct mtrr_var_range *vr; - - vr = &mtrr_state.var_ranges[reg]; - - local_irq_save(flags); - prepare_set(); - - if (size == 0) { - /* The invalid bit is kept in the mask, so we simply clear the - relevant mask register to disable a range. 
*/ - mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); - memset(vr, 0, sizeof(struct mtrr_var_range)); - } else { - vr->base_lo = base << PAGE_SHIFT | type; - vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); - vr->mask_lo = -size << PAGE_SHIFT | 0x800; - vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); - - mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); - mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); - } - - post_set(); - local_irq_restore(flags); -} - -int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) -{ - unsigned long lbase, last; - - /* For Intel PPro stepping <= 7, must be 4 MiB aligned - and not touch 0x70000000->0x7003FFFF */ - if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && - boot_cpu_data.x86_model == 1 && - boot_cpu_data.x86_mask <= 7) { - if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { - printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); - return -EINVAL; - } - if (!(base + size < 0x70000 || base > 0x7003F) && - (type == MTRR_TYPE_WRCOMB - || type == MTRR_TYPE_WRBACK)) { - printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); - return -EINVAL; - } - } - - /* Check upper bits of base and last are equal and lower bits are 0 - for base and 1 for last */ - last = base + size - 1; - for (lbase = base; !(lbase & 1) && (last & 1); - lbase = lbase >> 1, last = last >> 1) ; - if (lbase != last) { - printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", - base, size); - return -EINVAL; - } - return 0; -} - - -static int generic_have_wrcomb(void) -{ - unsigned long config, dummy; - rdmsr(MTRRcap_MSR, config, dummy); - return (config & (1 << 10)); -} - -int positive_have_wrcomb(void) -{ - return 1; -} - -/* generic structure... - */ -struct mtrr_ops generic_mtrr_ops = { - .use_intel_if = 1, - .set_all = generic_set_all, - .get = generic_get_mtrr, - .get_free_region = generic_get_free_region, - .set = generic_set_mtrr, - .validate_add_page = generic_validate_add_page, - .have_wrcomb = generic_have_wrcomb, -}; diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c deleted file mode 100644 index c7d8f1756745..000000000000 --- a/arch/i386/kernel/cpu/mtrr/if.c +++ /dev/null @@ -1,439 +0,0 @@ -#include <linux/init.h> -#include <linux/proc_fs.h> -#include <linux/capability.h> -#include <linux/ctype.h> -#include <linux/module.h> -#include <linux/seq_file.h> -#include <asm/uaccess.h> - -#define LINE_SIZE 80 - -#include <asm/mtrr.h> -#include "mtrr.h" - -/* RED-PEN: this is accessed without any locking */ -extern unsigned int *usage_table; - - -#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) - -static const char *const mtrr_strings[MTRR_NUM_TYPES] = -{ - "uncachable", /* 0 */ - "write-combining", /* 1 */ - "?", /* 2 */ - "?", /* 3 */ - "write-through", /* 4 */ - "write-protect", /* 5 */ - "write-back", /* 6 */ -}; - -const char *mtrr_attrib_to_str(int x) -{ - return (x <= 6) ? 
mtrr_strings[x] : "?"; -} - -#ifdef CONFIG_PROC_FS - -static int -mtrr_file_add(unsigned long base, unsigned long size, - unsigned int type, char increment, struct file *file, int page) -{ - int reg, max; - unsigned int *fcount = FILE_FCOUNT(file); - - max = num_var_ranges; - if (fcount == NULL) { - fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL); - if (!fcount) - return -ENOMEM; - FILE_FCOUNT(file) = fcount; - } - if (!page) { - if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) - return -EINVAL; - base >>= PAGE_SHIFT; - size >>= PAGE_SHIFT; - } - reg = mtrr_add_page(base, size, type, 1); - if (reg >= 0) - ++fcount[reg]; - return reg; -} - -static int -mtrr_file_del(unsigned long base, unsigned long size, - struct file *file, int page) -{ - int reg; - unsigned int *fcount = FILE_FCOUNT(file); - - if (!page) { - if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) - return -EINVAL; - base >>= PAGE_SHIFT; - size >>= PAGE_SHIFT; - } - reg = mtrr_del_page(-1, base, size); - if (reg < 0) - return reg; - if (fcount == NULL) - return reg; - if (fcount[reg] < 1) - return -EINVAL; - --fcount[reg]; - return reg; -} - -/* RED-PEN: seq_file can seek now. this is ignored. */ -static ssize_t -mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) -/* Format of control line: - "base=%Lx size=%Lx type=%s" OR: - "disable=%d" -*/ -{ - int i, err; - unsigned long reg; - unsigned long long base, size; - char *ptr; - char line[LINE_SIZE]; - size_t linelen; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (!len) - return -EINVAL; - memset(line, 0, LINE_SIZE); - if (len > LINE_SIZE) - len = LINE_SIZE; - if (copy_from_user(line, buf, len - 1)) - return -EFAULT; - linelen = strlen(line); - ptr = line + linelen - 1; - if (linelen && *ptr == '\n') - *ptr = '\0'; - if (!strncmp(line, "disable=", 8)) { - reg = simple_strtoul(line + 8, &ptr, 0); - err = mtrr_del_page(reg, 0, 0); - if (err < 0) - return err; - return len; - } - if (strncmp(line, "base=", 5)) - return -EINVAL; - base = simple_strtoull(line + 5, &ptr, 0); - for (; isspace(*ptr); ++ptr) ; - if (strncmp(ptr, "size=", 5)) - return -EINVAL; - size = simple_strtoull(ptr + 5, &ptr, 0); - if ((base & 0xfff) || (size & 0xfff)) - return -EINVAL; - for (; isspace(*ptr); ++ptr) ; - if (strncmp(ptr, "type=", 5)) - return -EINVAL; - ptr += 5; - for (; isspace(*ptr); ++ptr) ; - for (i = 0; i < MTRR_NUM_TYPES; ++i) { - if (strcmp(ptr, mtrr_strings[i])) - continue; - base >>= PAGE_SHIFT; - size >>= PAGE_SHIFT; - err = - mtrr_add_page((unsigned long) base, (unsigned long) size, i, - 1); - if (err < 0) - return err; - return len; - } - return -EINVAL; -} - -static long -mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg) -{ - int err = 0; - mtrr_type type; - unsigned long size; - struct mtrr_sentry sentry; - struct mtrr_gentry gentry; - void __user *arg = (void __user *) __arg; - - switch (cmd) { - case MTRRIOC_ADD_ENTRY: - case MTRRIOC_SET_ENTRY: - case MTRRIOC_DEL_ENTRY: - case MTRRIOC_KILL_ENTRY: - case MTRRIOC_ADD_PAGE_ENTRY: - case MTRRIOC_SET_PAGE_ENTRY: - case MTRRIOC_DEL_PAGE_ENTRY: - case MTRRIOC_KILL_PAGE_ENTRY: - if (copy_from_user(&sentry, arg, sizeof sentry)) - return -EFAULT; - break; - case MTRRIOC_GET_ENTRY: - case MTRRIOC_GET_PAGE_ENTRY: - if (copy_from_user(&gentry, arg, sizeof gentry)) - return -EFAULT; - break; -#ifdef CONFIG_COMPAT - case MTRRIOC32_ADD_ENTRY: - case MTRRIOC32_SET_ENTRY: - case MTRRIOC32_DEL_ENTRY: - case MTRRIOC32_KILL_ENTRY: - case MTRRIOC32_ADD_PAGE_ENTRY: - case 
MTRRIOC32_SET_PAGE_ENTRY: - case MTRRIOC32_DEL_PAGE_ENTRY: - case MTRRIOC32_KILL_PAGE_ENTRY: { - struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; - err = get_user(sentry.base, &s32->base); - err |= get_user(sentry.size, &s32->size); - err |= get_user(sentry.type, &s32->type); - if (err) - return err; - break; - } - case MTRRIOC32_GET_ENTRY: - case MTRRIOC32_GET_PAGE_ENTRY: { - struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; - err = get_user(gentry.regnum, &g32->regnum); - err |= get_user(gentry.base, &g32->base); - err |= get_user(gentry.size, &g32->size); - err |= get_user(gentry.type, &g32->type); - if (err) - return err; - break; - } -#endif - } - - switch (cmd) { - default: - return -ENOTTY; - case MTRRIOC_ADD_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_ADD_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = - mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, - file, 0); - break; - case MTRRIOC_SET_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_SET_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); - break; - case MTRRIOC_DEL_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_DEL_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_file_del(sentry.base, sentry.size, file, 0); - break; - case MTRRIOC_KILL_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_KILL_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_del(-1, sentry.base, sentry.size); - break; - case MTRRIOC_GET_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_GET_ENTRY: -#endif - if (gentry.regnum >= num_var_ranges) - return -EINVAL; - mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); - - /* Hide entries that go above 4GB */ - if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)) - || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))) - gentry.base = gentry.size = gentry.type = 0; - else { - gentry.base <<= PAGE_SHIFT; - gentry.size = size << PAGE_SHIFT; - gentry.type = type; - } - - break; - case MTRRIOC_ADD_PAGE_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_ADD_PAGE_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = - mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, - file, 1); - break; - case MTRRIOC_SET_PAGE_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_SET_PAGE_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); - break; - case MTRRIOC_DEL_PAGE_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_DEL_PAGE_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_file_del(sentry.base, sentry.size, file, 1); - break; - case MTRRIOC_KILL_PAGE_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_KILL_PAGE_ENTRY: -#endif - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - err = mtrr_del_page(-1, sentry.base, sentry.size); - break; - case MTRRIOC_GET_PAGE_ENTRY: -#ifdef CONFIG_COMPAT - case MTRRIOC32_GET_PAGE_ENTRY: -#endif - if (gentry.regnum >= num_var_ranges) - return -EINVAL; - mtrr_if->get(gentry.regnum, &gentry.base, &size, &type); - /* Hide entries that would overflow */ - if (size != (__typeof__(gentry.size))size) - gentry.base = gentry.size = gentry.type = 0; - else { - gentry.size = size; - gentry.type = type; - } - break; - } - - if (err) - return err; - - switch(cmd) { - case MTRRIOC_GET_ENTRY: - case MTRRIOC_GET_PAGE_ENTRY: - if (copy_to_user(arg, &gentry, sizeof gentry)) - err = -EFAULT; - 
break; -#ifdef CONFIG_COMPAT - case MTRRIOC32_GET_ENTRY: - case MTRRIOC32_GET_PAGE_ENTRY: { - struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; - err = put_user(gentry.base, &g32->base); - err |= put_user(gentry.size, &g32->size); - err |= put_user(gentry.regnum, &g32->regnum); - err |= put_user(gentry.type, &g32->type); - break; - } -#endif - } - return err; -} - -static int -mtrr_close(struct inode *ino, struct file *file) -{ - int i, max; - unsigned int *fcount = FILE_FCOUNT(file); - - if (fcount != NULL) { - max = num_var_ranges; - for (i = 0; i < max; ++i) { - while (fcount[i] > 0) { - mtrr_del(i, 0, 0); - --fcount[i]; - } - } - kfree(fcount); - FILE_FCOUNT(file) = NULL; - } - return single_release(ino, file); -} - -static int mtrr_seq_show(struct seq_file *seq, void *offset); - -static int mtrr_open(struct inode *inode, struct file *file) -{ - if (!mtrr_if) - return -EIO; - if (!mtrr_if->get) - return -ENXIO; - return single_open(file, mtrr_seq_show, NULL); -} - -static const struct file_operations mtrr_fops = { - .owner = THIS_MODULE, - .open = mtrr_open, - .read = seq_read, - .llseek = seq_lseek, - .write = mtrr_write, - .unlocked_ioctl = mtrr_ioctl, - .compat_ioctl = mtrr_ioctl, - .release = mtrr_close, -}; - - -static struct proc_dir_entry *proc_root_mtrr; - - -static int mtrr_seq_show(struct seq_file *seq, void *offset) -{ - char factor; - int i, max, len; - mtrr_type type; - unsigned long base, size; - - len = 0; - max = num_var_ranges; - for (i = 0; i < max; i++) { - mtrr_if->get(i, &base, &size, &type); - if (size == 0) - usage_table[i] = 0; - else { - if (size < (0x100000 >> PAGE_SHIFT)) { - /* less than 1MB */ - factor = 'K'; - size <<= PAGE_SHIFT - 10; - } else { - factor = 'M'; - size >>= 20 - PAGE_SHIFT; - } - /* RED-PEN: base can be > 32bit */ - len += seq_printf(seq, - "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n", - i, base, base >> (20 - PAGE_SHIFT), size, factor, - mtrr_attrib_to_str(type), usage_table[i]); - } - } - return 0; -} - -static int __init mtrr_if_init(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - - if ((!cpu_has(c, X86_FEATURE_MTRR)) && - (!cpu_has(c, X86_FEATURE_K6_MTRR)) && - (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) && - (!cpu_has(c, X86_FEATURE_CENTAUR_MCR))) - return -ENODEV; - - proc_root_mtrr = - create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root); - if (proc_root_mtrr) { - proc_root_mtrr->owner = THIS_MODULE; - proc_root_mtrr->proc_fops = &mtrr_fops; - } - return 0; -} - -arch_initcall(mtrr_if_init); -#endif /* CONFIG_PROC_FS */ diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c deleted file mode 100644 index c48b6fea5ab4..000000000000 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ /dev/null @@ -1,768 +0,0 @@ -/* Generic MTRR (Memory Type Range Register) driver. - - Copyright (C) 1997-2000 Richard Gooch - Copyright (c) 2002 Patrick Mochel - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - - Richard Gooch may be reached by email at rgooch@atnf.csiro.au - The postal address is: - Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. - - Source: "Pentium Pro Family Developer's Manual, Volume 3: - Operating System Writer's Guide" (Intel document number 242692), - section 11.11.7 - - This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> - on 6-7 March 2002. - Source: Intel Architecture Software Developers Manual, Volume 3: - System Programming Guide; Section 9.11. (1997 edition - PPro). -*/ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/smp.h> -#include <linux/cpu.h> -#include <linux/mutex.h> - -#include <asm/mtrr.h> - -#include <asm/uaccess.h> -#include <asm/processor.h> -#include <asm/msr.h> -#include "mtrr.h" - -u32 num_var_ranges = 0; - -unsigned int *usage_table; -static DEFINE_MUTEX(mtrr_mutex); - -u64 size_or_mask, size_and_mask; - -static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; - -struct mtrr_ops * mtrr_if = NULL; - -static void set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type); - -#ifndef CONFIG_X86_64 -extern int arr3_protected; -#else -#define arr3_protected 0 -#endif - -void set_mtrr_ops(struct mtrr_ops * ops) -{ - if (ops->vendor && ops->vendor < X86_VENDOR_NUM) - mtrr_ops[ops->vendor] = ops; -} - -/* Returns non-zero if we have the write-combining memory type */ -static int have_wrcomb(void) -{ - struct pci_dev *dev; - u8 rev; - - if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { - /* ServerWorks LE chipsets < rev 6 have problems with write-combining - Don't allow it and leave room for other chipsets to be tagged */ - if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && - dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { - pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); - if (rev <= 5) { - printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); - pci_dev_put(dev); - return 0; - } - } - /* Intel 450NX errata # 23. Non ascending cacheline evictions to - write combining memory may resulting in data corruption */ - if (dev->vendor == PCI_VENDOR_ID_INTEL && - dev->device == PCI_DEVICE_ID_INTEL_82451NX) { - printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); - pci_dev_put(dev); - return 0; - } - pci_dev_put(dev); - } - return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); -} - -/* This function returns the number of variable MTRRs */ -static void __init set_num_var_ranges(void) -{ - unsigned long config = 0, dummy; - - if (use_intel()) { - rdmsr(MTRRcap_MSR, config, dummy); - } else if (is_cpu(AMD)) - config = 2; - else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) - config = 8; - num_var_ranges = config & 0xff; -} - -static void __init init_table(void) -{ - int i, max; - - max = num_var_ranges; - if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL)) - == NULL) { - printk(KERN_ERR "mtrr: could not allocate\n"); - return; - } - for (i = 0; i < max; i++) - usage_table[i] = 1; -} - -struct set_mtrr_data { - atomic_t count; - atomic_t gate; - unsigned long smp_base; - unsigned long smp_size; - unsigned int smp_reg; - mtrr_type smp_type; -}; - -#ifdef CONFIG_SMP - -static void ipi_handler(void *info) -/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs. 
- [RETURNS] Nothing. -*/ -{ - struct set_mtrr_data *data = info; - unsigned long flags; - - local_irq_save(flags); - - atomic_dec(&data->count); - while(!atomic_read(&data->gate)) - cpu_relax(); - - /* The master has cleared me to execute */ - if (data->smp_reg != ~0U) - mtrr_if->set(data->smp_reg, data->smp_base, - data->smp_size, data->smp_type); - else - mtrr_if->set_all(); - - atomic_dec(&data->count); - while(atomic_read(&data->gate)) - cpu_relax(); - - atomic_dec(&data->count); - local_irq_restore(flags); -} - -#endif - -static inline int types_compatible(mtrr_type type1, mtrr_type type2) { - return type1 == MTRR_TYPE_UNCACHABLE || - type2 == MTRR_TYPE_UNCACHABLE || - (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || - (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH); -} - -/** - * set_mtrr - update mtrrs on all processors - * @reg: mtrr in question - * @base: mtrr base - * @size: mtrr size - * @type: mtrr type - * - * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: - * - * 1. Send IPI to do the following: - * 2. Disable Interrupts - * 3. Wait for all procs to do so - * 4. Enter no-fill cache mode - * 5. Flush caches - * 6. Clear PGE bit - * 7. Flush all TLBs - * 8. Disable all range registers - * 9. Update the MTRRs - * 10. Enable all range registers - * 11. Flush all TLBs and caches again - * 12. Enter normal cache mode and reenable caching - * 13. Set PGE - * 14. Wait for buddies to catch up - * 15. Enable interrupts. - * - * What does that mean for us? Well, first we set data.count to the number - * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait - * until it hits 0 and proceed. We set the data.gate flag and reset data.count. - * Meanwhile, they are waiting for that flag to be set. Once it's set, each - * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it - * differently, so we call mtrr_if->set() callback and let them take care of it. - * When they're done, they again decrement data->count and wait for data.gate to - * be reset. - * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. - * Everyone then enables interrupts and we all continue on. - * - * Note that the mechanism is the same for UP systems, too; all the SMP stuff - * becomes nops. - */ -static void set_mtrr(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type) -{ - struct set_mtrr_data data; - unsigned long flags; - - data.smp_reg = reg; - data.smp_base = base; - data.smp_size = size; - data.smp_type = type; - atomic_set(&data.count, num_booting_cpus() - 1); - /* make sure data.count is visible before unleashing other CPUs */ - smp_wmb(); - atomic_set(&data.gate,0); - - /* Start the ball rolling on other CPUs */ - if (smp_call_function(ipi_handler, &data, 1, 0) != 0) - panic("mtrr: timed out waiting for other CPUs\n"); - - local_irq_save(flags); - - while(atomic_read(&data.count)) - cpu_relax(); - - /* ok, reset count and toggle gate */ - atomic_set(&data.count, num_booting_cpus() - 1); - smp_wmb(); - atomic_set(&data.gate,1); - - /* do our MTRR business */ - - /* HACK! - * We use this same function to initialize the mtrrs on boot. - * The state of the boot cpu's mtrrs has been saved, and we want - * to replicate across all the APs. - * If we're doing that @reg is set to something special... 
- */ - if (reg != ~0U) - mtrr_if->set(reg,base,size,type); - - /* wait for the others */ - while(atomic_read(&data.count)) - cpu_relax(); - - atomic_set(&data.count, num_booting_cpus() - 1); - smp_wmb(); - atomic_set(&data.gate,0); - - /* - * Wait here for everyone to have seen the gate change - * So we're the last ones to touch 'data' - */ - while(atomic_read(&data.count)) - cpu_relax(); - - local_irq_restore(flags); -} - -/** - * mtrr_add_page - Add a memory type region - * @base: Physical base address of region in pages (in units of 4 kB!) - * @size: Physical size of region in pages (4 kB) - * @type: Type of MTRR desired - * @increment: If this is true do usage counting on the region - * - * Memory type region registers control the caching on newer Intel and - * non Intel processors. This function allows drivers to request an - * MTRR is added. The details and hardware specifics of each processor's - * implementation are hidden from the caller, but nevertheless the - * caller should expect to need to provide a power of two size on an - * equivalent power of two boundary. - * - * If the region cannot be added either because all regions are in use - * or the CPU cannot support it a negative value is returned. On success - * the register number for this entry is returned, but should be treated - * as a cookie only. - * - * On a multiprocessor machine the changes are made to all processors. - * This is required on x86 by the Intel processors. - * - * The available types are - * - * %MTRR_TYPE_UNCACHABLE - No caching - * - * %MTRR_TYPE_WRBACK - Write data back in bursts whenever - * - * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts - * - * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes - * - * BUGS: Needs a quiet flag for the cases where drivers do not mind - * failures and do not wish system log messages to be sent. - */ - -int mtrr_add_page(unsigned long base, unsigned long size, - unsigned int type, char increment) -{ - int i, replace, error; - mtrr_type ltype; - unsigned long lbase, lsize; - - if (!mtrr_if) - return -ENXIO; - - if ((error = mtrr_if->validate_add_page(base,size,type))) - return error; - - if (type >= MTRR_NUM_TYPES) { - printk(KERN_WARNING "mtrr: type: %u invalid\n", type); - return -EINVAL; - } - - /* If the type is WC, check that this processor supports it */ - if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { - printk(KERN_WARNING - "mtrr: your processor doesn't support write-combining\n"); - return -ENOSYS; - } - - if (!size) { - printk(KERN_WARNING "mtrr: zero sized request\n"); - return -EINVAL; - } - - if (base & size_or_mask || size & size_or_mask) { - printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); - return -EINVAL; - } - - error = -EINVAL; - replace = -1; - - /* No CPU hotplug when we change MTRR entries */ - lock_cpu_hotplug(); - /* Search for existing MTRR */ - mutex_lock(&mtrr_mutex); - for (i = 0; i < num_var_ranges; ++i) { - mtrr_if->get(i, &lbase, &lsize, <ype); - if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) - continue; - /* At this point we know there is some kind of overlap/enclosure */ - if (base < lbase || base + size - 1 > lbase + lsize - 1) { - if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { - /* New region encloses an existing region */ - if (type == ltype) { - replace = replace == -1 ? 
- i : -2;
- continue;
- }
- else if (types_compatible(type, ltype))
- continue;
- }
- printk(KERN_WARNING
- "mtrr: 0x%lx000,0x%lx000 overlaps existing"
- " 0x%lx000,0x%lx000\n", base, size, lbase,
- lsize);
- goto out;
- }
- /* New region is enclosed by an existing region */
- if (ltype != type) {
- if (types_compatible(type, ltype))
- continue;
- printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
- base, size, mtrr_attrib_to_str(ltype),
- mtrr_attrib_to_str(type));
- goto out;
- }
- if (increment)
- ++usage_table[i];
- error = i;
- goto out;
- }
- /* Search for an empty MTRR */
- i = mtrr_if->get_free_region(base, size, replace);
- if (i >= 0) {
- set_mtrr(i, base, size, type);
- if (likely(replace < 0))
- usage_table[i] = 1;
- else {
- usage_table[i] = usage_table[replace] + !!increment;
- if (unlikely(replace != i)) {
- set_mtrr(replace, 0, 0, 0);
- usage_table[replace] = 0;
- }
- }
- } else
- printk(KERN_INFO "mtrr: no more MTRRs available\n");
- error = i;
- out:
- mutex_unlock(&mtrr_mutex);
- unlock_cpu_hotplug();
- return error;
-}
-
-static int mtrr_check(unsigned long base, unsigned long size)
-{
- if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
- printk(KERN_WARNING
- "mtrr: size and base must be multiples of 4 kiB\n");
- printk(KERN_DEBUG
- "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
- dump_stack();
- return -1;
- }
- return 0;
-}
-
-/**
- * mtrr_add - Add a memory type region
- * @base: Physical base address of region
- * @size: Physical size of region
- * @type: Type of MTRR desired
- * @increment: If this is true do usage counting on the region
- *
- * Memory type region registers control the caching on newer Intel and
- * non Intel processors. This function allows drivers to request an
- * MTRR is added. The details and hardware specifics of each processor's
- * implementation are hidden from the caller, but nevertheless the
- * caller should expect to need to provide a power of two size on an
- * equivalent power of two boundary.
- *
- * If the region cannot be added either because all regions are in use
- * or the CPU cannot support it a negative value is returned. On success
- * the register number for this entry is returned, but should be treated
- * as a cookie only.
- *
- * On a multiprocessor machine the changes are made to all processors.
- * This is required on x86 by the Intel processors.
- *
- * The available types are
- *
- * %MTRR_TYPE_UNCACHABLE - No caching
- *
- * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
- *
- * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
- *
- * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
- *
- * BUGS: Needs a quiet flag for the cases where drivers do not mind
- * failures and do not wish system log messages to be sent.
- */
-
-int
-mtrr_add(unsigned long base, unsigned long size, unsigned int type,
- char increment)
-{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
- increment);
-}
-
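The kernel-doc above states the driver-facing contract in the abstract, and a concrete caller makes it easier to follow. The fragment below is only an illustrative sketch of the usual pattern, a driver requesting write-combining for a linear framebuffer aperture and releasing it on teardown; the examplefb names, the structure and its fields are hypothetical, while mtrr_add(), mtrr_del() and MTRR_TYPE_WRCOMB are the interfaces documented in this file.

#include <linux/kernel.h>
#include <asm/mtrr.h>

/* Hypothetical driver state; the examplefb names are illustrative only. */
struct examplefb_par {
	unsigned long fb_base;	/* physical base of the aperture */
	unsigned long fb_len;	/* aperture size, a power of two */
	int mtrr_cookie;	/* value handed back by mtrr_add() */
};

static void examplefb_enable_wc(struct examplefb_par *par)
{
	/* Ask for write-combining; failure only means slower access. */
	par->mtrr_cookie = mtrr_add(par->fb_base, par->fb_len,
				    MTRR_TYPE_WRCOMB, 1);
	if (par->mtrr_cookie < 0)
		printk(KERN_INFO "examplefb: write-combining unavailable\n");
}

static void examplefb_disable_wc(struct examplefb_par *par)
{
	/* Hand the cookie back only if mtrr_add() succeeded. */
	if (par->mtrr_cookie >= 0)
		mtrr_del(par->mtrr_cookie, par->fb_base, par->fb_len);
}

As the documentation above says, the base and size passed to mtrr_add() must be 4 kB multiples sized and aligned on a power of two, and the returned register number is only a cookie to be handed back to mtrr_del().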
-/**
- * mtrr_del_page - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
- *
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
- *
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
- */
-
-int mtrr_del_page(int reg, unsigned long base, unsigned long size)
-{
- int i, max;
- mtrr_type ltype;
- unsigned long lbase, lsize;
- int error = -EINVAL;
-
- if (!mtrr_if)
- return -ENXIO;
-
- max = num_var_ranges;
- /* No CPU hotplug when we change MTRR entries */
- lock_cpu_hotplug();
- mutex_lock(&mtrr_mutex);
- if (reg < 0) {
- /* Search for existing MTRR */
- for (i = 0; i < max; ++i) {
- mtrr_if->get(i, &lbase, &lsize, &ltype);
- if (lbase == base && lsize == size) {
- reg = i;
- break;
- }
- }
- if (reg < 0) {
- printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
- size);
- goto out;
- }
- }
- if (reg >= max) {
- printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
- goto out;
- }
- if (is_cpu(CYRIX) && !use_intel()) {
- if ((reg == 3) && arr3_protected) {
- printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
- goto out;
- }
- }
- mtrr_if->get(reg, &lbase, &lsize, &ltype);
- if (lsize < 1) {
- printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
- goto out;
- }
- if (usage_table[reg] < 1) {
- printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
- goto out;
- }
- if (--usage_table[reg] < 1)
- set_mtrr(reg, 0, 0, 0);
- error = reg;
- out:
- mutex_unlock(&mtrr_mutex);
- unlock_cpu_hotplug();
- return error;
-}
-/**
- * mtrr_del - delete a memory type region
- * @reg: Register returned by mtrr_add
- * @base: Physical base address
- * @size: Size of region
- *
- * If register is supplied then base and size are ignored. This is
- * how drivers should call it.
- *
- * Releases an MTRR region. If the usage count drops to zero the
- * register is freed and the region returns to default state.
- * On success the register is returned, on failure a negative error
- * code.
- */
-
-int
-mtrr_del(int reg, unsigned long base, unsigned long size)
-{
- if (mtrr_check(base, size))
- return -EINVAL;
- return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(mtrr_add);
-EXPORT_SYMBOL(mtrr_del);
-
-/* HACK ALERT!
- * These should be called implicitly, but we can't yet until all the initcall
- * stuff is done...
- */
-extern void amd_init_mtrr(void);
-extern void cyrix_init_mtrr(void);
-extern void centaur_init_mtrr(void);
-
-static void __init init_ifs(void)
-{
-#ifndef CONFIG_X86_64
- amd_init_mtrr();
- cyrix_init_mtrr();
- centaur_init_mtrr();
-#endif
-}
-
-/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
- * MTRR driver doesn't require this
- */
-struct mtrr_value {
- mtrr_type ltype;
- unsigned long lbase;
- unsigned long lsize;
-};
-
-static struct mtrr_value * mtrr_state;
-
-static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
-{
- int i;
- int size = num_var_ranges * sizeof(struct mtrr_value);
-
- mtrr_state = kzalloc(size,GFP_ATOMIC);
- if (!mtrr_state)
- return -ENOMEM;
-
- for (i = 0; i < num_var_ranges; i++) {
- mtrr_if->get(i,
- &mtrr_state[i].lbase,
- &mtrr_state[i].lsize,
- &mtrr_state[i].ltype);
- }
- return 0;
-}
-
-static int mtrr_restore(struct sys_device * sysdev)
-{
- int i;
-
- for (i = 0; i < num_var_ranges; i++) {
- if (mtrr_state[i].lsize)
- set_mtrr(i,
- mtrr_state[i].lbase,
- mtrr_state[i].lsize,
- mtrr_state[i].ltype);
- }
- kfree(mtrr_state);
- return 0;
-}
-
-
-
-static struct sysdev_driver mtrr_sysdev_driver = {
- .suspend = mtrr_save,
- .resume = mtrr_restore,
-};
-
-
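The count/gate rendezvous that ipi_handler() and set_mtrr() implement further up is the subtlest part of this file, and it is easier to study outside of IPI context. The stand-alone program below is only a user-space model of the same three-phase handshake, written against POSIX threads and C11 atomics rather than kernel primitives; the worker/coordinator naming is invented here. Each worker checks in, spins until the coordinator opens the gate, performs its (simulated) register update, and then checks out twice more so the coordinator is guaranteed to be the last one touching the shared data.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 3

/* Models struct set_mtrr_data's count/gate pair. */
static struct {
	atomic_int count;
	atomic_int gate;
} data;

static void *worker(void *arg)
{
	long id = (long)arg;

	/* Phase 1: check in, then spin until the coordinator opens the gate. */
	atomic_fetch_sub(&data.count, 1);
	while (!atomic_load(&data.gate))
		;	/* cpu_relax() in the kernel */

	/* Phase 2: everyone performs its (simulated) MTRR update. */
	printf("worker %ld: updating registers\n", id);
	atomic_fetch_sub(&data.count, 1);
	while (atomic_load(&data.gate))
		;

	/* Phase 3: final check-out so the coordinator touches 'data' last. */
	atomic_fetch_sub(&data.count, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	long i;

	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 0);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);

	while (atomic_load(&data.count))	/* wait for everyone to check in */
		;
	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 1);		/* open the gate */

	printf("coordinator: updating registers\n");
	while (atomic_load(&data.count))	/* wait for all updates to finish */
		;
	atomic_store(&data.count, NWORKERS);
	atomic_store(&data.gate, 0);		/* close the gate again */

	while (atomic_load(&data.count))	/* be the last to touch 'data' */
		;
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

In the kernel the same exchange runs with interrupts disabled on every CPU, the spin loops use cpu_relax(), and the "update" step is the vendor-specific mtrr_if->set() or set_all() call.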
-/**
- * mtrr_bp_init - initialize mtrrs on the boot CPU
- *
- * This needs to be called early; before any of the other CPUs are
- * initialized (i.e. before smp_init()).
- *
- */
-void __init mtrr_bp_init(void)
-{
- init_ifs();
-
- if (cpu_has_mtrr) {
- mtrr_if = &generic_mtrr_ops;
- size_or_mask = 0xff000000; /* 36 bits */
- size_and_mask = 0x00f00000;
-
- /* This is an AMD specific MSR, but we assume(hope?) that
- Intel will implement it to when they extend the address
- bus of the Xeon. */
- if (cpuid_eax(0x80000000) >= 0x80000008) {
- u32 phys_addr;
- phys_addr = cpuid_eax(0x80000008) & 0xff;
- /* CPUID workaround for Intel 0F33/0F34 CPU */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 0xF &&
- boot_cpu_data.x86_model == 0x3 &&
- (boot_cpu_data.x86_mask == 0x3 ||
- boot_cpu_data.x86_mask == 0x4))
- phys_addr = 36;
-
- size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
- size_and_mask = ~size_or_mask & 0xfffff00000ULL;
- } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
- boot_cpu_data.x86 == 6) {
- /* VIA C* family have Intel style MTRRs, but
- don't support PAE */
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- } else {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- if (cpu_has_k6_mtrr) {
- /* Pre-Athlon (K6) AMD CPU MTRRs */
- mtrr_if = mtrr_ops[X86_VENDOR_AMD];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CENTAUR:
- if (cpu_has_centaur_mcr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- case X86_VENDOR_CYRIX:
- if (cpu_has_cyrix_arr) {
- mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
- size_or_mask = 0xfff00000; /* 32 bits */
- size_and_mask = 0;
- }
- break;
- default:
- break;
- }
- }
-
- if (mtrr_if) {
- set_num_var_ranges();
- init_table();
- if (use_intel())
- get_mtrr_state();
- }
-}
-
-void mtrr_ap_init(void)
-{
- unsigned long flags;
-
- if (!mtrr_if || !use_intel())
- return;
- /*
- * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
- * but this routine will be called in cpu boot time, holding the lock
- * breaks it. This routine is called in two cases: 1.very earily time
- * of software resume, when there absolutely isn't mtrr entry changes;
- * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
- * prevent mtrr entry changes
- */
- local_irq_save(flags);
-
- mtrr_if->set_all();
-
- local_irq_restore(flags);
-}
-
-/**
- * Save current fixed-range MTRR state of the BSP
- */
-void mtrr_save_state(void)
-{
- int cpu = get_cpu();
-
- if (cpu == 0)
- mtrr_save_fixed_ranges(NULL);
- else
- smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
- put_cpu();
-}
-
-static int __init mtrr_init_finialize(void)
-{
- if (!mtrr_if)
- return 0;
- if (use_intel())
- mtrr_state_warn();
- else {
- /* The CPUs haven't MTRR and seemes not support SMP. They have
- * specific drivers, we use a tricky method to support
- * suspend/resume for them.
- * TBD: is there any system with such CPU which supports
- * suspend/resume? if no, we should remove the code.
- */
- sysdev_driver_register(&cpu_sysdev_class,
- &mtrr_sysdev_driver);
- }
- return 0;
-}
-subsys_initcall(mtrr_init_finialize);
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
deleted file mode 100644
index 289dfe6030e3..000000000000
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * local mtrr defines.
- */ - -#ifndef TRUE -#define TRUE 1 -#define FALSE 0 -#endif - -#define MTRRcap_MSR 0x0fe -#define MTRRdefType_MSR 0x2ff - -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) -#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) - -#define NUM_FIXED_RANGES 88 -#define MTRRfix64K_00000_MSR 0x250 -#define MTRRfix16K_80000_MSR 0x258 -#define MTRRfix16K_A0000_MSR 0x259 -#define MTRRfix4K_C0000_MSR 0x268 -#define MTRRfix4K_C8000_MSR 0x269 -#define MTRRfix4K_D0000_MSR 0x26a -#define MTRRfix4K_D8000_MSR 0x26b -#define MTRRfix4K_E0000_MSR 0x26c -#define MTRRfix4K_E8000_MSR 0x26d -#define MTRRfix4K_F0000_MSR 0x26e -#define MTRRfix4K_F8000_MSR 0x26f - -#define MTRR_CHANGE_MASK_FIXED 0x01 -#define MTRR_CHANGE_MASK_VARIABLE 0x02 -#define MTRR_CHANGE_MASK_DEFTYPE 0x04 - -/* In the Intel processor's MTRR interface, the MTRR type is always held in - an 8 bit field: */ -typedef u8 mtrr_type; - -struct mtrr_ops { - u32 vendor; - u32 use_intel_if; -// void (*init)(void); - void (*set)(unsigned int reg, unsigned long base, - unsigned long size, mtrr_type type); - void (*set_all)(void); - - void (*get)(unsigned int reg, unsigned long *base, - unsigned long *size, mtrr_type * type); - int (*get_free_region)(unsigned long base, unsigned long size, - int replace_reg); - int (*validate_add_page)(unsigned long base, unsigned long size, - unsigned int type); - int (*have_wrcomb)(void); -}; - -extern int generic_get_free_region(unsigned long base, unsigned long size, - int replace_reg); -extern int generic_validate_add_page(unsigned long base, unsigned long size, - unsigned int type); - -extern struct mtrr_ops generic_mtrr_ops; - -extern int positive_have_wrcomb(void); - -/* library functions for processor-specific routines */ -struct set_mtrr_context { - unsigned long flags; - unsigned long cr4val; - u32 deftype_lo; - u32 deftype_hi; - u32 ccr3; -}; - -struct mtrr_var_range { - u32 base_lo; - u32 base_hi; - u32 mask_lo; - u32 mask_hi; -}; - -void set_mtrr_done(struct set_mtrr_context *ctxt); -void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); -void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); - -void get_mtrr_state(void); - -extern void set_mtrr_ops(struct mtrr_ops * ops); - -extern u64 size_or_mask, size_and_mask; -extern struct mtrr_ops * mtrr_if; - -#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) -#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) - -extern unsigned int num_var_ranges; - -void mtrr_state_warn(void); -const char *mtrr_attrib_to_str(int x); -void mtrr_wrmsr(unsigned, unsigned, unsigned); - diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c deleted file mode 100644 index c9014ca4a575..000000000000 --- a/arch/i386/kernel/cpu/mtrr/state.c +++ /dev/null @@ -1,79 +0,0 @@ -#include <linux/mm.h> -#include <linux/init.h> -#include <asm/io.h> -#include <asm/mtrr.h> -#include <asm/msr.h> -#include <asm-i386/processor-cyrix.h> -#include "mtrr.h" - - -/* Put the processor into a state where MTRRs can be safely set */ -void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) -{ - unsigned int cr0; - - /* Disable interrupts locally */ - local_irq_save(ctxt->flags); - - if (use_intel() || is_cpu(CYRIX)) { - - /* Save value of CR4 and clear Page Global Enable (bit 7) */ - if ( cpu_has_pge ) { - ctxt->cr4val = read_cr4(); - write_cr4(ctxt->cr4val & ~X86_CR4_PGE); - } - - /* Disable and flush caches. 
Note that wbinvd flushes the TLBs as - a side-effect */ - cr0 = read_cr0() | 0x40000000; - wbinvd(); - write_cr0(cr0); - wbinvd(); - - if (use_intel()) - /* Save MTRR state */ - rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); - else - /* Cyrix ARRs - everything else were excluded at the top */ - ctxt->ccr3 = getCx86(CX86_CCR3); - } -} - -void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) -{ - if (use_intel()) - /* Disable MTRRs, and set the default type to uncached */ - mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, - ctxt->deftype_hi); - else if (is_cpu(CYRIX)) - /* Cyrix ARRs - everything else were excluded at the top */ - setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); -} - -/* Restore the processor after a set_mtrr_prepare */ -void set_mtrr_done(struct set_mtrr_context *ctxt) -{ - if (use_intel() || is_cpu(CYRIX)) { - - /* Flush caches and TLBs */ - wbinvd(); - - /* Restore MTRRdefType */ - if (use_intel()) - /* Intel (P6) standard MTRRs */ - mtrr_wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi); - else - /* Cyrix ARRs - everything else was excluded at the top */ - setCx86(CX86_CCR3, ctxt->ccr3); - - /* Enable caches */ - write_cr0(read_cr0() & 0xbfffffff); - - /* Restore value of CR4 */ - if ( cpu_has_pge ) - write_cr4(ctxt->cr4val); - } - /* Re-enable interrupts locally (if enabled previously) */ - local_irq_restore(ctxt->flags); -} - |
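One line in mtrr_bp_init() further up that tends to raise questions is the derivation of size_or_mask and size_and_mask from the CPU's physical address width. The small stand-alone program below simply replays that arithmetic for an assumed 36-bit address space (the width the code also forces for the affected Intel 0F33/0F34 steppings) and a 4 kB page size; the masks operate on page-shifted bases and sizes, which is why PAGE_SHIFT is subtracted.

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 kB pages, as in the code above */

int main(void)
{
	unsigned int phys_addr = 36;	/* assumed width; normally from CPUID 0x80000008 */
	unsigned long long size_or_mask, size_and_mask;

	/* Flags page-shifted bases/sizes with bits beyond the address width. */
	size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
	/* Complementary low bits, clipped by the same constant the kernel uses. */
	size_and_mask = ~size_or_mask & 0xfffff00000ULL;

	printf("size_or_mask  = %#llx\n", size_or_mask);	/* 0xffffffffff000000 */
	printf("size_and_mask = %#llx\n", size_and_mask);	/* 0xf00000 */
	return 0;
}

With these values, any base or size whose page number has a bit set at or above bit 24 (a byte address at or beyond 2^36) fails the "exceeds the MTRR width" check in mtrr_add_page().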