author     Ralf Baechle <ralf@linux-mips.org>    2012-06-09 20:48:47 +0100
committer  Ben Hutchings <ben@decadent.org.uk>   2014-04-02 00:58:52 +0100
commit     b280aaa4ce04a6ab491fc9dc7eb4091336fd91ec (patch)
tree       c14b102b5b8fa54060b0619aa55bdca6d243aae4 /arch
parent     2de2c2e956dbf164c5acb1dca16e9a3c1938096e (diff)
MIPS: Fix potential corruption
commit a16dad7763420a3b46cff1e703a9070827796cfc upstream.

Normally r4k_dma_cache_inv should only ever be called with cacheline-aligned addresses. If, however, it isn't, there is the theoretical possibility of data corruption. There is no correct way of handling this and, anyway, it should only happen if the DMA API is used incorrectly, so drop the extra CACHE operations on the partial first and last lines.

There is a different corruption scenario with these CACHE instructions removed, but again there is no way of handling it correctly and it can be triggered only through incorrect use of the DMA API. So just get rid of the complexity.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Reported-by: James Rodriguez <jamesr@juniper.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
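The code being removed used the usual power-of-two mask to locate the first and last cache lines a buffer touches; the corruption risk comes from those partial lines also holding unrelated data. A minimal userspace sketch of that arithmetic (the line size, address, and buffer size below are made-up illustrative values, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative assumptions, not values from the kernel. */
		unsigned long lsize  = 32;              /* assumed cache line size */
		unsigned long almask = ~(lsize - 1);    /* mask used by the removed code */
		unsigned long addr   = 0x80001234;      /* deliberately misaligned buffer */
		unsigned long size   = 100;

		unsigned long first = addr & almask;                /* first line touched */
		unsigned long last  = (addr + size - 1) & almask;   /* last line touched */

		printf("buffer     : %#lx..%#lx\n", addr, addr + size - 1);
		printf("first line : %#lx, shares %lu leading bytes with a neighbour\n",
		       first, addr - first);
		printf("last line  : %#lx, shares %lu trailing bytes with a neighbour\n",
		       last, last + lsize - (addr + size));
		return 0;
	}

Writing back or invalidating those shared lines necessarily affects the neighbouring bytes as well, which is why no handling of a misaligned buffer can be fully correct here.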
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/mm/c-r4k.c | 11 -----------
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index a79fe9aa7721..7d5ac0b3ed8b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -633,9 +633,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 		if (size >= scache_size)
 			r4k_blast_scache();
 		else {
-			unsigned long lsize = cpu_scache_line_size();
-			unsigned long almask = ~(lsize - 1);
-
 			/*
 			 * There is no clearly documented alignment requirement
 			 * for the cache instruction on MIPS processors and
@@ -644,9 +641,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 			 * hit ops with insufficient alignment. Solved by
 			 * aligning the address to cache line size.
 			 */
-			cache_op(Hit_Writeback_Inv_SD, addr & almask);
-			cache_op(Hit_Writeback_Inv_SD,
-				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
 		}
 		__sync();
@@ -656,12 +650,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
 		r4k_blast_dcache();
 	} else {
-		unsigned long lsize = cpu_dcache_line_size();
-		unsigned long almask = ~(lsize - 1);
-
 		R4600_HIT_CACHEOP_WAR_IMPL;
-		cache_op(Hit_Writeback_Inv_D, addr & almask);
-		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
 		blast_inv_dcache_range(addr, addr + size);
 	}
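The blast_inv_scache_range/blast_inv_dcache_range calls that remain conceptually walk the buffer one cache line at a time and issue a hit-type invalidate per line (the real helpers are macro-generated in arch/mips/include/asm/r4kcache.h). A standalone sketch of that walk, where the line size, addresses, and the hit_invalidate stub are illustrative assumptions rather than kernel code:

	#include <stdio.h>

	#define LSIZE 32UL	/* assumed cache line size, illustrative only */

	/* Stand-in for the hardware CACHE hit-invalidate operation. */
	static void hit_invalidate(unsigned long line)
	{
		printf("invalidate line at %#lx\n", line);
	}

	/* Visit every cache line the range [start, end) touches. */
	static void blast_inv_range_sketch(unsigned long start, unsigned long end)
	{
		unsigned long addr = start & ~(LSIZE - 1);
		unsigned long aend = (end - 1) & ~(LSIZE - 1);

		for (; addr <= aend; addr += LSIZE)
			hit_invalidate(addr);
	}

	int main(void)
	{
		/* A deliberately misaligned 100-byte buffer. */
		blast_inv_range_sketch(0x80001234UL, 0x80001234UL + 100);
		return 0;
	}

With the patch applied, only this range walk is performed; the separate writeback-invalidate of the partial first and last lines is gone, since there is no alignment fix-up that could make a misaligned DMA buffer safe anyway.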