author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 08:50:34 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 08:50:34 -0700
commit	f536b3cae84eb7c9f3495285ad048d13a397ed0b (patch)
tree	b53eee1c45eb080168786e2f103e76d6706cbbb0 /arch/powerpc/mm/tlb_low_64e.S
parent	e669830526a0abaf301bf408df69cde33901ac63 (diff)
parent	537e5400a0a05c4efe70e7b372c19cfcd0179362 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt:
 "This is the powerpc new goodies for 3.17.  The short story:

  The biggest bit is Michael removing all of pre-POWER4 processor
  support from the 64-bit kernel, POWER3 and rs64.  This gets rid of a
  ton of old cruft that has been bitrotting for a long while.  It was
  broken for quite a few versions already and nobody noticed; nobody
  uses those machines anymore.  While at it, he cleaned up a bunch of
  old dusty cabinets, getting rid of a skeleton or two.

  Then, we have some base VFIO support for KVM, which allows assigning
  PCI devices to KVM guests, support for large 64-bit BARs on "powernv"
  platforms, support for HMI (Hardware Management Interrupts) on those
  same platforms, and some sparse-vmemmap improvements (for memory
  hotplug).  There is the usual batch of Freescale embedded updates
  (summary in the merge commit) and fixes here and there.  I think
  that's it for the highlights"

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (102 commits)
  powerpc/eeh: Export eeh_iommu_group_to_pe()
  powerpc/eeh: Add missing #ifdef CONFIG_IOMMU_API
  powerpc: Reduce scariness of interrupt frames in stack traces
  powerpc: start loop at section start of start in vmemmap_populated()
  powerpc: implement vmemmap_free()
  powerpc: implement vmemmap_remove_mapping() for BOOK3S
  powerpc: implement vmemmap_list_free()
  powerpc: Fail remap_4k_pfn() if PFN doesn't fit inside PTE
  powerpc/book3s: Fix endianess issue for HMI handling on napping cpus.
  powerpc/book3s: handle HMIs for cpus in nap mode.
  powerpc/powernv: Invoke opal call to handle hmi.
  powerpc/book3s: Add basic infrastructure to handle HMI in Linux.
  powerpc/iommu: Fix comments with it_page_shift
  powerpc/powernv: Handle compound PE in config accessors
  powerpc/powernv: Handle compound PE for EEH
  powerpc/powernv: Handle compound PE
  powerpc/powernv: Split ioda_eeh_get_state()
  powerpc/powernv: Allow to freeze PE
  powerpc/powernv: Enable M64 aperatus for PHB3
  powerpc/eeh: Aux PE data for error log
  ...
Diffstat (limited to 'arch/powerpc/mm/tlb_low_64e.S')
-rw-r--r--	arch/powerpc/mm/tlb_low_64e.S	69
1 file changed, 58 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 356e8b41fb09..89bf95bd63b1 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -296,9 +296,12 @@ itlb_miss_fault_bolted:
* r14 = page table base
* r13 = PACA
* r11 = tlb_per_core ptr
- * r10 = cpu number
+ * r10 = crap (free to use)
*/
tlb_miss_common_e6500:
+ crmove cr2*4+2,cr0*4+2 /* cr2.eq != 0 if kernel address */
+
+BEGIN_FTR_SECTION /* CPU_FTR_SMT */
/*
* Search if we already have an indirect entry for that virtual
* address, and if we do, bail out.
@@ -309,6 +312,7 @@ tlb_miss_common_e6500:
lhz r10,PACAPACAINDEX(r13)
cmpdi r15,0
cmpdi cr1,r15,1 /* set cr1.eq = 0 for non-recursive */
+ addi r10,r10,1
bne 2f
stbcx. r10,0,r11
bne 1b
@@ -322,18 +326,62 @@ tlb_miss_common_e6500:
b 1b
.previous
+ /*
+ * Erratum A-008139 says that we can't use tlbwe to change
+ * an indirect entry in any way (including replacing or
+ * invalidating) if the other thread could be in the process
+ * of a lookup. The workaround is to invalidate the entry
+ * with tlbilx before overwriting.
+ */
+
+ lbz r15,TCD_ESEL_NEXT(r11)
+ rlwinm r10,r15,16,0xff0000
+ oris r10,r10,MAS0_TLBSEL(1)@h
+ mtspr SPRN_MAS0,r10
+ isync
+ tlbre
+ mfspr r15,SPRN_MAS1
+ andis. r15,r15,MAS1_VALID@h
+ beq 5f
+
+BEGIN_FTR_SECTION_NESTED(532)
+ mfspr r10,SPRN_MAS8
+ rlwinm r10,r10,0,0x80000fff /* tgs,tlpid -> sgs,slpid */
+ mtspr SPRN_MAS5,r10
+END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
+
+ mfspr r10,SPRN_MAS1
+ rlwinm r15,r10,0,0x3fff0000 /* tid -> spid */
+ rlwimi r15,r10,20,0x00000003 /* ind,ts -> sind,sas */
+ mfspr r10,SPRN_MAS6
+ mtspr SPRN_MAS6,r15
+
mfspr r15,SPRN_MAS2
+ isync
+ tlbilxva 0,r15
+ isync
+
+ mtspr SPRN_MAS6,r10
+
+5:
+BEGIN_FTR_SECTION_NESTED(532)
+ li r10,0
+ mtspr SPRN_MAS8,r10
+ mtspr SPRN_MAS5,r10
+END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)
tlbsx 0,r16
mfspr r10,SPRN_MAS1
- andis. r10,r10,MAS1_VALID@h
+ andis. r15,r10,MAS1_VALID@h
bne tlb_miss_done_e6500
-
- /* Undo MAS-damage from the tlbsx */
+FTR_SECTION_ELSE
mfspr r10,SPRN_MAS1
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)
+
oris r10,r10,MAS1_VALID@h
- mtspr SPRN_MAS1,r10
- mtspr SPRN_MAS2,r15
+ beq cr2,4f
+ rlwinm r10,r10,0,16,1 /* Clear TID */
+4: mtspr SPRN_MAS1,r10
/* Now, we need to walk the page tables. First check if we are in
* range.
@@ -394,11 +442,13 @@ tlb_miss_common_e6500:
tlb_miss_done_e6500:
.macro tlb_unlock_e6500
+BEGIN_FTR_SECTION
beq cr1,1f /* no unlock if lock was recursively grabbed */
li r15,0
isync
stb r15,0(r11)
1:
+END_FTR_SECTION_IFSET(CPU_FTR_SMT)
.endm
tlb_unlock_e6500
@@ -407,12 +457,9 @@ tlb_miss_done_e6500:
rfi
tlb_miss_kernel_e6500:
- mfspr r10,SPRN_MAS1
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr0,r15,8 /* Check for vmalloc region */
- rlwinm r10,r10,0,16,1 /* Clear TID */
- mtspr SPRN_MAS1,r10
- beq+ tlb_miss_common_e6500
+ cmpldi cr1,r15,8 /* Check for vmalloc region */
+ beq+ cr1,tlb_miss_common_e6500
tlb_miss_fault_e6500:
tlb_unlock_e6500
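
Note on the assembly above (illustrative commentary, not part of the patch):
two constructs carry most of the weight in this hunk.

First, the erratum A-008139 workaround replaces a direct tlbwe overwrite of
an indirect TLB entry with an explicit invalidate-then-search sequence.
Restated as a comment in the same assembly dialect, with each step taken
from the added lines above:

	/*
	 * 1. Select the victim slot (MAS0: TLBSEL=1, ESEL=TCD_ESEL_NEXT)
	 *    and read the entry back with tlbre.
	 * 2. If MAS1.VALID is set, copy the entry's identity into the
	 *    search registers: tid -> spid and ind,ts -> sind,sas in
	 *    MAS6, plus (under CPU_FTR_EMB_HV) tgs,tlpid -> sgs,slpid
	 *    in MAS5.
	 * 3. tlbilxva 0,r15 then invalidates the entry by lookup, which
	 *    is safe against the other thread where tlbwe is not.
	 * 4. Restore MAS6 (and clear MAS5/MAS8 in the hypervisor case)
	 *    before the tlbsx that re-searches the faulting address.
	 */

Second, BEGIN_FTR_SECTION / END_FTR_SECTION_IFSET(CPU_FTR_SMT) are the
kernel's boot-time code-patching markers: the bracketed instructions are
nopped out on CPUs whose feature word lacks the named bit, and the
FTR_SECTION_ELSE / ALT_FTR_SECTION_END_IFSET form patches in an alternative
block instead.  A minimal sketch of the plain form, reusing the unlock code
from this patch:

	BEGIN_FTR_SECTION		/* kept only when CPU_FTR_SMT is set */
		li	r15,0
		stb	r15,0(r11)	/* drop the per-core TLB lock */
	END_FTR_SECTION_IFSET(CPU_FTR_SMT)

This keeps the TLB miss hot path free of runtime feature checks: one kernel
image serves both SMT and single-threaded e6500 configurations.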