Diffstat (limited to 'arch/ia64/kernel/ivt.S')
-rw-r--r-- | arch/ia64/kernel/ivt.S | 211 |
1 file changed, 137 insertions, 74 deletions
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index d9c05d53435b..3bb3a13c4047 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1,7 +1,7 @@
 /*
  * arch/ia64/kernel/ivt.S
  *
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
  *	Stephane Eranian <eranian@hpl.hp.com>
  *	David Mosberger <davidm@hpl.hp.com>
  * Copyright (C) 2000, 2002-2003 Intel Co
@@ -405,17 +405,22 @@ ENTRY(nested_dtlb_miss)
 	 *		r30: continuation address
 	 *		r31: saved pr
 	 *
-	 * Clobbered:	b0, r18, r19, r21, psr.dt (cleared)
+	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
 	 */
 	rsm psr.dt				// switch to using physical data addressing
 	mov r19=IA64_KR(PT_BASE)		// get the page table base address
 	shl r21=r16,3				// shift bit 60 into sign bit
+	mov r18=cr.itir
 	;;
 	shr.u r17=r16,61			// get the region number into r17
+	extr.u r18=r18,2,6			// get the faulting page size
 	;;
 	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
-	shr.u r18=r16,PGDIR_SHIFT		// get bits 33-63 of faulting address
+	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
+	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
 	;;
+	shr.u r22=r16,r22
+	shr.u r18=r16,r18
 (p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place
 
 	srlz.d
@@ -428,7 +433,7 @@ ENTRY(nested_dtlb_miss)
 (p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=PTA + IFA(33,42)*8
 (p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
 	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
-	shr.u r18=r16,PMD_SHIFT			// shift L2 index into position
+	shr.u r18=r22,PMD_SHIFT			// shift L2 index into position
 	;;
 	ld8 r17=[r17]				// fetch the L1 entry (may be 0)
 	;;
@@ -436,7 +441,7 @@ ENTRY(nested_dtlb_miss)
 	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// compute address of L2 page table entry
 	;;
 (p7)	ld8 r17=[r17]				// fetch the L2 entry (may be 0)
-	shr.u r19=r16,PAGE_SHIFT		// shift L3 index into position
+	shr.u r19=r22,PAGE_SHIFT		// shift L3 index into position
 	;;
 (p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was L2 entry NULL?
 	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// compute address of L3 page table entry
@@ -687,82 +692,118 @@ ENTRY(break_fault)
 	 * to prevent leaking bits from kernel to user level.
 	 */
 	DBG_FAULT(11)
-	mov r16=IA64_KR(CURRENT)		// r16 = current task; 12 cycle read lat.
-	mov r17=cr.iim
-	mov r18=__IA64_BREAK_SYSCALL
-	mov r21=ar.fpsr
-	mov r29=cr.ipsr
-	mov r19=b6
-	mov r25=ar.unat
-	mov r27=ar.rsc
-	mov r26=ar.pfs
-	mov r28=cr.iip
-	mov r31=pr				// prepare to save predicates
-	mov r20=r1
-	;;
+	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
+	mov r29=cr.ipsr				// M2 (12 cyc)
+	mov r31=pr				// I0 (2 cyc)
+
+	mov r17=cr.iim				// M2 (2 cyc)
+	mov.m r27=ar.rsc			// M2 (12 cyc)
+	mov r18=__IA64_BREAK_SYSCALL		// A
+
+	mov.m ar.rsc=0				// M2
+	mov.m r21=ar.fpsr			// M2 (12 cyc)
+	mov r19=b6				// I0 (2 cyc)
+	;;
+	mov.m r23=ar.bspstore			// M2 (12 cyc)
+	mov.m r24=ar.rnat			// M2 (5 cyc)
+	mov.i r26=ar.pfs			// I0 (2 cyc)
+
+	invala					// M0|1
+	nop.m 0					// M
+	mov r20=r1				// A    save r1
+
+	nop.m 0
+	movl r30=sys_call_table			// X
+
+	mov r28=cr.iip				// M2 (2 cyc)
+	cmp.eq p0,p7=r18,r17			// I0   is this a system call?
+(p7)	br.cond.spnt non_syscall		// B    no ->
+	//
+	// From this point on, we are definitely on the syscall-path
+	// and we can use (non-banked) scratch registers.
+	//
+///////////////////////////////////////////////////////////////////////
+	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
+	mov r2=r16				// A    setup r2 for ia64_syscall_setup
+	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A    r9 = &current_thread_info()->flags
+	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
 
-	cmp.eq p0,p7=r18,r17			// is this a system call? (p7 <- false, if so)
-(p7)	br.cond.spnt non_syscall
+	adds r15=-1024,r15			// A    subtract 1024 from syscall number
+	mov r3=NR_syscalls - 1
 	;;
-	ld1 r17=[r16]				// load current->thread.on_ustack flag
-	st1 [r16]=r0				// clear current->thread.on_ustack flag
-	add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16	// set r1 for MINSTATE_START_SAVE_MIN_VIRT
+	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
+	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
+	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr
+
+	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
+	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
+	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
 	;;
-	invala
-	/* adjust return address so we skip over the break instruction: */
+	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
+(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
+	tnat.nz.or p7,p0=r15			// I0   is syscall nr a NaT?
 
-	extr.u r8=r29,41,2			// extract ei field from cr.ipsr
-	;;
-	cmp.eq p6,p7=2,r8			// isr.ei==2?
-	mov r2=r1				// setup r2 for ia64_syscall_setup
-	;;
-(p6)	mov r8=0				// clear ei to 0
-(p6)	adds r28=16,r28				// switch cr.iip to next bundle cr.ipsr.ei wrapped
-(p7)	adds r8=1,r8				// increment ei to next slot
-	;;
-	cmp.eq pKStk,pUStk=r0,r17		// are we in kernel mode already?
-	dep r29=r8,r29,41,2			// insert new ei into cr.ipsr
+	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
+	cmp.eq p8,p9=2,r8			// A    isr.ei==2?
 	;;
-	// switch from user to kernel RBS:
-	MINSTATE_START_SAVE_MIN_VIRT
-	br.call.sptk.many b7=ia64_syscall_setup
-	;;
-	MINSTATE_END_SAVE_MIN_VIRT		// switch to bank 1
-	ssm psr.ic | PSR_DEFAULT_BITS
-	;;
-	srlz.i					// guarantee that interruption collection is on
-	mov r3=NR_syscalls - 1
-	;;
-(p15)	ssm psr.i				// restore psr.i
-	// p10==true means out registers are more than 8 or r15's Nat is true
-(p10)	br.cond.spnt.many ia64_ret_from_syscall
-	;;
-	movl r16=sys_call_table
+(p8)	mov r8=0				// A    clear ei to 0
+(p7)	movl r30=sys_ni_syscall			// X
 
-	adds r15=-1024,r15			// r15 contains the syscall number---subtract 1024
-	movl r2=ia64_ret_from_syscall
-	;;
-	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
-	cmp.leu p6,p7=r15,r3			// (syscall > 0 && syscall < 1024 + NR_syscalls) ?
-	mov rp=r2				// set the real return addr
+(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
+(p9)	adds r8=1,r8				// A    increment ei to next slot
+	nop.i 0
 	;;
-(p6)	ld8 r20=[r20]				// load address of syscall entry point
-(p7)	movl r20=sys_ni_syscall
-	add r2=TI_FLAGS+IA64_TASK_SIZE,r13
-	;;
-	ld4 r2=[r2]				// r2 = current_thread_info()->flags
-	;;
-	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// mask trace or audit
+	mov.m r25=ar.unat			// M2 (5 cyc)
+	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
+	adds r15=1024,r15			// A    restore original syscall number
+	//
+	// If any of the above loads miss in L1D, we'll stall here until
+	// the data arrives.
+	//
+///////////////////////////////////////////////////////////////////////
+	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+	mov b6=r30				// I0   setup syscall handler branch reg early
+	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?
+
+	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
+	mov r18=ar.bsp				// M2 (12 cyc)
+(pKStk)	br.cond.spnt .break_fixup		// B    we're already in kernel-mode -- fix up RBS
+	;;
+.back_from_break_fixup:
+(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A    compute base of memory stack
+	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
+	br.call.sptk.many b7=ia64_syscall_setup	// B
+1:
+	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
+	nop 0
+	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
 	;;
-	cmp.eq p8,p0=r2,r0
-	mov b6=r20
+
+	ssm psr.ic | PSR_DEFAULT_BITS		// M2   now it's safe to re-enable intr.-collection
+	movl r3=ia64_ret_from_syscall		// X
 	;;
-(p8)	br.call.sptk.many b6=b6			// ignore this return addr
-	br.cond.sptk ia64_trace_syscall
+
+	srlz.i					// M0   ensure interruption collection is on
+	mov rp=r3				// I0   set the real return addr
+(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT
+
+(p15)	ssm psr.i				// M2   restore psr.i
+(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handker (ignore return addr)
+	br.cond.spnt.many ia64_trace_syscall	// B    do syscall-tracing thingamagic
 	// NOT REACHED
+///////////////////////////////////////////////////////////////////////
+	// On entry, we optimistically assumed that we're coming from user-space.
+	// For the rare cases where a system-call is done from within the kernel,
+	// we fix things up at this point:
+.break_fixup:
+	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
+	mov ar.rnat=r24				// M2   restore kernel's AR.RNAT
+	;;
+	mov ar.bspstore=r23			// M2   restore kernel's AR.BSPSTORE
+	br.cond.sptk .back_from_break_fixup
 END(break_fault)
 
 	.org ia64_ivt+0x3000
@@ -837,8 +878,6 @@ END(interrupt)
 	 *	- r31: saved pr
 	 *	- b0: original contents (to be saved)
 	 * On exit:
-	 *	- executing on bank 1 registers
-	 *	- psr.ic enabled, interrupts restored
 	 *	- p10: TRUE if syscall is invoked with more than 8 out
 	 *	  registers or r15's Nat is true
 	 *	- r1: kernel's gp
@@ -846,8 +885,11 @@ END(interrupt)
 	 *	- r8: -EINVAL if p10 is true
 	 *	- r12: points to kernel stack
 	 *	- r13: points to current task
+	 *	- r14: preserved (same as on entry)
+	 *	- p13: preserved
 	 *	- p15: TRUE if interrupts need to be re-enabled
 	 *	- ar.fpsr: set to kernel settings
+	 *	- b6: preserved (same as on entry)
 	 */
 GLOBAL_ENTRY(ia64_syscall_setup)
 #if PT(B6) != 0
@@ -915,10 +957,10 @@ GLOBAL_ENTRY(ia64_syscall_setup)
 (p13)	mov in5=-1
 	;;
 	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
-	tnat.nz p14,p0=in6
+	tnat.nz p13,p0=in6
 	cmp.lt p10,p9=r11,r8			// frame size can't be more than local+8
 	;;
-	stf8 [r16]=f1				// ensure pt_regs.r8 != 0 (see handle_syscall_error)
+	mov r8=1
 (p9)	tnat.nz p10,p0=r15
 
 	adds r12=-16,r1				// switch to kernel memory stack (with 16 bytes of scratch)
@@ -929,9 +971,9 @@ GLOBAL_ENTRY(ia64_syscall_setup)
 	mov r13=r2				// establish `current'
 	movl r1=__gp				// establish kernel global pointer
 	;;
-(p14)	mov in6=-1
+	st8 [r16]=r8				// ensure pt_regs.r8 != 0 (see handle_syscall_error)
+(p13)	mov in6=-1
 (p8)	mov in7=-1
-	nop.i 0
 
 	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
 	movl r17=FPSR_DEFAULT
@@ -1002,6 +1044,8 @@ END(dispatch_illegal_op_fault)
 	FAULT(17)
 
 ENTRY(non_syscall)
+	mov ar.rsc=r27				// restore ar.rsc before SAVE_MIN_WITH_COVER
+	;;
 	SAVE_MIN_WITH_COVER
 
 	// There is no particular reason for this code to be here, other than that
@@ -1199,6 +1243,25 @@ END(disabled_fp_reg)
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(nat_consumption)
 	DBG_FAULT(26)
+
+	mov r16=cr.ipsr
+	mov r17=cr.isr
+	mov r31=pr				// save PR
+	;;
+	and r18=0xf,r17				// r18 = cr.ipsr.code{3:0}
+	tbit.z p6,p0=r17,IA64_ISR_NA_BIT
+	;;
+	cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
+	dep r16=-1,r16,IA64_PSR_ED_BIT,1
+(p6)	br.cond.spnt 1f				// branch if (cr.ispr.na == 0 || cr.ipsr.code{3:0} != LFETCH)
+	;;
+	mov cr.ipsr=r16				// set cr.ipsr.na
+	mov pr=r31,-1
+	;;
+	rfi
+
+1:	mov pr=r31,-1
+	;;
 	FAULT(26)
END(nat_consumption)
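
Note on the nested_dtlb_miss hunks above: the patch derives the PMD and PTE index shifts from the faulting page size reported in cr.itir (the extra r22 register) rather than from the base PAGE_SHIFT, which is what lets the walker find the right entry for hugetlb addresses. The C below is a rough sketch of that three-level lookup, not the kernel's code: the walk() helper, the PTRS_MASK macro and the example PAGE_SHIFT value are invented for illustration, the region-5/PTA address formation done by the assembly is omitted, and real page-table entries hold physical addresses plus flags rather than plain pointers.

	/* Illustrative sketch only (not from ivt.S): a 3-level page-table walk
	 * whose index shifts are rescaled by the faulting page size (cr.itir.ps). */
	#include <stddef.h>
	#include <stdint.h>

	#define PAGE_SHIFT	14				/* example base page size: 16KB */
	#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))	/* 2^(PAGE_SHIFT-3) entries per level */
	#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
	#define PTRS_MASK	((1UL << (PAGE_SHIFT - 3)) - 1)	/* hypothetical helper macro */

	/* addr is the faulting address, itir_ps the log2 page size from cr.itir. */
	static uint64_t *walk(uint64_t *pgd_base, uint64_t addr, unsigned int itir_ps)
	{
		/* For base pages itir_ps == PAGE_SHIFT and this shift is a no-op; for a
		 * hugetlb fault it rescales the address (the assembly's r22) so the usual
		 * PGDIR/PMD/PAGE shifts pick out the right table slots. */
		uint64_t scaled = addr >> (itir_ps - PAGE_SHIFT);

		uint64_t *pgd = pgd_base + ((scaled >> PGDIR_SHIFT) & PTRS_MASK);
		if (*pgd == 0)
			return NULL;			/* L1 entry NULL -> take the page-fault path */

		uint64_t *pmd = (uint64_t *)(uintptr_t)*pgd + ((scaled >> PMD_SHIFT) & PTRS_MASK);
		if (*pmd == 0)
			return NULL;			/* L2 entry NULL */

		return (uint64_t *)(uintptr_t)*pmd + ((scaled >> PAGE_SHIFT) & PTRS_MASK);	/* L3 (pte) slot */
	}

For ordinary faults the extra shift drops out entirely, so the non-hugetlb path of nested_dtlb_miss behaves exactly as before the patch.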