Diffstat (limited to 'include/asm-s390')
-rw-r--r--  include/asm-s390/Kbuild          |   1
-rw-r--r--  include/asm-s390/bitops.h        |   1
-rw-r--r--  include/asm-s390/hugetlb.h       | 183
-rw-r--r--  include/asm-s390/kvm.h           |  41
-rw-r--r--  include/asm-s390/kvm_host.h      | 234
-rw-r--r--  include/asm-s390/kvm_para.h      | 150
-rw-r--r--  include/asm-s390/kvm_virtio.h    |  53
-rw-r--r--  include/asm-s390/lowcore.h       |  15
-rw-r--r--  include/asm-s390/mmu.h           |   1
-rw-r--r--  include/asm-s390/mmu_context.h   |   8
-rw-r--r--  include/asm-s390/page.h          |  49
-rw-r--r--  include/asm-s390/pgtable.h       | 127
-rw-r--r--  include/asm-s390/processor.h     |   9
-rw-r--r--  include/asm-s390/ptrace.h        |   2
-rw-r--r--  include/asm-s390/setup.h         |  38
-rw-r--r--  include/asm-s390/smp.h           |  12
-rw-r--r--  include/asm-s390/sparsemem.h     |  18
-rw-r--r--  include/asm-s390/sysinfo.h       |   5
-rw-r--r--  include/asm-s390/system.h        |  18
-rw-r--r--  include/asm-s390/thread_info.h   |   2
-rw-r--r--  include/asm-s390/topology.h      |   4
-rw-r--r--  include/asm-s390/unaligned.h     |  25
23 files changed, 912 insertions, 85 deletions
diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild
index e92b429d2be1..13c9805349f1 100644
--- a/include/asm-s390/Kbuild
+++ b/include/asm-s390/Kbuild
@@ -7,6 +7,7 @@ header-y += tape390.h
header-y += ucontext.h
header-y += vtoc.h
header-y += zcrypt.h
+header-y += kvm.h
unifdef-y += cmb.h
unifdef-y += debug.h
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index 965394e69452..b4eb24ab5af9 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
}
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
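
The new include pulls in the generic __fls(), which returns the bit number of the most
significant set bit of a non-zero word (undefined for 0), e.g. __fls(0x10UL) == 4.
A purely illustrative caller sketch (not part of the patch):

static inline unsigned long example_highest_set_bit(unsigned long word)
{
        return word ? __fls(word) : 0;  /* __fls(0) is undefined */
}
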
diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h
new file mode 100644
index 000000000000..600a776f8f75
--- /dev/null
+++ b/include/asm-s390/hugetlb.h
@@ -0,0 +1,183 @@
+/*
+ * IBM System z Huge TLB Page Support for Kernel.
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_HUGETLB_H
+#define _ASM_S390_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+#define is_hugepage_only_range(mm, addr, len) 0
+#define hugetlb_free_pgd_range free_pgd_range
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+#define hugetlb_prefault_arch_hook(mm) do { } while (0)
+
+int arch_prepare_hugepage(struct page *page);
+void arch_release_hugepage(struct page *page);
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ /*
+ * PROT_NONE needs to be remapped from the pte type to the ste type.
+ * The HW invalid bit is also different for pte and ste. The pte
+ * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
+ * bit, so we don't have to clear it.
+ */
+ if (pte_val(pte) & _PAGE_INVALID) {
+ if (pte_val(pte) & _PAGE_SWT)
+ pte_val(pte) |= _HPAGE_TYPE_NONE;
+ pte_val(pte) |= _SEGMENT_ENTRY_INV;
+ }
+ /*
+ * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
+ * table entry.
+ */
+ pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+ /*
+ * Also set the change-override bit because we don't need dirty bit
+ * tracking for hugetlbfs pages.
+ */
+ pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+ return pte;
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_RO;
+ return pte;
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
+ !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ unsigned long mask;
+
+ if (!MACHINE_HAS_HPAGE) {
+ ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
+ if (ptep) {
+ mask = pte_val(pte) &
+ (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
+ pte = pte_mkhuge(*ptep);
+ pte_val(pte) |= mask;
+ }
+ }
+ return pte;
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = huge_ptep_get(ptep);
+
+ pmd_clear((pmd_t *) ptep);
+ return pte;
+}
+
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+ register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+ register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+ _SEGMENT_ENTRY_INV;
+ register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+ asm volatile(
+ " csp %1,%3"
+ : "=m" (*pmdp)
+ : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+ unsigned long sto = (unsigned long) pmdp -
+ pmd_index(address) * sizeof(pmd_t);
+
+ if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,0"
+ : "=m" (*pmdp)
+ : "m" (*pmdp), "a" (sto),
+ "a" ((address & HPAGE_MASK))
+ );
+ }
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void huge_ptep_invalidate(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *) ptep;
+
+ if (!MACHINE_HAS_IDTE) {
+ __pmd_csp(pmdp);
+ if (mm->context.noexec) {
+ pmdp = get_shadow_table(pmdp);
+ __pmd_csp(pmdp);
+ }
+ return;
+ }
+
+ __pmd_idte(address, pmdp);
+ if (mm->context.noexec) {
+ pmdp = get_shadow_table(pmdp);
+ __pmd_idte(address, pmdp);
+ }
+ return;
+}
+
+#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
+ if (__changed) { \
+ huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
+ set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
+ } \
+ __changed; \
+})
+
+#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
+({ \
+ pte_t __pte = huge_ptep_get(__ptep); \
+ if (pte_write(__pte)) { \
+ if (atomic_read(&(__mm)->mm_users) > 1 || \
+ (__mm) != current->active_mm) \
+ huge_ptep_invalidate(__mm, __addr, __ptep); \
+ set_huge_pte_at(__mm, __addr, __ptep, \
+ huge_pte_wrprotect(__pte)); \
+ } \
+})
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ huge_ptep_invalidate(vma->vm_mm, address, ptep);
+}
+
+#endif /* _ASM_S390_HUGETLB_H */
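
A standalone sketch of the bit translation pte_mkhuge() performs for a PROT_NONE pte.
The _PAGE_INVALID value (0x400) is an assumption mirroring this kernel's pgtable.h; the
remaining constants appear in the hugetlb and pgtable changes of this patch:

#include <stdio.h>

#define _PAGE_INVALID           0x400UL /* assumption: HW invalid bit of a pte */
#define _PAGE_SWT               0x001UL
#define _PAGE_SWX               0x002UL
#define _HPAGE_TYPE_NONE        0x220UL
#define _SEGMENT_ENTRY_INV      0x020UL
#define _SEGMENT_ENTRY_LARGE    0x400UL
#define _SEGMENT_ENTRY_CO       0x100UL

int main(void)
{
        unsigned long pte = _PAGE_INVALID | _PAGE_SWT;  /* pte type for PROT_NONE */

        /* same steps as pte_mkhuge() above */
        if (pte & _PAGE_INVALID) {
                if (pte & _PAGE_SWT)
                        pte |= _HPAGE_TYPE_NONE;
                pte |= _SEGMENT_ENTRY_INV;
        }
        pte &= ~(_PAGE_SWT | _PAGE_SWX);
        pte |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;

        /* prints 0x720: large + change-override + "none" segment type */
        printf("huge pte = %#lx\n", pte);
        return 0;
}
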
diff --git a/include/asm-s390/kvm.h b/include/asm-s390/kvm.h
index 573f2a351386..d74002f95794 100644
--- a/include/asm-s390/kvm.h
+++ b/include/asm-s390/kvm.h
@@ -1,6 +1,45 @@
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
-/* s390 does not support KVM */
+/*
+ * asm-s390/kvm.h - KVM s390 specific structures and definitions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+#include <asm/types.h>
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+ /* no PIC for s390 */
+};
+
+struct kvm_ioapic_state {
+ /* no IOAPIC for s390 */
+};
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+ /* general purpose regs for s390 */
+ __u64 gprs[16];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+ __u32 acrs[16];
+ __u64 crs[16];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+ __u32 fpc;
+ __u64 fprs[16];
+};
#endif
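
These structures are what the generic KVM register ioctls transport. A hedged userspace
sketch (not part of the patch) that dumps the guest general purpose registers through the
standard vcpu ioctl:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void dump_guest_gprs(int vcpu_fd)
{
        struct kvm_regs regs;
        int i;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
                perror("KVM_GET_REGS");
                return;
        }
        for (i = 0; i < 16; i++)
                printf("r%-2d = %016llx\n", i, (unsigned long long) regs.gprs[i]);
}
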
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644
index 000000000000..f8204a4f2e02
--- /dev/null
+++ b/include/asm-s390/kvm_host.h
@@ -0,0 +1,234 @@
+/*
+ * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+#include <linux/kvm_host.h>
+#include <asm/debug.h>
+
+#define KVM_MAX_VCPUS 64
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+struct kvm_guest_debug {
+};
+
+struct sca_entry {
+ atomic_t scn;
+ __u64 reserved;
+ __u64 sda;
+ __u64 reserved2[2];
+} __attribute__((packed));
+
+
+struct sca_block {
+ __u64 ipte_control;
+ __u64 reserved[5];
+ __u64 mcn;
+ __u64 reserved2;
+ struct sca_entry cpu[64];
+} __attribute__((packed));
+
+#define KVM_PAGES_PER_HPAGE 256
+
+#define CPUSTAT_HOST 0x80000000
+#define CPUSTAT_WAIT 0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT 0x04000000
+#define CPUSTAT_IO_INT 0x02000000
+#define CPUSTAT_EXT_INT 0x01000000
+#define CPUSTAT_RUNNING 0x00800000
+#define CPUSTAT_RETAINED 0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB 0x00010000
+#define CPUSTAT_RRF 0x00008000
+#define CPUSTAT_SLSV 0x00004000
+#define CPUSTAT_SLSR 0x00002000
+#define CPUSTAT_ZARCH 0x00000800
+#define CPUSTAT_MCDS 0x00000100
+#define CPUSTAT_SM 0x00000080
+#define CPUSTAT_G 0x00000008
+#define CPUSTAT_J 0x00000002
+#define CPUSTAT_P 0x00000001
+
+struct sie_block {
+ atomic_t cpuflags; /* 0x0000 */
+ __u32 prefix; /* 0x0004 */
+ __u8 reserved8[32]; /* 0x0008 */
+ __u64 cputm; /* 0x0028 */
+ __u64 ckc; /* 0x0030 */
+ __u64 epoch; /* 0x0038 */
+ __u8 reserved40[4]; /* 0x0040 */
+#define LCTL_CR0 0x8000
+ __u16 lctl; /* 0x0044 */
+ __s16 icpua; /* 0x0046 */
+ __u32 ictl; /* 0x0048 */
+ __u32 eca; /* 0x004c */
+ __u8 icptcode; /* 0x0050 */
+ __u8 reserved51; /* 0x0051 */
+ __u16 ihcpu; /* 0x0052 */
+ __u8 reserved54[2]; /* 0x0054 */
+ __u16 ipa; /* 0x0056 */
+ __u32 ipb; /* 0x0058 */
+ __u32 scaoh; /* 0x005c */
+ __u8 reserved60; /* 0x0060 */
+ __u8 ecb; /* 0x0061 */
+ __u8 reserved62[2]; /* 0x0062 */
+ __u32 scaol; /* 0x0064 */
+ __u8 reserved68[4]; /* 0x0068 */
+ __u32 todpr; /* 0x006c */
+ __u8 reserved70[16]; /* 0x0070 */
+ __u64 gmsor; /* 0x0080 */
+ __u64 gmslm; /* 0x0088 */
+ psw_t gpsw; /* 0x0090 */
+ __u64 gg14; /* 0x00a0 */
+ __u64 gg15; /* 0x00a8 */
+ __u8 reservedb0[30]; /* 0x00b0 */
+ __u16 iprcc; /* 0x00ce */
+ __u8 reservedd0[48]; /* 0x00d0 */
+ __u64 gcr[16]; /* 0x0100 */
+ __u64 gbea; /* 0x0180 */
+ __u8 reserved188[120]; /* 0x0188 */
+} __attribute__((packed));
+
+struct kvm_vcpu_stat {
+ u32 exit_userspace;
+ u32 exit_external_request;
+ u32 exit_external_interrupt;
+ u32 exit_stop_request;
+ u32 exit_validity;
+ u32 exit_instruction;
+ u32 instruction_lctl;
+ u32 instruction_lctg;
+ u32 exit_program_interruption;
+ u32 exit_instr_and_program;
+ u32 deliver_emergency_signal;
+ u32 deliver_service_signal;
+ u32 deliver_virtio_interrupt;
+ u32 deliver_stop_signal;
+ u32 deliver_prefix_signal;
+ u32 deliver_restart_signal;
+ u32 deliver_program_int;
+ u32 exit_wait_state;
+ u32 instruction_stidp;
+ u32 instruction_spx;
+ u32 instruction_stpx;
+ u32 instruction_stap;
+ u32 instruction_storage_key;
+ u32 instruction_stsch;
+ u32 instruction_chsc;
+ u32 instruction_stsi;
+ u32 instruction_stfl;
+ u32 instruction_sigp_sense;
+ u32 instruction_sigp_emergency;
+ u32 instruction_sigp_stop;
+ u32 instruction_sigp_arch;
+ u32 instruction_sigp_prefix;
+ u32 instruction_sigp_restart;
+ u32 diagnose_44;
+};
+
+struct io_info {
+ __u16 subchannel_id; /* 0x0b8 */
+ __u16 subchannel_nr; /* 0x0ba */
+ __u32 io_int_parm; /* 0x0bc */
+ __u32 io_int_word; /* 0x0c0 */
+};
+
+struct ext_info {
+ __u32 ext_params;
+ __u64 ext_params2;
+};
+
+#define PGM_OPERATION 0x01
+#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_EXECUTE 0x03
+#define PGM_PROTECTION 0x04
+#define PGM_ADDRESSING 0x05
+#define PGM_SPECIFICATION 0x06
+#define PGM_DATA 0x07
+
+struct pgm_info {
+ __u16 code;
+};
+
+struct prefix_info {
+ __u32 address;
+};
+
+struct interrupt_info {
+ struct list_head list;
+ u64 type;
+ union {
+ struct io_info io;
+ struct ext_info ext;
+ struct pgm_info pgm;
+ struct prefix_info prefix;
+ };
+};
+
+/* for local_interrupt.action_flags */
+#define ACTION_STORE_ON_STOP 1
+#define ACTION_STOP_ON_STOP 2
+
+struct local_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ struct float_interrupt *float_int;
+ int timer_due; /* event indicator for waitqueue below */
+ wait_queue_head_t wq;
+ atomic_t *cpuflags;
+ unsigned int action_bits;
+};
+
+struct float_interrupt {
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t active;
+ int next_rr_cpu;
+ unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
+ struct local_interrupt *local_int[64];
+};
+
+
+struct kvm_vcpu_arch {
+ struct sie_block *sie_block;
+ unsigned long guest_gprs[16];
+ s390_fp_regs host_fpregs;
+ unsigned int host_acrs[NUM_ACRS];
+ s390_fp_regs guest_fpregs;
+ unsigned int guest_acrs[NUM_ACRS];
+ struct local_interrupt local_int;
+ struct timer_list ckc_timer;
+ union {
+ cpuid_t cpu_id;
+ u64 stidp_data;
+ };
+};
+
+struct kvm_vm_stat {
+ u32 remote_tlb_flush;
+};
+
+struct kvm_arch{
+ unsigned long guest_origin;
+ unsigned long guest_memsize;
+ struct sca_block *sca;
+ debug_info_t *dbf;
+ struct float_interrupt float_int;
+};
+
+extern int sie64a(struct sie_block *, __u64 *);
+#endif
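
The hex comments in struct sie_block are hardware-architected offsets of the SIE control
block, so a compile-time check along these lines (an illustrative sketch, not in the
patch) keeps them honest:

#include <linux/kernel.h>       /* BUILD_BUG_ON */
#include <linux/stddef.h>       /* offsetof */

static inline void sie_block_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct sie_block, gpsw) != 0x0090);
        BUILD_BUG_ON(offsetof(struct sie_block, gcr)  != 0x0100);
        BUILD_BUG_ON(offsetof(struct sie_block, gbea) != 0x0180);
}
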
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644
index 000000000000..2c503796b619
--- /dev/null
+++ b/include/asm-s390/kvm_para.h
@@ -0,0 +1,150 @@
+/*
+ * asm-s390/kvm_para.h - definition for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __S390_KVM_PARA_H
+#define __S390_KVM_PARA_H
+
+/*
+ * Hypercalls for KVM on s390. The calling convention is similar to the
+ * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
+ * as hypercall number and R7 as parameter 6. The return value is
+ * written to R2. We use the diagnose instruction as hypercall. To avoid
+ * conflicts with existing diagnoses for LPAR and z/VM, we do not use
+ * the instruction encoded number, but specify the number in R1 and
+ * use 0x500 as KVM hypercall
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+static inline long kvm_hypercall0(unsigned long nr)
+{
+ register unsigned long __nr asm("1") = nr;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr): "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+ unsigned long p2)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
+ : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3) : "memory", "cc");
+ return __rc;
+}
+
+
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
+ return __rc;
+}
+
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5,
+ unsigned long p6)
+{
+ register unsigned long __nr asm("1") = nr;
+ register unsigned long __p1 asm("2") = p1;
+ register unsigned long __p2 asm("3") = p2;
+ register unsigned long __p3 asm("4") = p3;
+ register unsigned long __p4 asm("5") = p4;
+ register unsigned long __p5 asm("6") = p5;
+ register unsigned long __p6 asm("7") = p6;
+ register long __rc asm("2");
+
+ asm volatile ("diag 2,4,0x500\n"
+ : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+ "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
+ : "memory", "cc");
+ return __rc;
+}
+
+/* kvm on s390 is always paravirtualization enabled */
+static inline int kvm_para_available(void)
+{
+ return 1;
+}
+
+/* No feature bits are currently assigned for kvm on s390 */
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __S390_KVM_PARA_H */
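
A hedged guest-side sketch (not from the patch) of the convention described above:
hypercall number in r1, first parameter in r2, return code back in r2. The
KVM_S390_VIRTIO_NOTIFY constant comes from kvm_virtio.h below; passing the virtqueue
config address as the parameter is an assumption about how the virtio transport uses it:

#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>

static long notify_host(unsigned long vq_config_addr)
{
        return kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, vq_config_addr);
}
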
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644
index 000000000000..5c871a990c29
--- /dev/null
+++ b/include/asm-s390/kvm_virtio.h
@@ -0,0 +1,53 @@
+/*
+ * kvm_virtio.h - definition for virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_VIRTIO_H
+#define __KVM_S390_VIRTIO_H
+
+#include <linux/types.h>
+
+struct kvm_device_desc {
+ /* The device type: console, network, disk etc. Type 0 terminates. */
+ __u8 type;
+ /* The number of virtqueues (first in config array) */
+ __u8 num_vq;
+ /*
+ * The number of bytes of feature bits. Multiply by 2: one for host
+ * features and one for guest acknowledgements.
+ */
+ __u8 feature_len;
+ /* The number of bytes of the config array after virtqueues. */
+ __u8 config_len;
+ /* A status byte, written by the Guest. */
+ __u8 status;
+ __u8 config[0];
+};
+
+/*
+ * This is how we expect the device configuration field for a virtqueue
+ * to be laid out in config space.
+ */
+struct kvm_vqconfig {
+ /* The token returned with an interrupt. Set by the guest */
+ __u64 token;
+ /* The address of the virtio ring */
+ __u64 address;
+ /* The number of entries in the virtio_ring */
+ __u16 num;
+
+};
+
+#define KVM_S390_VIRTIO_NOTIFY 0
+#define KVM_S390_VIRTIO_RESET 1
+#define KVM_S390_VIRTIO_SET_STATUS 2
+
+#endif
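
The layout implied by the comments -- header, then num_vq virtqueue configs, then two
copies of the feature bits, then the device config bytes -- can be walked with helpers
like these (a simplified sketch of what a guest driver is expected to do, not part of
the header):

static inline struct kvm_vqconfig *desc_vqconfig(struct kvm_device_desc *d)
{
        return (struct kvm_vqconfig *) d->config;
}

static inline __u8 *desc_features(struct kvm_device_desc *d)
{
        return d->config + d->num_vq * sizeof(struct kvm_vqconfig);
}

static inline __u8 *desc_config_space(struct kvm_device_desc *d)
{
        return desc_features(d) + d->feature_len * 2;
}
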
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index 5de3efb31445..0bc51d52a899 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -381,27 +381,32 @@ struct _lowcore
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
- __u8 pad13[0x1200-0xe04]; /* 0xe04 */
+ __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ __u64 ext_params2; /* 0x11B8 */
+
+ __u8 pad14[0x1200-0x11C0]; /* 0x11C0 */
/* System info area */
__u64 floating_pt_save_area[16]; /* 0x1200 */
__u64 gpregs_save_area[16]; /* 0x1280 */
__u32 st_status_fixed_logout[4]; /* 0x1300 */
- __u8 pad14[0x1318-0x1310]; /* 0x1310 */
+ __u8 pad15[0x1318-0x1310]; /* 0x1310 */
__u32 prefixreg_save_area; /* 0x1318 */
__u32 fpt_creg_save_area; /* 0x131c */
- __u8 pad15[0x1324-0x1320]; /* 0x1320 */
+ __u8 pad16[0x1324-0x1320]; /* 0x1320 */
__u32 tod_progreg_save_area; /* 0x1324 */
__u32 cpu_timer_save_area[2]; /* 0x1328 */
__u32 clock_comp_save_area[2]; /* 0x1330 */
- __u8 pad16[0x1340-0x1338]; /* 0x1338 */
+ __u8 pad17[0x1340-0x1338]; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
__u64 cregs_save_area[16]; /* 0x1380 */
/* align to the top of the prefix area */
- __u8 pad17[0x2000-0x1400]; /* 0x1400 */
+ __u8 pad18[0x2000-0x1400]; /* 0x1400 */
#endif /* !__s390x__ */
} __attribute__((packed)); /* End structure*/
diff --git a/include/asm-s390/mmu.h b/include/asm-s390/mmu.h
index 1698e29c5b20..5dd5e7b3476f 100644
--- a/include/asm-s390/mmu.h
+++ b/include/asm-s390/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
int noexec;
+ int pgstes;
} mm_context_t;
#endif
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index b5a34c6f91a9..4c2fbf48c9c4 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- mm->context.noexec = s390_noexec;
+ if (current->mm->context.pgstes) {
+ mm->context.noexec = 0;
+ mm->context.pgstes = 1;
+ } else {
+ mm->context.noexec = s390_noexec;
+ mm->context.pgstes = 0;
+ }
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index fe7f92b6ae6d..f0f4579eac13 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -19,17 +19,34 @@
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+#define HPAGE_SHIFT 20
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGE_PTE_TYPE
+#define ARCH_HAS_PREPARE_HUGEPAGE
+#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+
#include <asm/setup.h>
#ifndef __ASSEMBLY__
static inline void clear_page(void *page)
{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
+ if (MACHINE_HAS_PFMF) {
+ asm volatile(
+ " .insn rre,0xb9af0000,%0,%1"
+ : : "d" (0x10000), "a" (page) : "memory", "cc");
+ } else {
+ register unsigned long reg1 asm ("1") = 0;
+ register void *reg2 asm ("2") = page;
+ register unsigned long reg3 asm ("3") = 4096;
+ asm volatile(
+ " mvcl 2,0"
+ : "+d" (reg2), "+d" (reg3) : "d" (reg1)
+ : "memory", "cc");
+ }
}
static inline void copy_page(void *to, void *from)
@@ -108,26 +125,6 @@ page_get_storage_key(unsigned long addr)
return skey;
}
-extern unsigned long max_pfn;
-
-static inline int pfn_valid(unsigned long pfn)
-{
- unsigned long dummy;
- int ccode;
-
- if (pfn >= max_pfn)
- return 0;
-
- asm volatile(
- " lra %0,0(%2)\n"
- " ipm %1\n"
- " srl %1,28\n"
- : "=d" (dummy), "=d" (ccode)
- : "a" (pfn << PAGE_SHIFT)
- : "cc");
- return !ccode;
-}
-
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
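
A quick consistency sketch (not in the patch): with HPAGE_SHIFT = 20 and 4 KB base pages
a huge page is 1 MB, i.e. 2^(20-12) = 256 base pages, matching both HUGETLB_PAGE_ORDER
above and KVM_PAGES_PER_HPAGE in kvm_host.h:

#include <linux/kernel.h>       /* BUILD_BUG_ON */
#include <asm/page.h>

static inline void hpage_layout_check(void)
{
        BUILD_BUG_ON(HPAGE_SIZE != (1UL << 20));
        BUILD_BUG_ON((HPAGE_SIZE >> PAGE_SHIFT) != 256);
        BUILD_BUG_ON(HUGETLB_PAGE_ORDER != 8);
}
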
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 65154dc9a9e5..c7f4f8e3e297 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -30,6 +30,7 @@
*/
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
+#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>
@@ -128,7 +129,7 @@ extern char empty_zero_page[PAGE_SIZE];
#define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define VMEM_MAP ((struct page *) VMALLOC_END)
+#define vmemmap ((struct page *) VMALLOC_END)
/*
* A 31 bit pagetable entry of S390 has following format:
@@ -219,6 +220,8 @@ extern char empty_zero_page[PAGE_SIZE];
/* Software bits in the page table entry */
#define _PAGE_SWT 0x001 /* SW pte type bit t */
#define _PAGE_SWX 0x002 /* SW pte type bit x */
+#define _PAGE_SPECIAL 0x004 /* SW associated with special page */
+#define __HAVE_ARCH_PTE_SPECIAL
/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY 0x400
@@ -231,6 +234,15 @@ extern char empty_zero_page[PAGE_SIZE];
#define _PAGE_TYPE_EX_RW 0x002
/*
+ * Only four types for huge pages, using the invalid bit and protection bit
+ * of a segment table entry.
+ */
+#define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */
+#define _HPAGE_TYPE_NONE 0x220
+#define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */
+#define _HPAGE_TYPE_RW 0x000
+
+/*
* PTE type bits are rather complicated. handle_pte_fault uses pte_present,
* pte_none and pte_file to find out the pte type WITHOUT holding the page
* table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
@@ -258,6 +270,13 @@ extern char empty_zero_page[PAGE_SIZE];
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
*/
+/* Page status table bits for virtualization */
+#define RCP_PCL_BIT 55
+#define RCP_HR_BIT 54
+#define RCP_HC_BIT 53
+#define RCP_GR_BIT 50
+#define RCP_GC_BIT 49
+
#ifndef __s390x__
/* Bits in the segment table address-space-control-element */
@@ -315,6 +334,9 @@ extern char empty_zero_page[PAGE_SIZE];
#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
+#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
+
#endif /* __s390x__ */
/*
@@ -510,9 +532,56 @@ static inline int pte_file(pte_t pte)
return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}
+static inline int pte_special(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_SPECIAL);
+}
+
#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b) (pte_val(a) == pte_val(b))
+static inline void rcp_lock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ preempt_disable();
+ while (test_and_set_bit(RCP_PCL_BIT, pgste))
+ ;
+#endif
+}
+
+static inline void rcp_unlock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+ clear_bit(RCP_PCL_BIT, pgste);
+ preempt_enable();
+#endif
+}
+
+/* forward declaration for SetPageUptodate in page-flags.h*/
+static inline void page_clear_dirty(struct page *page);
+#include <linux/page-flags.h>
+
+static inline void ptep_rcp_copy(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+ struct page *page = virt_to_page(pte_val(*ptep));
+ unsigned int skey;
+ unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ skey = page_get_storage_key(page_to_phys(page));
+ if (skey & _PAGE_CHANGED)
+ set_bit_simple(RCP_GC_BIT, pgste);
+ if (skey & _PAGE_REFERENCED)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+ SetPageDirty(page);
+ if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
+ SetPageReferenced(page);
+#endif
+}
+
/*
* query functions pte_write/pte_dirty/pte_young only work if
* pte_present() is true. Undefined behaviour if not..
@@ -599,6 +668,8 @@ static inline void pmd_clear(pmd_t *pmd)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ if (mm->context.pgstes)
+ ptep_rcp_copy(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -663,10 +734,34 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SPECIAL;
+ return pte;
+}
+
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_PGSTE
+ unsigned long physpage;
+ int young;
+ unsigned long *pgste;
+
+ if (!vma->vm_mm->context.pgstes)
+ return 0;
+ physpage = pte_val(*ptep) & PAGE_MASK;
+ pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+ young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+ rcp_lock(ptep);
+ if (young)
+ set_bit_simple(RCP_GR_BIT, pgste);
+ young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
+ rcp_unlock(ptep);
+ return young;
+#endif
return 0;
}
@@ -674,7 +769,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB; bits are in storage key */
+ /* No need to flush TLB
+ * On s390 reference bits are in storage key and never in TLB
+ * With virtualization we handle the reference bit, without we
+ * we can simply return */
+#ifdef CONFIG_PGSTE
+ return ptep_test_and_clear_young(vma, address, ptep);
+#endif
return 0;
}
@@ -693,15 +794,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep),
"a" (pto), "a" (address));
}
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
+ if (mm->context.pgstes) {
+ rcp_lock(ptep);
+ __ptep_ipte(address, ptep);
+ ptep_rcp_copy(ptep);
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ rcp_unlock(ptep);
+ return;
+ }
__ptep_ipte(address, ptep);
- if (mm->context.noexec)
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ if (mm->context.noexec) {
__ptep_ipte(address, ptep + PTRS_PER_PTE);
+ pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
+ }
}
/*
@@ -964,17 +1075,15 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
-extern int add_shared_memory(unsigned long start, unsigned long size);
-extern int remove_shared_memory(unsigned long start, unsigned long size);
+extern int vmem_add_mapping(unsigned long start, unsigned long size);
+extern int vmem_remove_mapping(unsigned long start, unsigned long size);
+extern int s390_enable_sie(void);
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
-#define __HAVE_ARCH_MEMMAP_INIT
-extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
-
#include <asm-generic/pgtable.h>
#endif /* _S390_PAGE_H */
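
s390_enable_sie() is the new entry point that switches the calling process to
pgste-backed page tables (the extra longs at ptep + PTRS_PER_PTE carrying the RCP_* bits
above) so its memory can host a guest. A hedged caller sketch, not from the patch:

static int example_create_vm(void)
{
        int rc;

        rc = s390_enable_sie();         /* rebuild current->mm with pgstes */
        if (rc)
                return rc;              /* e.g. the mm is already in use elsewhere */

        /* ... go on to allocate the SCA, debug feature, vcpus, ... */
        return 0;
}
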
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 8eaf343a12a8..a00f79dd323b 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -172,16 +172,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
*/
extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-extern void show_registers(struct pt_regs *regs);
extern void show_code(struct pt_regs *regs);
-extern void show_trace(struct task_struct *task, unsigned long *sp);
-#ifdef CONFIG_64BIT
-extern void show_last_breaking_event(struct pt_regs *regs);
-#else
-static inline void show_last_breaking_event(struct pt_regs *regs)
-{
-}
-#endif
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 61f6952f2e35..441d7c260857 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -463,8 +463,6 @@ struct user_regs_struct
};
#ifdef __KERNEL__
-#define __ARCH_SYS_PTRACE 1
-
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index a76a6b8fd887..ba69674012a7 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -59,22 +59,42 @@ extern unsigned int s390_noexec;
*/
extern unsigned long machine_flags;
-#define MACHINE_IS_VM (machine_flags & 1)
-#define MACHINE_IS_P390 (machine_flags & 4)
-#define MACHINE_HAS_MVPG (machine_flags & 16)
-#define MACHINE_HAS_IDTE (machine_flags & 128)
-#define MACHINE_HAS_DIAG9C (machine_flags & 256)
+#define MACHINE_FLAG_VM (1UL << 0)
+#define MACHINE_FLAG_IEEE (1UL << 1)
+#define MACHINE_FLAG_P390 (1UL << 2)
+#define MACHINE_FLAG_CSP (1UL << 3)
+#define MACHINE_FLAG_MVPG (1UL << 4)
+#define MACHINE_FLAG_DIAG44 (1UL << 5)
+#define MACHINE_FLAG_IDTE (1UL << 6)
+#define MACHINE_FLAG_DIAG9C (1UL << 7)
+#define MACHINE_FLAG_MVCOS (1UL << 8)
+#define MACHINE_FLAG_KVM (1UL << 9)
+#define MACHINE_FLAG_HPAGE (1UL << 10)
+#define MACHINE_FLAG_PFMF (1UL << 11)
+
+#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_P390 (machine_flags & MACHINE_FLAG_P390)
+#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__
-#define MACHINE_HAS_IEEE (machine_flags & 2)
-#define MACHINE_HAS_CSP (machine_flags & 8)
+#define MACHINE_HAS_IEEE (machine_flags & MACHINE_FLAG_IEEE)
+#define MACHINE_HAS_CSP (machine_flags & MACHINE_FLAG_CSP)
+#define MACHINE_HAS_IDTE (0)
#define MACHINE_HAS_DIAG44 (1)
+#define MACHINE_HAS_MVPG (machine_flags & MACHINE_FLAG_MVPG)
#define MACHINE_HAS_MVCOS (0)
+#define MACHINE_HAS_HPAGE (0)
+#define MACHINE_HAS_PFMF (0)
#else /* __s390x__ */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
-#define MACHINE_HAS_DIAG44 (machine_flags & 32)
-#define MACHINE_HAS_MVCOS (machine_flags & 512)
+#define MACHINE_HAS_IDTE (machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_DIAG44 (machine_flags & MACHINE_FLAG_DIAG44)
+#define MACHINE_HAS_MVPG (1)
+#define MACHINE_HAS_MVCOS (machine_flags & MACHINE_FLAG_MVCOS)
+#define MACHINE_HAS_HPAGE (machine_flags & MACHINE_FLAG_HPAGE)
+#define MACHINE_HAS_PFMF (machine_flags & MACHINE_FLAG_PFMF)
#endif /* __s390x__ */
#define MACHINE_HAS_SCLP (!MACHINE_IS_P390)
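
The ad-hoc magic numbers become named MACHINE_FLAG_* bits: detection code sets a bit
once, and every consumer goes through the MACHINE_HAS_*/MACHINE_IS_* macros. An
illustrative sketch (the probe itself is hypothetical):

#include <linux/init.h>
#include <asm/setup.h>

static void __init example_detect_hpage(int facility_present)
{
        if (facility_present)
                machine_flags |= MACHINE_FLAG_HPAGE;
}

static int example_want_hugepages(void)
{
        return MACHINE_HAS_HPAGE != 0;  /* always 0 on 31 bit */
}
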
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 6f3821a6a902..ae89cf2478fc 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -19,6 +19,7 @@
#include <asm/lowcore.h>
#include <asm/sigp.h>
#include <asm/ptrace.h>
+#include <asm/system.h>
/*
s390 specific smp.c headers
@@ -53,10 +54,7 @@ extern void machine_power_off_smp(void);
static inline __u16 hard_smp_processor_id(void)
{
- __u16 cpu_address;
-
- asm volatile("stap %0" : "=m" (cpu_address));
- return cpu_address;
+ return stap();
}
/*
@@ -108,5 +106,11 @@ static inline void smp_send_stop(void)
#define smp_cpu_not_running(cpu) 1
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+extern int smp_rescan_cpus(void);
+#else
+static inline int smp_rescan_cpus(void) { return 0; }
+#endif
+
extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
#endif
diff --git a/include/asm-s390/sparsemem.h b/include/asm-s390/sparsemem.h
new file mode 100644
index 000000000000..06dfdab6c0e8
--- /dev/null
+++ b/include/asm-s390/sparsemem.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_S390_SPARSEMEM_H
+#define _ASM_S390_SPARSEMEM_H
+
+#define SECTION_SIZE_BITS 25
+
+#ifdef CONFIG_64BIT
+
+#define MAX_PHYSADDR_BITS 42
+#define MAX_PHYSMEM_BITS 42
+
+#else
+
+#define MAX_PHYSADDR_BITS 31
+#define MAX_PHYSMEM_BITS 31
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_S390_SPARSEMEM_H */
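
For reference, SECTION_SIZE_BITS 25 makes each sparsemem section 32 MB; with
MAX_PHYSMEM_BITS 42 the 64-bit kernel can describe up to 4 TB of storage in
2^(42-25) = 131072 sections, while the 31-bit kernel keeps the 2 GB limit.
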
diff --git a/include/asm-s390/sysinfo.h b/include/asm-s390/sysinfo.h
index abe10ae15e46..79d01343f8b0 100644
--- a/include/asm-s390/sysinfo.h
+++ b/include/asm-s390/sysinfo.h
@@ -11,6 +11,9 @@
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
+#ifndef __ASM_S390_SYSINFO_H
+#define __ASM_S390_SYSINFO_H
+
struct sysinfo_1_1_1 {
char reserved_0[32];
char manufacturer[16];
@@ -114,3 +117,5 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
: "cc", "memory");
return r0;
}
+
+#endif /* __ASM_S390_SYSINFO_H */
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 92098df4d6e3..c819ae25a842 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -16,6 +16,7 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
+#include <asm/lowcore.h>
#ifdef __KERNEL__
@@ -422,6 +423,23 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#endif /* CONFIG_SMP */
+static inline unsigned int stfl(void)
+{
+ asm volatile(
+ " .insn s,0xb2b10000,0(0)\n" /* stfl */
+ "0:\n"
+ EX_TABLE(0b,0b));
+ return S390_lowcore.stfl_fac_list;
+}
+
+static inline unsigned short stap(void)
+{
+ unsigned short cpu_address;
+
+ asm volatile("stap %0" : "=m" (cpu_address));
+ return cpu_address;
+}
+
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
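
stfl() stores the facility list into the lowcore and returns the first 32 facility bits;
facility bit 0 is the leftmost bit of that word, so a caller would test bit N roughly
like this (an illustrative sketch, only meaningful for the first 32 facilities):

static inline int example_test_facility(unsigned int nr)
{
        return (stfl() & (0x80000000U >> nr)) != 0;
}
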
diff --git a/include/asm-s390/thread_info.h b/include/asm-s390/thread_info.h
index 0a518915bf90..99bbed99a3b2 100644
--- a/include/asm-s390/thread_info.h
+++ b/include/asm-s390/thread_info.h
@@ -89,7 +89,6 @@ static inline struct thread_info *current_thread_info(void)
* thread information flags bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_RESTORE_SIGMASK 1 /* restore signal mask in do_signal() */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTART_SVC 4 /* restart svc with new svc number */
@@ -101,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
TIF_NEED_RESCHED */
#define TIF_31BIT 18 /* 32bit process */
#define TIF_MEMDIE 19
+#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 9e57a93d7de1..d60394b9745e 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -2,6 +2,7 @@
#define _S390_TLBFLUSH_H
#include <linux/mm.h>
+#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
diff --git a/include/asm-s390/topology.h b/include/asm-s390/topology.h
index 8e97b06f298a..d96c91643458 100644
--- a/include/asm-s390/topology.h
+++ b/include/asm-s390/topology.h
@@ -7,6 +7,10 @@
cpumask_t cpu_coregroup_map(unsigned int cpu);
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_siblings(cpu) (cpu_core_map[cpu])
+
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h
index 8ee86dbedd1f..da9627afe5d8 100644
--- a/include/asm-s390/unaligned.h
+++ b/include/asm-s390/unaligned.h
@@ -1,24 +1,13 @@
-/*
- * include/asm-s390/unaligned.h
- *
- * S390 version
- *
- * Derived from "include/asm-i386/unaligned.h"
- */
-
-#ifndef __S390_UNALIGNED_H
-#define __S390_UNALIGNED_H
+#ifndef _ASM_S390_UNALIGNED_H
+#define _ASM_S390_UNALIGNED_H
/*
* The S390 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
*/
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned __get_unaligned_be
+#define put_unaligned __put_unaligned_be
-#endif
+#endif /* _ASM_S390_UNALIGNED_H */
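
With the switch to the generic unaligned helpers, get_unaligned()/put_unaligned() keep
working for type-generic accesses (now routed through the big-endian flavour, s390's
native byte order), and the explicit-width variants become available too. A small hedged
usage sketch:

#include <linux/types.h>
#include <asm/unaligned.h>

static inline u32 example_read_be32(const void *p)
{
        return get_unaligned_be32(p);   /* explicit width/endianness variant */
}

static inline void example_write_be16(void *p, u16 val)
{
        put_unaligned_be16(val, p);
}
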