author		Linus Torvalds <torvalds@linux-foundation.org>	2013-03-02 07:58:56 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-03-02 07:58:56 -0800
commit		e23b62256a361611cbd45cd1456638f1a5106b5c (patch)
tree		472968c961432a1d9d0c3634ca20433f1d9cd29b
parent		aebb2afd5420c860b7fbc3882a323ef1247fbf16 (diff)
parent		8ccfe6675fa974bd06d64f74d0fdee6a5267d2aa (diff)
download	lwn-e23b62256a361611cbd45cd1456638f1a5106b5c.tar.gz
		lwn-e23b62256a361611cbd45cd1456638f1a5106b5c.zip
Merge tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull new ARC architecture from Vineet Gupta:
 "Initial ARC Linux port with some fixes on top for 3.9-rc1:

  I would like to introduce the Linux port to ARC Processors (from
  Synopsys) for 3.9-rc1.

  The patch-set has been discussed on the public lists since Nov and has
  received a fair bit of review, specially from Arnd, tglx, Al and other
  subsystem maintainers for DeviceTree, kgdb...

  The arch bits are in arch/arc, some asm-generic changes (acked by
  Arnd), a minor change to PARISC (acked by Helge).

  The series is a touch bigger for a new port for 2 main reasons:

   1. It enables a basic kernel in first sub-series and adds
      ptrace/kgdb/.. later

   2. Some of the fallout of review (DeviceTree support, multi-platform-
      image support) were added on top of orig series, primarily to
      record the revision history.

  This updated pull request additionally contains

   - fixes due to our GNU tools catching up with the new syscall/ptrace
     ABI

   - some (minor) cross-arch Kconfig updates."

* tag 'arc-v3.9-rc1-late' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (82 commits)
  ARC: split elf.h into uapi and export it for userspace
  ARC: Fixup the current ABI version
  ARC: gdbserver using regset interface possibly broken
  ARC: Kconfig cleanup tracking cross-arch Kconfig pruning in merge window
  ARC: make a copy of flat DT
  ARC: [plat-arcfpga] DT arc-uart bindings change: "baud" => "current-speed"
  ARC: Ensure CONFIG_VIRT_TO_BUS is not enabled
  ARC: Fix pt_orig_r8 access
  ARC: [3.9] Fallout of hlist iterator update
  ARC: 64bit RTSC timestamp hardware issue
  ARC: Don't fiddle with non-existent caches
  ARC: Add self to MAINTAINERS
  ARC: Provide a default serial.h for uart drivers needing BASE_BAUD
  ARC: [plat-arcfpga] defconfig for fully loaded ARC Linux
  ARC: [Review] Multi-platform image #8: platform registers SMP callbacks
  ARC: [Review] Multi-platform image #7: SMP common code to use callbacks
  ARC: [Review] Multi-platform image #6: cpu-to-dma-addr optional
  ARC: [Review] Multi-platform image #5: NR_IRQS defined by ARC core
  ARC: [Review] Multi-platform image #4: Isolate platform headers
  ARC: [Review] Multi-platform image #3: switch to board callback
  ...
-rw-r--r--  Documentation/devicetree/bindings/arc/interrupts.txt  24
-rw-r--r--  MAINTAINERS  6
-rw-r--r--  arch/arc/Kbuild  2
-rw-r--r--  arch/arc/Kconfig  453
-rw-r--r--  arch/arc/Kconfig.debug  34
-rw-r--r--  arch/arc/Makefile  126
-rw-r--r--  arch/arc/boot/Makefile  26
-rw-r--r--  arch/arc/boot/dts/Makefile  13
-rw-r--r--  arch/arc/boot/dts/angel4.dts  55
-rw-r--r--  arch/arc/boot/dts/skeleton.dts  10
-rw-r--r--  arch/arc/boot/dts/skeleton.dtsi  37
-rw-r--r--  arch/arc/configs/fpga_defconfig  61
-rw-r--r--  arch/arc/include/asm/Kbuild  49
-rw-r--r--  arch/arc/include/asm/arcregs.h  433
-rw-r--r--  arch/arc/include/asm/asm-offsets.h  9
-rw-r--r--  arch/arc/include/asm/atomic.h  232
-rw-r--r--  arch/arc/include/asm/barrier.h  42
-rw-r--r--  arch/arc/include/asm/bitops.h  516
-rw-r--r--  arch/arc/include/asm/bug.h  37
-rw-r--r--  arch/arc/include/asm/cache.h  75
-rw-r--r--  arch/arc/include/asm/cacheflush.h  67
-rw-r--r--  arch/arc/include/asm/checksum.h  101
-rw-r--r--  arch/arc/include/asm/clk.h  22
-rw-r--r--  arch/arc/include/asm/cmpxchg.h  143
-rw-r--r--  arch/arc/include/asm/current.h  32
-rw-r--r--  arch/arc/include/asm/defines.h  56
-rw-r--r--  arch/arc/include/asm/delay.h  68
-rw-r--r--  arch/arc/include/asm/disasm.h  116
-rw-r--r--  arch/arc/include/asm/dma-mapping.h  221
-rw-r--r--  arch/arc/include/asm/dma.h  14
-rw-r--r--  arch/arc/include/asm/elf.h  78
-rw-r--r--  arch/arc/include/asm/entry.h  724
-rw-r--r--  arch/arc/include/asm/exec.h  15
-rw-r--r--  arch/arc/include/asm/futex.h  151
-rw-r--r--  arch/arc/include/asm/io.h  105
-rw-r--r--  arch/arc/include/asm/irq.h  25
-rw-r--r--  arch/arc/include/asm/irqflags.h  153
-rw-r--r--  arch/arc/include/asm/kdebug.h  19
-rw-r--r--  arch/arc/include/asm/kgdb.h  61
-rw-r--r--  arch/arc/include/asm/kprobes.h  62
-rw-r--r--  arch/arc/include/asm/linkage.h  63
-rw-r--r--  arch/arc/include/asm/mach_desc.h  87
-rw-r--r--  arch/arc/include/asm/mmu.h  23
-rw-r--r--  arch/arc/include/asm/mmu_context.h  213
-rw-r--r--  arch/arc/include/asm/module.h  28
-rw-r--r--  arch/arc/include/asm/mutex.h  18
-rw-r--r--  arch/arc/include/asm/page.h  109
-rw-r--r--  arch/arc/include/asm/perf_event.h  13
-rw-r--r--  arch/arc/include/asm/pgalloc.h  134
-rw-r--r--  arch/arc/include/asm/pgtable.h  405
-rw-r--r--  arch/arc/include/asm/processor.h  151
-rw-r--r--  arch/arc/include/asm/prom.h  14
-rw-r--r--  arch/arc/include/asm/ptrace.h  130
-rw-r--r--  arch/arc/include/asm/sections.h  18
-rw-r--r--  arch/arc/include/asm/segment.h  24
-rw-r--r--  arch/arc/include/asm/serial.h  25
-rw-r--r--  arch/arc/include/asm/setup.h  37
-rw-r--r--  arch/arc/include/asm/smp.h  130
-rw-r--r--  arch/arc/include/asm/spinlock.h  144
-rw-r--r--  arch/arc/include/asm/spinlock_types.h  35
-rw-r--r--  arch/arc/include/asm/string.h  40
-rw-r--r--  arch/arc/include/asm/switch_to.h  41
-rw-r--r--  arch/arc/include/asm/syscall.h  72
-rw-r--r--  arch/arc/include/asm/syscalls.h  29
-rw-r--r--  arch/arc/include/asm/thread_info.h  121
-rw-r--r--  arch/arc/include/asm/timex.h  18
-rw-r--r--  arch/arc/include/asm/tlb-mmu1.h  104
-rw-r--r--  arch/arc/include/asm/tlb.h  58
-rw-r--r--  arch/arc/include/asm/tlbflush.h  28
-rw-r--r--  arch/arc/include/asm/uaccess.h  751
-rw-r--r--  arch/arc/include/asm/unaligned.h  29
-rw-r--r--  arch/arc/include/asm/unwind.h  163
-rw-r--r--  arch/arc/include/uapi/asm/Kbuild  12
-rw-r--r--  arch/arc/include/uapi/asm/byteorder.h  18
-rw-r--r--  arch/arc/include/uapi/asm/cachectl.h  28
-rw-r--r--  arch/arc/include/uapi/asm/elf.h  26
-rw-r--r--  arch/arc/include/uapi/asm/page.h  39
-rw-r--r--  arch/arc/include/uapi/asm/ptrace.h  48
-rw-r--r--  arch/arc/include/uapi/asm/setup.h  6
-rw-r--r--  arch/arc/include/uapi/asm/sigcontext.h  22
-rw-r--r--  arch/arc/include/uapi/asm/signal.h  27
-rw-r--r--  arch/arc/include/uapi/asm/swab.h  98
-rw-r--r--  arch/arc/include/uapi/asm/unistd.h  34
-rw-r--r--  arch/arc/kernel/Makefile  33
-rw-r--r--  arch/arc/kernel/arc_hostlink.c  58
-rw-r--r--  arch/arc/kernel/arcksyms.c  56
-rw-r--r--  arch/arc/kernel/asm-offsets.c  64
-rw-r--r--  arch/arc/kernel/clk.c  21
-rw-r--r--  arch/arc/kernel/ctx_sw.c  109
-rw-r--r--  arch/arc/kernel/ctx_sw_asm.S  58
-rw-r--r--  arch/arc/kernel/devtree.c  123
-rw-r--r--  arch/arc/kernel/disasm.c  538
-rw-r--r--  arch/arc/kernel/entry.S  839
-rw-r--r--  arch/arc/kernel/fpu.c  55
-rw-r--r--  arch/arc/kernel/head.S  111
-rw-r--r--  arch/arc/kernel/irq.c  273
-rw-r--r--  arch/arc/kernel/kgdb.c  205
-rw-r--r--  arch/arc/kernel/kprobes.c  525
-rw-r--r--  arch/arc/kernel/module.c  145
-rw-r--r--  arch/arc/kernel/process.c  235
-rw-r--r--  arch/arc/kernel/ptrace.c  158
-rw-r--r--  arch/arc/kernel/reset.c  33
-rw-r--r--  arch/arc/kernel/setup.c  473
-rw-r--r--  arch/arc/kernel/signal.c  360
-rw-r--r--  arch/arc/kernel/smp.c  332
-rw-r--r--  arch/arc/kernel/stacktrace.c  254
-rw-r--r--  arch/arc/kernel/sys.c  18
-rw-r--r--  arch/arc/kernel/time.c  265
-rw-r--r--  arch/arc/kernel/traps.c  170
-rw-r--r--  arch/arc/kernel/troubleshoot.c  322
-rw-r--r--  arch/arc/kernel/unaligned.c  245
-rw-r--r--  arch/arc/kernel/unwind.c  1329
-rw-r--r--  arch/arc/kernel/vmlinux.lds.S  163
-rw-r--r--  arch/arc/lib/Makefile  9
-rw-r--r--  arch/arc/lib/memcmp.S  124
-rw-r--r--  arch/arc/lib/memcpy-700.S  66
-rw-r--r--  arch/arc/lib/memset.S  59
-rw-r--r--  arch/arc/lib/strchr-700.S  123
-rw-r--r--  arch/arc/lib/strcmp.S  96
-rw-r--r--  arch/arc/lib/strcpy-700.S  70
-rw-r--r--  arch/arc/lib/strlen.S  83
-rw-r--r--  arch/arc/mm/Makefile  10
-rw-r--r--  arch/arc/mm/cache_arc700.c  768
-rw-r--r--  arch/arc/mm/dma.c  94
-rw-r--r--  arch/arc/mm/extable.c  63
-rw-r--r--  arch/arc/mm/fault.c  228
-rw-r--r--  arch/arc/mm/init.c  187
-rw-r--r--  arch/arc/mm/ioremap.c  91
-rw-r--r--  arch/arc/mm/tlb.c  645
-rw-r--r--  arch/arc/mm/tlbex.S  408
-rw-r--r--  arch/arc/oprofile/Makefile  9
-rw-r--r--  arch/arc/oprofile/common.c  26
-rw-r--r--  arch/arc/plat-arcfpga/Kconfig  84
-rw-r--r--  arch/arc/plat-arcfpga/Makefile  12
-rw-r--r--  arch/arc/plat-arcfpga/include/plat/irq.h  31
-rw-r--r--  arch/arc/plat-arcfpga/include/plat/memmap.h  31
-rw-r--r--  arch/arc/plat-arcfpga/include/plat/smp.h  118
-rw-r--r--  arch/arc/plat-arcfpga/irq.c  25
-rw-r--r--  arch/arc/plat-arcfpga/platform.c  226
-rw-r--r--  arch/arc/plat-arcfpga/smp.c  171
-rw-r--r--  arch/parisc/Kconfig  1
-rw-r--r--  include/asm-generic/checksum.h  4
-rw-r--r--  include/asm-generic/uaccess.h  14
-rw-r--r--  init/Kconfig  8
-rw-r--r--  kernel/sysctl.c  5
-rw-r--r--  lib/checksum.c  2
-rw-r--r--  tools/perf/perf.h  6
147 files changed, 19552 insertions(+), 1 deletion(-)
diff --git a/Documentation/devicetree/bindings/arc/interrupts.txt b/Documentation/devicetree/bindings/arc/interrupts.txt
new file mode 100644
index 000000000000..9a5d562435ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/interrupts.txt
@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+ The core interrupt controller provides 32 prioritised interrupts (2 levels)
+ to the ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+ The single-cell "interrupts" property of a device specifies the IRQ number,
+ in the range 0 to 31.
+
+ The intc is accessed via the special ARC AUX register interface, hence the "reg"
+ property is not specified.
+
+Example:
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 6db1c6bdf015..aea0adf414dc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7682,6 +7682,12 @@ F: lib/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
+SYNOPSYS ARC ARCHITECTURE
+M: Vineet Gupta <vgupta@synopsys.com>
+L: linux-snps-arc@vger.kernel.org
+S: Supported
+F: arch/arc/
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644
index 000000000000..082d329d3245
--- /dev/null
+++ b/arch/arc/Kbuild
@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644
index 000000000000..e6f4eca09ee3
--- /dev/null
+++ b/arch/arc/Kconfig
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+ def_bool y
+ select CLONE_BACKWARDS
+ # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+ select DEVTMPFS if !INITRAMFS_SOURCE=""
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_FIND_FIRST_BIT
+ # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_SHOW
+ select GENERIC_KERNEL_EXECVE
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SMP_IDLE_THREAD
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_IOREMAP_PROT
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_MEMBLOCK
+ select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select OF
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config MMU
+ def_bool y
+
+config NO_IOPORT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config BINFMT_ELF
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+ select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+config NO_DMA
+ def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+#New platform adds here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+ prompt "ARC Core"
+ default ARC_CPU_770
+
+config ARC_CPU_750D
+ bool "ARC750D"
+ help
+ Support for ARC750 core
+
+config ARC_CPU_770
+ bool "ARC770"
+ select ARC_CPU_REL_4_10
+ help
+ Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+ This core has a bunch of cool new features:
+ -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
Shared Address Spaces (for sharing TLB entries in MMU)
+ -Caches: New Prog Model, Region Flush
+ -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+ bool "Enable Big Endian Mode"
+ default n
+ help
+ Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+ bool
+
+config SMP
+ bool "Symmetric Multi-Processing (Incomplete)"
+ default n
+ select USE_GENERIC_SMP_HELPERS
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+ def_bool n
+
+config ARC_HAS_COH_LLSC
+ def_bool n
+
+config ARC_HAS_COH_RTSC
+ def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+ def_bool n
+
+endif
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "2"
+
+menuconfig ARC_CACHE
+ bool "Enable Cache Support"
+ default y
+ # if SMP, cache enabled ONLY if ARC implementation has cache coherency
+ depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+ int "Cache Line Length (as power of 2)"
+ range 5 7
+ default "6"
+ help
+ Starting with ARC700 4.9, cache line length is configurable.
+ This option specifies "N", with line-len = 2^N, so line lengths
+ of 32, 64 and 128 are specified by 5, 6 and 7 respectively.
+ Linux only supports the same line length for I and D caches.
+
+config ARC_HAS_ICACHE
+ bool "Use Instruction Cache"
+ default y
+
+config ARC_HAS_DCACHE
+ bool "Use Data Cache"
+ default y
+
+config ARC_CACHE_PAGES
+ bool "Per Page Cache Control"
+ default y
+ depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+ help
+ This can be used to override the global I/D Cache Enable on a
+ per-page basis (but only for pages accessed via the MMU, i.e.
+ Kernel Virtual address or User Virtual Address);
+ TLB entries have a per-page Cache Enable Bit.
+ Note that Global I/D ENABLE + Per Page DISABLE works, but the corollary
+ Global DISABLE + Per Page ENABLE won't work.
+
+endif #ARC_CACHE
+
+config ARC_HAS_ICCM
+ bool "Use ICCM"
+ default n
+ help
+ Single Cycle RAMs to store Fast Path Code
+
+config ARC_ICCM_SZ
+ int "ICCM Size in KB"
+ default "64"
+ depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+ bool "Use DCCM"
+ default n
+ help
+ Single Cycle RAMs to store Fast Path Data
+
+config ARC_DCCM_SZ
+ int "DCCM Size in KB"
+ default "64"
+ depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+ hex "DCCM map address"
+ default "0xA0000000"
+ depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+ bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+ default y
+ help
+ Influences how gcc generates code for MPY operations.
+ If enabled, MPYxx insns are generated, provided by Standard/XMAC
+ Multiplier. Otherwise a software multiply lib is used
+
+choice
+ prompt "ARC700 MMU Version"
+ default ARC_MMU_V3 if ARC_CPU_770
+ default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+ bool "MMU v1"
+ help
+ Orig ARC700 MMU
+
+config ARC_MMU_V2
+ bool "MMU v2"
+ help
+ Fixed the deficiency of v1 - possible thrashing in memcpy scenario
+ when 2 D-TLB and 1 I-TLB entries index into same 2way set.
+
+config ARC_MMU_V3
+ bool "MMU v3"
+ depends on ARC_CPU_770
+ help
+ Introduced with ARC700 4.10: New Features
+ Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+ Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+ prompt "MMU Page Size"
+ default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+ bool "8KB"
+ help
+ Choose between 8k vs 16k
+
+config ARC_PAGE_SIZE_16K
+ bool "16KB"
+ depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+ bool "4KB"
+ depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+ bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ default n
+ # Timer HAS to be high priority, for any other high priority config
+ select ARC_IRQ3_LV2
+ # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+ depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+ bool
+
+config ARC_IRQ5_LV2
+ bool
+
+config ARC_IRQ6_LV2
+ bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+ bool "Enable FPU state persistence across context switch"
+ default n
+ help
+ The Double Precision Floating Point unit has dedicated regs which
+ need to be saved/restored across context-switch.
+ Note that ARC FPU is overly simplistic, unlike say x86, which has
+ hardware pieces to allow software to conditionally save/restore,
+ based on actual usage of FPU by a task. Thus our implementation does
+ this for all tasks in the system.
+
+menuconfig ARC_CPU_REL_4_10
+ bool "Enable support for Rel 4.10 features"
+ default n
+ help
+ -ARC770 (and dependent features) enabled
+ -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+ bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+ default y
+ depends on ARC_CPU_770
+ # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+ depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+ bool "Insn: SWAPE (endian-swap)"
+ default y
+ depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+ bool "Insn: RTSC (64-bit r/o cycle counter)"
+ default y
+ depends on ARC_CPU_REL_4_10
+ # if SMP, enable RTSC only if counter is coherent across cores
+ depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+ hex "Linux Link Address"
+ default "0x80000000"
+ help
+ ARC700 divides the 32 bit phy address space into two equal halves
+ -Lower 2G (0 - 0x7FFF_FFFF ) is user virtual, translated by MMU
+ -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel
+ Typically the Linux kernel is linked at the start of the untranslated addr,
+ hence the default value of 0x8000_0000.
+ However some customers have peripherals mapped at this addr, so
+ Linux needs to be scooted a bit.
+ If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+ bool "Dedicate Register r25 for current_task pointer"
+ default y
+ help
+ This reserves Register R25 to point to the Current Task in
+ kernel mode. This saves a memory access for each such access.
+
+
+config ARC_MISALIGN_ACCESS
+ bool "Emulate unaligned memory access (userspace only)"
+ default n
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ help
+ This enables misaligned 16 & 32 bit memory access from user space.
+ Use ONLY-IF-ABSOLUTELY-NECESSARY as it will be very slow and can also hide
+ potential bugs in the code.
+
+config ARC_STACK_NONEXEC
+ bool "Make stack non-executable"
+ default n
+ help
+ To disable the execute permissions of stack/heap of processes
+ which are enabled by default.
+
+config HZ
+ int "Timer Frequency"
+ default 100
+
+config ARC_METAWARE_HLINK
+ bool "Support for Metaware debugger assisted Host access"
+ default n
+ help
+ This option allows Linux userland apps to directly access the
+ host file system (open/creat/read/write etc) with help from the
+ Metaware Debugger. This can come in handy for Linux-host communication
+ when there is no real usable peripheral such as EMAC.
+
+menuconfig ARC_DBG
+ bool "ARC debugging"
+ default y
+
+config ARC_DW2_UNWIND
+ bool "Enable DWARF specific kernel stack unwind"
+ depends on ARC_DBG
+ default y
+ select KALLSYMS
+ help
+ Compiles the kernel with DWARF unwind information and can be used
+ to get stack backtraces.
+
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame unwind information
+
+config ARC_DBG_TLB_PARANOIA
+ bool "Paranoia Checks in Low Level TLB Handlers"
+ depends on ARC_DBG
+ default n
+
+config ARC_DBG_TLB_MISS_COUNT
+ bool "Profile TLB Misses"
+ default n
+ select DEBUG_FS
+ depends on ARC_DBG
+ help
+ Counts the number of I and D TLB Misses and exports them via Debugfs.
+ The counters can be cleared via Debugfs as well.
+
+config CMDLINE
+ string "Kernel command line to built-in"
+ default "print-fatal-signals=1"
+ help
+ The default command line which will be appended to the optional
+ u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+ bool "Support U-boot kernel command line passing"
+ default n
+ help
+ If you are using U-boot (www.denx.de) and wish to pass the kernel
+ command line from the U-boot environment to the Linux kernel then
+ switch this option on.
+ ARC U-boot will set up the cmdline in RAM/flash and set r2 to point
+ to it. Kernel startup code will copy the string into the cmdline buffer
+ and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+ string "Built in DTB"
+ help
+ Set the name of the DTB to embed in the vmlinux binary.
+ Leaving it blank selects the minimal "skeleton" dtb.
+
+source "kernel/Kconfig.preempt"
+
+endmenu # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644
index 000000000000..962c6099659e
--- /dev/null
+++ b/arch/arc/Kconfig.debug
@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit.
+
+config 16KSTACKS
+ bool "Use 16Kb for kernel stacks instead of 8Kb"
+ help
+ If you say Y here the kernel will use a 16Kb stacksize for the
+ kernel stack attached to each process/thread. The default is 8K.
+ This increases the resident kernel footprint and will cause fewer
+ threads to run on the system and also increase the pressure
+ on the VM subsystem for higher order allocations.
+
+endmenu
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644
index 000000000000..92379c7cbc1a
--- /dev/null
+++ b/arch/arc/Makefile
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer reported bug where some code built in kernel was NOT using
+# any kernel headers, and missing the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44) += -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y += -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y += -marclinux
+
+ARC_LIBGCC := -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+ cflags-y += -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true.
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which, coupled with -mno-mpy, will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+ ARC_LIBGCC := -marc600
+ ifneq ($(atleast_gcc44),y)
+ cflags-y += -multcost=30
+ endif
+endif
+
+LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE += -mlong-calls
+
+# Finally dump everything into the kernel build system
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(KBUILD_CFLAGS)
+LDFLAGS += $(ldflags-y)
+
+head-y := arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y += arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y += arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
+
+libs-y += arch/arc/lib/ $(LIBGCC)
+
+# default target for make without any arguments.
+KBUILD_IMAGE := bootpImage
+
+all: $(KBUILD_IMAGE)
+boot := arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal (shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+# However lib/decompress_inflate.o (.init.text) calls
+# zlib_inflate_workspacesize (.text) causing relocation errors.
+# Thus forcing all extern calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644
index 000000000000..7d514c24e095
--- /dev/null
+++ b/arch/arc/boot/Makefile
@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for the ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+ grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+
+PHONY += FORCE
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644
index 000000000000..5776835d583f
--- /dev/null
+++ b/arch/arc/boot/dts/Makefile
@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y := angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+ builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
new file mode 100644
index 000000000000..bae4f936cb03
--- /dev/null
+++ b/arch/arc/boot/dts/angel4.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "snps,arc-angel4";
+ clock-frequency = <80000000>; /* 80 MHz */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+ bootargs = "console=ttyARC0,115200n8";
+ };
+
+ aliases {
+ serial0 = &arcuart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ arcuart0: serial@c0fc1000 {
+ compatible = "snps,arc-uart";
+ reg = <0xc0fc1000 0x100>;
+ interrupts = <5>;
+ clock-frequency = <80000000>;
+ current-speed = <115200>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton.dts b/arch/arc/boot/dts/skeleton.dts
new file mode 100644
index 000000000000..25a84fb5b3dc
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dts
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644
index 000000000000..a870bdd5e404
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+ compatible = "snps,arc";
+ clock-frequency = <80000000>; /* 80 MHz */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
new file mode 100644
index 000000000000..b8698067ebbe
--- /dev/null
+++ b/arch/arc/configs/fpga_defconfig
@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 000000000000..48af742f8b5a
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 000000000000..1b907c465666
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_DVFB_BCR 0x64
+#define ARC_REG_EXTARITH_BCR 0x65
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR 0x6f
+#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR 0x75
+#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT 0 /* CPU Halted */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK (1<<STATUS_H_BIT)
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
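
/*
 * Editor's illustration, not part of the patch: these masks are what the
 * rest of the port uses to test saved processor state. A user_mode()-style
 * check on a saved status32 word boils down to the sketch below (real code
 * would of course live in the !__ASSEMBLY__ part of a header).
 */
static inline int sketch_in_user_mode(unsigned long status32)
{
	return (status32 & STATUS_U_MASK) != 0;	/* U bit set => came from userspace */
}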
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK 0xff0000
+#define ECR_CODE_MASK 0x00ff00
+#define ECR_PARAM_MASK 0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
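
/*
 * Editor's illustration, not part of the patch: splitting an ECR value with
 * the masks above. The helper name is made up; real code would sit in the
 * !__ASSEMBLY__ section of a header.
 */
static inline void sketch_decode_ecr(unsigned int ecr)
{
	unsigned int vec   = (ecr & ECR_VEC_MASK)  >> 16; /* e.g. 0x23 = ECR_V_PROTV */
	unsigned int code  = (ecr & ECR_CODE_MASK) >> 8;  /* e.g. 0x04 = misaligned data */
	unsigned int param =  ecr & ECR_PARAM_MASK;       /* extra info for certain types */

	(void)vec; (void)code; (void)param;
}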
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_INTR_VEC_BASE 0x25
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
+#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
+#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
+
+#define TIMER_CTRL_IE (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
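
/*
 * Editor's illustration, not part of the patch: arming TIMER0 for a one-shot
 * event, roughly what arch/arc/kernel/time.c in this series does. It relies
 * on write_aux_reg() defined further down in this header, so real code would
 * live in the !__ASSEMBLY__ section.
 */
static inline void sketch_timer0_arm(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);	/* fire after 'cycles' ticks */
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);		/* start counting from zero */
	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}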
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI /* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
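
/*
 * Editor's illustration, not part of the patch: probing the joint TLB for a
 * given PD0 (vaddr | ASID) with the commands above, the pattern that
 * arch/arc/mm/tlb.c in this series follows. read/write_aux_reg() are defined
 * further down in this header.
 */
static inline int sketch_tlb_probe(unsigned int pd0)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, pd0);		/* what to look for */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);	/* run the lookup */
	idx = read_aux_reg(ARC_REG_TLBINDEX);		/* matching slot, or error flag */

	return (idx & TLB_LKUP_ERR) ? -1 : (int)idx;
}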
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG 0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
+/* MMU Management regs */
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " lr %0, [%1]" \
+ : "=r"(__ret) \
+ : "i"(reg)); \
+ __ret; \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ * write_aux_reg(0x69, some_val);
+ * This generates tightest code.
+ */
+#define write_aux_reg(reg_imm, val) \
+({ \
+ __asm__ __volatile__( \
+ " sr %0, [%1] \n" \
+ : \
+ : "ir"(val), "i"(reg_imm)); \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ * reg_num = 0x69
+ * write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val) \
+({ \
+ unsigned int tmp; \
+ \
+ __asm__ __volatile__( \
+ " ld %0, [%2] \n\t" \
+ " sr %1, [%0] \n\t" \
+ : "=&r"(tmp) \
+ : "r"(val), "memory"(&reg_in_var)); \
+})
+
+#endif
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
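
/*
 * Editor's illustration, not part of the patch: a BCR bitfield like the one
 * above is normally filled in via the READ_BCR() helper defined earlier;
 * boot-time CPU identification in this series does roughly the following.
 */
static inline void sketch_read_core_identity(struct bcr_identity *id)
{
	READ_BCR(AUX_IDENTITY, *id);	/* lr from aux reg 4 into the bitfield struct */
}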
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+ unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID 0x1
+#define EXTN_NORM_VALID 0x2
+#define EXTN_MINMAX_VALID 0x2
+#define EXTN_BARREL_VALID 0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+ norm:2, swap:1;
+#else
+ unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+ crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, type:8, ver:8;
+#else
+ unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+ unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+ unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int addr:24, ver:8;
+#else
+ unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+ unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+ unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+ struct cpuinfo_arc_cache icache, dcache;
+ struct cpuinfo_arc_mmu mmu;
+ struct bcr_identity core;
+ unsigned int timers;
+ unsigned int vec_base;
+ unsigned int uncached_base;
+ struct cpuinfo_arc_ccm iccm, dccm;
+ struct bcr_extn extn;
+ struct bcr_extn_xymem extn_xymem;
+ struct bcr_extn_mac_mul extn_mac_mul;
+ struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..dad18768fe43
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 000000000000..83f03ca6caf6
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v) ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
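
/*
 * Editor's note, not in the patch: the llock/scond/bnz sequence above is the
 * canonical LL/SC retry loop -- load while acquiring a reservation, modify,
 * store only if the reservation still holds, branch back and retry otherwise.
 * The same pattern recurs in every LLSC-based helper in this port.
 */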
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bic %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(addr), "ir"(mask)
+ : "cc");
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter = i;
+ atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter += i;
+ atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter -= i;
+ atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ *addr &= ~mask;
+ atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ c = old; \
+ c; \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
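
/*
 * Editor's illustration, not part of the patch: the classic consumer of the
 * add-unless primitive is "take a reference only if the object is still
 * live". The helper name below is made up for the example.
 */
static inline int sketch_try_get_ref(atomic_t *refcnt)
{
	return __atomic_add_unless(refcnt, 1, 0) != 0;	/* 0 => count was already zero */
}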
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 000000000000..f6cb7c4ffb35
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends() mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#define smp_read_barrier_depends() do { } while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 000000000000..647a83a8e756
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bset %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bclr %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bxor %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+/*
+ * Semantically:
+ * Test the bit
+ * if clear
+ * set it and return 0 (old value)
+ * else
+ * return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of the bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bset %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bclr %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bxor %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ *     This avoids extra code to be generated for pointer arithmetic, since
+ *     the compiler is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL etc)
+ *     only consider bottom 5 bits of @nr, so NO need to mask them off.
+ *     (GCC Quirk: however for constant @nr we still need to do the masking
+ *             at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ unsigned long mask;
+
+ addr += nr >> 5;
+
+ /* ARC700 only considers 5 bits in bit-fiddling insn */
+ mask = 1 << nr;
+
+ return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr), (addr)) : \
+ __test_bit((nr), (addr)))
+
+/*
+ * Count the number of leading zeros, starting from the MSB
+ * Helper for fls( ) and friends
+ * This is a pure count, so the (1-32) or (0-31) conventions don't apply
+ * The result can be 0 to 32, based on the number of leading 0's
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
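
The fls()/ffs() conventions spelled out above (1-based results, with 0 for an all-zero word) are the part callers most often trip over. The standalone sketch below is illustrative only (my_fls/my_ffs are hypothetical names, not part of this patch); it mirrors the constant_fls() bisection in plain C and checks the documented corner cases.

#include <assert.h>
#include <stdio.h>

/* Same bisection as constant_fls() above: returns 1..32, or 0 for x == 0 */
static int my_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8; }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4; }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2; }
	if (!(x & 0x80000000u)) { r -= 1; }
	return r;
}

/* ffs() built from fls() exactly as the header does: fls(x & -x) */
static int my_ffs(unsigned int x)
{
	return my_fls(x & -x);
}

int main(void)
{
	assert(my_fls(1) == 1 && my_fls(0x80000000u) == 32 && my_fls(0) == 0);
	assert(my_ffs(6) == 2 && my_ffs(0x80000000u) == 32 && my_ffs(0) == 0);
	printf("fls/ffs corner cases hold\n");
	return 0;
}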
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 000000000000..2ad8f9b1c54b
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg);
+
+#define BUG() do { \
+ dump_stack(); \
+ pr_warn("Kernel BUG in %s: %s: %d!\n", \
+ __FILE__, __func__, __LINE__); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 000000000000..6632273861fd
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case cache line size is not configured, set up a default for rest of kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS 2
+#define ARC_DCACHE_WAYS 4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in the top 1G (0xC000_0000 onwards).
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di), hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* __ARC_ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 000000000000..97ee96f26505
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ); its absence
+ *  was causing gdbserver to not set up breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches,
+ * so these are NOPs (valid for non-aliasing VIPT D$ configurations only)
+ */
+#define flush_cache_dup_mm(mm) /* called on fork */
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len); \
+
+#endif
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 000000000000..10957298b7a3
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have modulo scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The two 16-bit half-words comprising the 32-bit sum are added, any carry out
+ * of the low half-word is added back, and the final half-word result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
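
The csum_fold() comment above compresses a fair bit of arithmetic: rotate-by-16, invert and subtract is equivalent to the textbook end-around-carry fold. A plain-C sanity check of that equivalence is sketched below (illustrative only; fold_fast/fold_ref are hypothetical names, not part of this patch).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The rotate/invert/subtract fold used by the header, rendered in plain C */
static uint16_t fold_fast(uint32_t s)
{
	uint32_t r = (s << 16) | (s >> 16);	/* rotate by 16 */

	return (uint16_t)((~s - r) >> 16);
}

/* Textbook fold: add the two half-words with end-around carry, then invert */
static uint16_t fold_ref(uint32_t s)
{
	uint32_t t = (s & 0xffff) + (s >> 16);

	t = (t & 0xffff) + (t >> 16);
	return (uint16_t)~t;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0xffff, 0x10000, 0x0001ffff,
			       0x12345678, 0xffffffff };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(fold_fast(samples[i]) == fold_ref(samples[i]));

	printf("both folds agree\n");
	return 0;
}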
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 000000000000..bf9d29f5bd53
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still the better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+ return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..03cd6894855d
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " brne %0, %2, 2f \n"
+ " scond %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected),
+ "r"(new) /* can't be "ir". scond can't take limm for "b" */
+ : "cc");
+
+ return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long flags;
+ int prev;
+ volatile unsigned long *p = ptr;
+
+ atomic_ops_lock(flags);
+ prev = *p;
+ if (prev == expected)
+ *p = new;
+ atomic_ops_unlock(flags);
+ return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since it is not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
+ * which also happen to be atomic_ops_lock.
+ *
+ * Thus despite being semantically different, the implementation of atomic_cmpxchg()
+ * is the same as cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r"(val)
+ : "r"(ptr)
+ : "memory");
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * On ARC700, the EX insn is inherently atomic, so by default "vanilla" xchg()
+ * need not require any locking. However there's a quirk.
+ * ARC lacks native CMPXCHG, so it is emulated (see above) using external locking -
+ * incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
+ * Now, llist code uses cmpxchg() and xchg() on the same data, so xchg() needs to
+ * abide by the same serializing rules, thus it ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ * if (UP or LLSC)
+ * xchg doesn't need serialization
+ * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ * xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with) \
+({ \
+ unsigned long flags; \
+ typeof(*(ptr)) old_val; \
+ \
+ atomic_ops_lock(flags); \
+ old_val = _xchg(ptr, with); \
+ atomic_ops_unlock(flags); \
+ old_val; \
+})
+
+#else
+
+#define xchg(ptr, with) _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following
+ * definition is incorrect. But here's the rationale:
+ * SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ * is natively "SMP safe", no serialization required).
+ * UP : other atomics disable IRQ, so there is no way an atomic_xchg() from
+ * a different context could clobber them. atomic_xchg() itself would be 1 insn,
+ * so it can't be clobbered by others. Thus no serialization is required when
+ * atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
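
For context on why cmpxchg() and xchg() must share serialization, the canonical caller-side retry pattern they exist to support is sketched below as a single-threaded model in plain C (model_cmpxchg/add_to_counter are hypothetical names, not the kernel API; a real caller would of course rely on the atomic primitives above).

#include <stdio.h>

/* Single-threaded stand-in for cmpxchg(): returns the value found at *p,
 * and stores 'new' only if that value equalled 'expected'. */
static unsigned long model_cmpxchg(unsigned long *p, unsigned long expected,
				   unsigned long new)
{
	unsigned long prev = *p;

	if (prev == expected)
		*p = new;
	return prev;
}

/* Canonical lock-free update pattern built on cmpxchg(): reread and retry
 * until no other updater slipped in between the read and the swap. */
static void add_to_counter(unsigned long *counter, unsigned long delta)
{
	unsigned long old, seen;

	do {
		old = *counter;
		seen = model_cmpxchg(counter, old, old + delta);
	} while (seen != old);
}

int main(void)
{
	unsigned long counter = 40;

	add_to_counter(&counter, 2);
	printf("counter = %lu\n", counter);	/* prints 42 */
	return 0;
}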
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 000000000000..87b918585c4a
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644
index 000000000000..6097bb439cc5
--- /dev/null
+++ b/arch/arc/include/asm/defines.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE 1
+#else
+#define __CONFIG_ARC_HAS_ICACHE 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE 1
+#else
+#define __CONFIG_ARC_HAS_DCACHE 0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 000000000000..442ce5d0f709
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h> /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ "1: sub.f %0, %0, 1 \n"
+ " jpnz 1b \n"
+ : "+r"(loops)
+ :
+ : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically if we multiply and divide a number by the same value the
+ * result remains unchanged: In this case, we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Dividing by 2^32 is simply a right shift by 32
+ * -We simply need to ensure that the multiply per the above eqn happens in
+ * 64-bit precision (if the CPU doesn't support it - gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (long long) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+	loops = ((long long)usecs * 4295 * HZ *
+		 (long long)loops_per_jiffy) >> 32;
+
+ __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
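
To make the 2^32 trick concrete, a small standalone calculation is sketched below; the HZ and loops_per_jiffy values are made-up assumptions, chosen only to compare the exact formula with the shift-based approximation (4295 is roughly 2^32 / 10^6).

#include <stdio.h>
#include <stdint.h>

#define HZ 100				/* assumed tick rate, for illustration */

int main(void)
{
	uint64_t lpj = 50000;		/* hypothetical loops_per_jiffy */
	uint64_t usecs = 150;

	/* Exact math from the comment: loops = (lpj * HZ / 1000000) * N */
	uint64_t exact = lpj * HZ * usecs / 1000000;

	/* The header's approximation: multiply by 4295 (~= 2^32 / 10^6),
	 * then divide by 2^32 via a right shift */
	uint64_t approx = (usecs * 4295 * HZ * lpj) >> 32;

	printf("exact=%llu approx=%llu\n",
	       (unsigned long long)exact, (unsigned long long)approx);
	return 0;
}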
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 000000000000..f1cce3d059a1
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word)	FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+				     (BITS(word, 17, 26) << 1)), 21)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+				     (BITS(word, 6, 15) << 11) | \
+				     (BITS(word, 17, 26) << 1)), 25)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
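
As a quick illustration of how these field helpers compose, the standalone snippet below exercises BITS() and sign_extend() on a couple of hand-checked values (illustrative only; it simply restates the macro definitions from the header).

#include <assert.h>
#include <stdio.h>

#define IS_BIT(word, n)		((word) & (1 << (n)))
#define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))

/* Same logic as the header's sign_extend(): extend a 'bits'-wide field */
static int sign_extend(int value, int bits)
{
	if (IS_BIT(value, (bits - 1)))
		value |= (0xffffffff << bits);

	return value;
}

int main(void)
{
	/* A 12-bit field holding -1 (0xFFF) extends to a 32-bit -1 */
	assert(sign_extend(0xfff, 12) == -1);
	/* A positive 12-bit field is left untouched */
	assert(sign_extend(0x07f, 12) == 0x7f);
	/* BITS() pulls an inclusive bit range: bits 4..7 of 0xF0 are 0xF */
	assert(BITS(0xf0u, 4, 7) == 0xf);

	printf("field helpers behave as documented\n");
	return 0;
}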
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..31f77aec0823
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * The dma_map_* APIs take cpu addresses, i.e. kernel logical addresses in the
+ * untranslated address space (0x8000_0000 based). The dma address (bus addr)
+ * ideally needs to be 0x0000_0000 based, hence these glue routines.
+ * However, given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which strictly requires a 0 based bus addr
+ * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses the page via the normal paddr, thus it needs to be explicitly
+ * made consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir) \
+do { \
+ if (__builtin_constant_p(dir)) \
+ __inline_dma_cache_sync(addr, sz, dir); \
+ else \
+ __arc_dma_cache_sync(addr, sz, dir); \
+}							\
+while (0)
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+ return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+#endif
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 000000000000..ca7c45181de9
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 000000000000..f4c8d36ebecb
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT 93
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_S25H_PCREL 0x10
+#define R_ARC_S25W_PCREL 0x11
+
+/* To set parameters in the core dumps */
+#define ELF_ARCH EM_ARCOMPACT
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * -we don't load something for the wrong architecture.
+ * -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 000000000000..23daa326fc9b
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer rely on the fact that
+ * if we are NOT in user mode, the stack is switched to kernel mode.
+ * e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
+ * its prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we also need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address write-back load ld.ab instead of separate ld/add instns
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h> /* For NR_syscalls defination */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLER_SAVED
+ st.a r0, [sp, -4]
+ st.a r1, [sp, -4]
+ st.a r2, [sp, -4]
+ st.a r3, [sp, -4]
+ st.a r4, [sp, -4]
+ st.a r5, [sp, -4]
+ st.a r6, [sp, -4]
+ st.a r7, [sp, -4]
+ st.a r8, [sp, -4]
+ st.a r9, [sp, -4]
+ st.a r10, [sp, -4]
+ st.a r11, [sp, -4]
+ st.a r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+ ld.ab r12, [sp, 4]
+ ld.ab r11, [sp, 4]
+ ld.ab r10, [sp, 4]
+ ld.ab r9, [sp, 4]
+ ld.ab r8, [sp, 4]
+ ld.ab r7, [sp, 4]
+ ld.ab r6, [sp, 4]
+ ld.ab r5, [sp, 4]
+ ld.ab r4, [sp, 4]
+ ld.ab r3, [sp, 4]
+ ld.ab r2, [sp, 4]
+ ld.ab r1, [sp, 4]
+ ld.ab r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * on kernel stack.
+ * User mode callee regs need to be saved in case of
+ * -fork and friends for replicating from parent to child
+ * -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching task ptr. Low level exception/ISR code saves user mode r25
+ * into task->thread.user_r25. So it needs to be retrieved from there and
+ * saved into kernel stack with rest of callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Retrieve orig r25 and save it on stack
+ ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ st.a r12, [sp, -4]
+#else
+ st.a r25, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * kernel mode callee regs need to be saved in case of context switch
+ * If r25 is used for caching task pointer then that need not be saved
+ * as it can be re-created from current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ sub sp, sp, 8
+#else
+ st.a r25, [sp, -4]
+ sub sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ * This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this shd only be called in switch_to for loading
+ * switched-IN task's CALLEE Reg File.
+ * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ * which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
+#else
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal where tracer might have changed callee regs
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching task ptr. Ptrace would have modified on-kernel-stack value of
+ * r25, which needs to be shoved back into task->thread.user_r25, from where
+ * the low level exception/ISR return code will retrieve it along with the rest
+ * of the callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld.ab r12, [sp, 4]
+ st r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+ ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hosts the stack
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page where stack begins (grows upwards) */
+ add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP is set to kernel mode stack pointer
+ * If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * a L2 IRQ "Interrupts" L1
+ * That way although the L2 IRQ happened in Kernel mode, the stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in
+ * L1 ISR, caused SP to go whaco (some small value which looks like
+ * USER stk) and then we take L2 ISR.
+ * The above brlo alone would treat it as a valid L1-L2 scenario
+ * instead of shouting aloud.
+ * The only feasible way is to make sure this L2 happened in
+ * the L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+ * safe-keeping not really needed, but it keeps the epilogue code
+ * (SP restore) simpler/uniform.
+ */
+ b.d 77f
+
+ st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+ /* If current task pointer cached in r25, time to
+ * -safekeep USER r25 in task->thread_struct->user_r25
+ * -load r25 with current task ptr
+ */
+ st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+ mov r25, r9
+#endif
+
+ /* With current tsk in r9, get it's kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+#ifdef PT_REGS_CANARY
+ st 0xabcdabcd, [r9, 0]
+#endif
+
+ /* Save Pre Intr/Exception User SP on kernel stack */
+ st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
+
+ /* CAUTION:
+ * SP should be set at the very end when we are done with everything
+ * In case of 2 levels of interrupt we depend on value of SP to assume
+ * that everything else is done (loading r25 etc)
+ */
+
+ /* set SP to point to kernel mode stack */
+ mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN reg
+
+ ld \reg, [sp, PT_status32]
+ bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+ bset \reg, \reg, STATUS_L_BIT
+ sr \reg, [erstatus]
+ mov \reg, 55f
+ sr \reg, [eret]
+
+ rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of ptregs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+ sr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ st \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG reg
+#ifdef CONFIG_SMP
+ lr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also an exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION marker
+
+ st \marker, [sp, 8]
+ st r0, [sp, 4] /* orig_r0, needed only for sys calls */
+
+ /* Restore r9 used to code the early prologue */
+ EXCPN_PROLOG_RESTORE_REG r9
+
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ lr r9, [eret]
+ st.a r9, [sp, -4]
+ lr r9, [erstatus]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [erbta]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbeef
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+ SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+ /*
+ * Setup pt_regs->orig_r8.
+ * Encode syscall number (r8) in upper short word of event type (r9)
+ * N.B. #1: This is already endian safe (see ptrace.h)
+ * #2: Only r9 can be used as scratch as it is already clobbered
+ * and its contents are no longer needed by the latter part
+ * of exception prologue
+ */
+ lsl r9, r8, 16
+ or r9, r9, orig_r8_IS_SCALL
+
+ SAVE_ALL_EXCEPTION r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [erbta]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [erstatus]
+ ld.ab r9, [sp, 4]
+ sr r9, [eret]
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+ /* restore original r9 , saved in int1_saved_reg
+ * It will be saved on stack in macro: SAVE_CALLER_SAVED
+ */
+#ifdef CONFIG_SMP
+ lr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld r9, [@int1_saved_reg]
+#endif
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink1, [sp, -4]
+ lr r9, [status32_l1]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l1]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee1
+ st r9, [sp, -4]
+#endif
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+ /* TODO-vineetg: SMP we can't use global nor can we use
+ * SCRATCH0 as we do for int1 because while int1 is using
+ * it, int2 can come
+ */
+	/* restore original r9, saved in int2_saved_reg */
+ ld r9, [@int2_saved_reg]
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink2, [sp, -4]
+ lr r9, [status32_l2]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l2]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee2
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4] /* Actual reg file */
+ sr r9, [bta_l1]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l1]
+ ld.ab r9, [sp, 4]
+ mov ilink1, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [bta_l2]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l2]
+ ld.ab r9, [sp, 4]
+ mov ilink2, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ * -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARC_ENTRY_H */
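For readers tracing the GET_CPU_ID / GET_CURR_TASK_ON_CPU assembly above, here is a rough C-level sketch (not part of the patch) of what those macros compute: bits 8..15 of the IDENTITY aux register give the core number, which indexes the per-CPU current-task array. The function name, the AUX_IDENTITY spelling and the extern declaration are illustrative assumptions.

extern struct task_struct *_current_task[];	/* per-CPU array, as referenced by the asm above */

static inline struct task_struct *demo_get_curr_task(void)
{
	unsigned int cpu = (read_aux_reg(AUX_IDENTITY) >> 8) & 0xff;	/* same extract as GET_CPU_ID */

	return _current_task[cpu];					/* same lookup as GET_CURR_TASK_ON_CPU */
}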
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 000000000000..28abc6905e07
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16 bytes */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 000000000000..4dc64ddebece
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ * Notes:
+ * -Best-Effort: Exchg happens only if compare succeeds.
+ * If compare fails, returns; leaving retry/looping to upper layers
+ * -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ * -Compare fails: return orig value in @addr
+ * -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %4 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "=&r"(val)
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ *uval = val;
+ return val;
+}
+
+#endif
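The bit-twiddling in futex_atomic_op_inuser() above is easier to see with a concrete value. Below is a small userspace-style sketch (not part of the patch) that packs an operation using the field layout implied by the function's own shifts (op in bits 28..31, cmp in 24..27, oparg in 12..23, cmparg in 0..11) and then unpacks it with the same expressions.

#include <stdio.h>

int main(void)
{
	/* op = 1 (ADD), cmp = 0 (EQ), oparg = 4, cmparg = 0 */
	int encoded_op = (1 << 28) | (0 << 24) | (4 << 12) | 0;

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extending extract of bits 12..23 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extending extract of bits 0..11 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	/* prints: op=1 cmp=0 oparg=4 cmparg=0 */
	return 0;
}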
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 000000000000..473424d7528b
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc(phy, sz) ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
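A hypothetical driver fragment using the accessors above is sketched below; the base address, mapping size and register offsets are invented for illustration and do not come from this patch.

static int __init demo_mmio_init(void)
{
	void __iomem *regs = ioremap(0xc0fc1000, 0x100);	/* assumed peripheral base/size */
	u32 id;

	if (!regs)
		return -ENOMEM;

	id = __raw_readl(regs);				/* read an (assumed) ID register */
	__raw_writel(id | 0x1, regs + 0x4);		/* poke an (assumed) control register */

	iounmap(regs);
	return 0;
}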
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 000000000000..4c588f9820cf
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS 32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ 3
+#define TIMER1_IRQ 4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 000000000000..ccd84806b62f
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp));
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE scratch, save
+ lr \scratch, [status32]
+ mov \save, \scratch /* Make a copy */
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_ENABLE scratch
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* KERNEL */
+
+#endif
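For context, the helpers above are normally reached via the generic local_irq_save()/local_irq_restore() macros; a minimal sketch of the intended pairing follows (the function name is made up).

static void demo_critical_section(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* clear E1/E2, remembering the previous state */
	/* ... work that must not be interrupted ... */
	arch_local_irq_restore(flags);	/* put E1/E2 back exactly as they were */
}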
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 000000000000..3fbe6c472c0a
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 000000000000..f3c4934f0ca9
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 39
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _BTA = 27,
+ _LP_START = 28,
+ _LP_END = 29,
+ _LP_COUNT = 30,
+ _STATUS32 = 31,
+ _BLINK = 32,
+ _FP = 33,
+ __SP = 34,
+ _EFA = 35,
+ _RET = 36,
+ _ORIG_R8 = 37,
+ _STOP_PC = 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 000000000000..4d9c211fce70
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 000000000000..0283e9e44e0d
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h because
+ * gas treats ';' as a comment marker rather than a statement separator
+ */
+.macro ARC_ENTRY name
+ .global \name
+ .align 4
+ \name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name) .-##name
+ .size \ name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
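A small hypothetical use of the C-side annotations above: with CONFIG_ARC_HAS_ICCM/DCCM enabled, the function and table below would be placed in the .text.arcfp and .data.arcfp sections respectively; the names are invented for illustration.

static int __arcfp_data demo_lut[4] = { 0, 1, 4, 9 };	/* lands in DCCM if enabled */

static int __arcfp_code demo_square(int i)		/* lands in ICCM if enabled */
{
	return demo_lut[i & 3];
}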
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 000000000000..9998dc846ebb
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
+ *	a multi-platform kernel builds with an array of such descriptors.
+ * We extend the early DT scan to also match the DT's "compatible" string
+ *	against the @dt_compat of all such descriptors, and the one with the highest
+ *	"DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_irq: setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp: for each CPU (e.g. setup IPI)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time: platform specific clocksource/clockevent registration
+ * [called from time_init()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+#ifdef CONFIG_SMP
+ void (*init_smp)(unsigned int);
+#endif
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+ /* the default machine is the last one linked in */
+ if (__arch_info_end - 1 < __arch_info_begin)
+ return NULL;
+ return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
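To make the descriptor machinery above concrete, a hypothetical board could register itself as sketched below; the identifiers and the "compatible" string are invented, and a board only fills in the callbacks it actually needs.

static const char *demo_board_compat[] = { "snps,demo-board", NULL };

static void __init demo_board_init_irq(void)
{
	/* set up the board's external interrupt controller here */
}

MACHINE_START(DEMO_BOARD, "demo-board")
	.dt_compat	= demo_board_compat,
	.init_irq	= demo_board_init_irq,
MACHINE_END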
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 000000000000..56b02320f1a9
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long asid; /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+ struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 000000000000..0d71fb11b57c
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with same vaddr (different tasks) to co-exit. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
+ *  time of, say, switch_mm( ))
+ * -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
+ *  given an ASID, find the mm struct associated with it.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request for a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be a plausible but costly solution. Instead we force an
+ * allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associated, alleviating the need to flush.
+ * The quirk essentially is not allowing ASID allocated in prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), task needs to refresh
+ * its ASID, aligning it to the current value of the tracker. If the task
+ * doesn't get scheduled past a roll-over (so its ASID is not yet realigned
+ * with the tracker), such an ASID is anyway safely reusable because it is
+ * guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID 0
+#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
+#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ struct mm_struct *prev_owner;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Relinquish the currently owned ASID (if any).
+ * Doing unconditionally saves a cmp-n-branch; for already unused
+	 * Doing it unconditionally saves a cmp-n-branch; for an already unused
+ */
+ asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+ /* move to new ASID */
+ if (++asid_cache > MAX_ASID) { /* ASID roll-over */
+ asid_cache = FIRST_ASID;
+ flush_tlb_all();
+ }
+
+ /*
+ * Is next ASID already owned by some-one else (we are stealing it).
+ * If so, let the orig owner be aware of this, so when it runs, it
+ * asks for a brand new ASID. This would only happen for a long-lived
+ * task with ASID from prev allocation cycle (before ASID roll-over).
+ *
+ * This might look wrong - if we are re-using some other task's ASID,
+	 * won't we use its stale TLB entries too? Actually switch_mm( ) takes
+	 * care of such a case: it ensures that a task with an ASID from the prev
+	 * alloc cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
+ * The stealing scenario described here will only happen if that task
+	 * didn't get a chance to refresh its ASID - implying stale entries
+ * won't exist.
+ */
+ prev_owner = asid_mm_map[asid_cache];
+ if (prev_owner)
+ prev_owner->context.asid = NO_ASID;
+
+ /* Assign new ASID to tsk */
+ asid_mm_map[asid_cache] = mm;
+ mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+ pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+ " pid:%u, assigned asid:%lu\n",
+ (unsigned int)mm, (unsigned int)prev_owner,
+ (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+ (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+ write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+ mm->context.tsk = tsk;
+#endif
+ return 0;
+}
+
+/* Prepare the MMU for task: setup PID reg with allocated ASID
+   If task doesn't have an ASID (never allocated, or stolen), get a new ASID
+*/
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /*
+ * Get a new ASID if task doesn't have a valid one. Possible when
+ * -task never had an ASID (fresh after fork)
+	 * -its ASID was stolen - past an ASID roll-over.
+ * -There's a third obscure scenario (if this task is running for the
+	 *  first time after an ASID rollover), where despite having a valid
+ * ASID, we force a get for new ASID - see comments at top.
+ *
+ * Both the non-alloc scenario and first-use-after-rollover can be
+ * detected using the single condition below: NO_ASID = 256
+ * while asid_cache is always a valid ASID value (0-255).
+ */
+ if (next->context.asid > asid_cache) {
+ get_new_mmu_context(next);
+ } else {
+ /*
+ * XXX: This will never happen given the chks above
+ * BUG_ON(next->context.asid > MAX_ASID);
+ */
+ write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+ }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid_mm_map[mm->context.asid] = NULL;
+ mm->context.asid = NO_ASID;
+
+ local_irq_restore(flags);
+}
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for retiring-mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /* Unconditionally get a new ASID */
+ get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
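A worked example of the "asid > asid_cache" test in switch_mm() above (numbers invented): suppose the tracker has rolled over and asid_cache is now 3, while a long-sleeping task still carries ASID 200 from the previous allocation cycle. Since 200 > 3, the task goes through get_new_mmu_context() and is handed ASID 4, so its stale TLB entries tagged 200 can never be matched against it again. A freshly forked task sits at NO_ASID (256), which is greater than any tracker value, so the same single comparison also covers the never-allocated case, exactly as the comment above the check claims.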
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 000000000000..518222bb3f8e
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ *
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+ void *unw_info;
+ int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 000000000000..a2f88ff9f506
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 000000000000..bdf546104551
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg) clear_page(addr)
+ *  If compare fails, it returns, leaving retry/looping to upper layers
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+#define __pte(x) (x)
+#define __pgprot(x) (x)
+#define pte_pgprot(x) (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * the 128th page, and virt_to_page( ) will return its struct page.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong in case the kernel is not linked at 0x8000_0000
+ */
+#define __pa(vaddr) ((unsigned long)vaddr)
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) \
+ (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
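To put numbers on the virt_to_page() comment above (assuming the kernel is linked at the usual 0x8000_0000 and the default 8K page size): __pa(0x8010_0000) is simply 0x8010_0000, and virt_to_page(0x8010_0000) indexes mem_map at (0x8010_0000 - 0x8000_0000) >> 13 = 128, i.e. the 128th page frame of the image, matching the "128th page" example in the comment.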
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 000000000000..115ad96480e6
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 000000000000..36a9f20c21a3
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ *  -Page Table size capped to max 1 page to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ *  =Needed so that Page Table allocator (pte_alloc_one) is not forced
+ *      to deal with struct page. That way in future we can make it allocate
+ * multiple PG Tbls in one Page Frame
+ * =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *      pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+ pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+ return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int num, num2;
+ pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+ if (ret) {
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint.
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+ return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ __get_order_pte());
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pgtable_t pte_pg;
+
+ pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ if (pte_pg) {
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ pgtable_page_ctor(virt_to_page(pte_pg));
+ }
+
+ return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+ pgtable_page_dtor(virt_to_page(ptep));
+ free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache() do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
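As a quick sanity check of __get_order_pte() above under the default split (8K pages and an 8-bit PTE index, so PTRS_PER_PTE = 256): PTRS_PER_PTE * 4 = 1KB and get_order(1KB) = 0, i.e. each page table is carved out of a single 8K page of which only the first 1K holds PTEs, which is the same "alloc is 8K and rest 7K is unused" situation the TODO in pgtable.h notes.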
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 000000000000..b7e36684c091
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ * They are semantically the same although in different contexts
+ * VALID marks a TLB entry exists and it will only happen if PRESENT
+ * - Utilise some unused free bits to confine PTE flags to 12 bits
+ * This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ * -TLB Locking never really existed, except for initial specs
+ * -SILENT_xxx not needed for our port
+ * -Per my request, MMU V3 changes the layout of some of the bits
+ * to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ * -Switched from 8:11:13 split for page table lookup to 11:8:13
+ * -this speeds up page table allocation itself as we now have to memset 1K
+ * instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ * need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different value in TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined form a translation entry)
+ * while for PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ * (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
+#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
+#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
+#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
+#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
+#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
+ usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * kernel vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+ _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *     can be tracked independent of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#define __P000 PAGE_U_NONE
+#define __P001 PAGE_U_R
+#define __P010 PAGE_U_R /* Pvt-W => !W */
+#define __P011 PAGE_U_R /* Pvt-W => !W */
+#define __P100 PAGE_U_X_R /* X => R */
+#define __P101 PAGE_U_X_R
+#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
+#define __P111 PAGE_U_X_R /* Pvt-W => !W */
+
+#define __S000 PAGE_U_NONE
+#define __S001 PAGE_U_R
+#define __S010 PAGE_U_W_R /* W => R */
+#define __S011 PAGE_U_W_R
+#define __S100 PAGE_U_X_R /* X => R */
+#define __S101 PAGE_U_X_R
+#define __S110 PAGE_U_X_W_R /* X => R */
+#define __S111 PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ * 32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | |
+ * | ---> index into Page Table
+ * |
+ * ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE 9
+#endif
+
+#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
+#else
+#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
+#endif
+/*
+ * Number of entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for lowest virtual address we permit any user space
+ * mapping to be mapped at.
+ */
+#define FIRST_USER_ADDRESS 0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+ (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
+ pte; \
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS 30
+#define pgoff_to_pte(x) __pte(x)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+ __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
+#define pte_offset_map(dir, addr) pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to Page Directory of "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
+ * becomes reading a register
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of NON "current"
+ * Thus use this macro only when you are certain that "current" is current
+ * e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr) \
+({ \
+ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
+ pgd_base + pgd_index(addr); \
+})
+#else
+#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
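A worked example of the address split described above, assuming the default 8K page size: PAGE_SHIFT = 13 and BITS_FOR_PTE = 8, so BITS_FOR_PGD = 32 - 8 - 13 = 11, PGDIR_SHIFT = 21, and each PGD entry spans 2MB of virtual space. For a user address such as 0x5001_2345, the PGD index is 0x5001_2345 >> 21 = 0x280, the PTE index is (0x5001_2345 >> 13) & 0xff = 0x09, and the offset within the page is 0x345.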
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 000000000000..5f26b2c1cba0
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long ksp; /* kernel mode stack pointer */
+ unsigned long callee_reg; /* pointer to callee regs */
+ unsigned long fault_address; /* dbls as brkpt holder as well */
+ unsigned long cause_code; /* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+ unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based on non-volatile data; otherwise
+ * they get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax() do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+
+/*
+ * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs as that would give the corresponding
+ * user-mode values.
+ */
+#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
+ * the ARC700 sr to lp_start hardware bug
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->ret = (_pc); \
+ /* Interrupts enabled in User Mode */ \
+ (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
+ | STATUS_E1_MASK | STATUS_E2_MASK; \
+ (_regs)->sp = (_usp); \
+ /* bogus seed values for debugging */ \
+ (_regs)->lp_start = 0x10; \
+ (_regs)->lp_end = 0x80; \
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's Virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris) ARC 700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE (0x10000000) /* 256M */
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER 0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
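
Putting the numbers from the comments above together (assuming the PAGE_OFFSET of 0x8000_0000 implied by the 0x7000_0000:0x7FFF_FFFF kernel range), the layout works out as in this small check program; the macro values are reproduced here only for illustration:

#include <stdio.h>

#define PAGE_OFFSET		0x80000000UL	/* assumed, per the comment above */
#define VMALLOC_SIZE		0x10000000UL	/* 256M */
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)
#define USER_KERNEL_GUTTER	0x10000000UL
#define TASK_SIZE		(PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)

int main(void)
{
	printf("vmalloc: 0x%08lx - 0x%08lx\n", VMALLOC_START, PAGE_OFFSET - 1);
	/* -> vmalloc: 0x70000000 - 0x7fffffff */
	printf("gutter : 0x%08lx - 0x%08lx\n", TASK_SIZE, VMALLOC_START - 1);
	/* -> gutter : 0x60000000 - 0x6fffffff */
	printf("user   : 0x00000000 - 0x%08lx\n", TASK_SIZE - 1);
	/* -> user   : 0x00000000 - 0x5fffffff */
	return 0;
}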
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644
index 000000000000..692d0d0789a7
--- /dev/null
+++ b/arch/arc/include/asm/prom.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 000000000000..8ae783d20a81
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+ /*
+ * 1 word gutter after reg-file has been saved
+ * Technically not needed, Since SP always points to a "full" location
+ * (vs. "empty"). But pt_regs is shared with tools....
+ */
+ long res;
+
+ /* Real registers */
+ long bta; /* bta_l1, bta_l2, erbta */
+ long lp_start;
+ long lp_end;
+ long lp_count;
+ long status32; /* status32_l1, status32_l2, erstatus */
+ long ret; /* ilink1, ilink2 or eret */
+ long blink;
+ long fp;
+ long r26; /* gp */
+ long r12;
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long sp; /* user/kernel sp depending on where we came from */
+ long orig_r0;
+
+ /* to distinguish between exception, syscall, irq */
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ /* so that assembly code is same for LE/BE */
+ unsigned long orig_r8:16, event:16;
+#else
+ unsigned long event:16, orig_r8:16;
+#endif
+ long orig_r8_word;
+ };
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ long res; /* Again this is not needed */
+ long r25;
+ long r24;
+ long r23;
+ long r22;
+ long r21;
+ long r20;
+ long r19;
+ long r18;
+ long r17;
+ long r16;
+ long r15;
+ long r14;
+ long r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL 0x0001
+#define orig_r8_IS_SCALL_RESTARTED 0x0002
+#define orig_r8_IS_BRKPT 0x0004
+#define orig_r8_IS_EXCPN 0x0008
+#define orig_r8_IS_IRQ1 0x0010
+#define orig_r8_IS_IRQ2 0x0020
+
+#endif /* __ASM_ARC_PTRACE_H */
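
The event/orig_r8 union above lets the entry code keep a 16-bit event tag and the original r8 (the syscall number) in one word, while assembly code does a single 32-bit store. A quick demo of the bitfield view versus the whole-word view (little-endian layout shown; the #ifdef in the struct flips the halves for big-endian; values are made up):

#include <stdio.h>

union event_word {
	struct { unsigned int event:16, orig_r8:16; };	/* LE variant above */
	long orig_r8_word;
};

int main(void)
{
	union event_word w = { .orig_r8_word = 0 };

	w.event = 0x0001;	/* orig_r8_IS_SCALL */
	w.orig_r8 = 90;		/* e.g. a syscall number */
	printf("word = 0x%08lx\n", (unsigned long)w.orig_r8_word);	/* 0x005a0001 */
	return 0;
}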
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 000000000000..6fc1159dfefe
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 000000000000..da2c45979817
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b) ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 000000000000..4dff5a1e4128
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early-8250 requires BASE_BAUD to be defined and includes this header.
+ * We put in a typical value:
+ * (core clk / 16) - i.e. UART samples 16 times per bit.
+ * Although in a multi-platform image this might not work, especially if the
+ * clk driving the UART is different.
+ * We can't use DeviceTree as this is typically for early serial.
+ */
+
+#include <asm/clk.h>
+
+#define BASE_BAUD (arc_get_core_freq() / 16)
+
+#endif /* _ASM_ARC_SERIAL_H */
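
For a concrete feel of that BASE_BAUD value: assuming, say, an 80 MHz core clock (the same figure used for CLOCK_TICK_RATE elsewhere in this series, used here only as an example), the 16x-oversampled base baud and the resulting 8250 divisor for a common console speed come out as below:

#include <stdio.h>

int main(void)
{
	unsigned long core_clk = 80000000UL;		/* example core clock: 80 MHz */
	unsigned long base_baud = core_clk / 16;	/* what BASE_BAUD evaluates to */
	unsigned long baud = 115200UL;

	printf("BASE_BAUD = %lu\n", base_baud);		/* 5000000 */
	printf("divisor   = %lu\n", base_baud / baud);	/* ~43 for 115200 */
	return 0;
}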
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 000000000000..229e50681497
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+struct cpuinfo_data {
+ struct id_to_str info;
+ int up_range;
+};
+
+extern int root_mountflags, end_mem;
+extern int running_on_hw;
+
+void __init setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 000000000000..c4fb211dcd25
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected by platform SMP code (provided by arch SMP code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpumask
+ * @ipi_clear: To clear IPI received by @cpu at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(void *callmap);
+ void (*ipi_clear)(int cpu, int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops based
+ * on retry-if-irq-in-atomic (with hardware assist).
+ * However despite these, we provide the IRQ disabling variant because:
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ *     needed for older releases.
+ *
+ * (2) In an SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ *     guaranteed by the platform (not something which the core handles).
+ * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ * disabling for atomicity.
+ *
+ * However exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ * but same is not true for ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define bitops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_bitops_lock); \
+} while (0)
+
+#define bitops_unlock(flags) do { \
+ arch_spin_unlock(&smp_bitops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#define bitops_lock(flags) local_irq_save(flags)
+#define bitops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
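
To illustrate how the two lock variants above are meant to be consumed, here is a sketch of a non-LLSC atomic add built on atomic_ops_lock()/atomic_ops_unlock(); it mirrors the approach the !CONFIG_ARC_HAS_LLSC code takes, but the function name and layout are illustrative, not the actual asm/atomic.h implementation:

/* sketch only: a read-modify-write made atomic via atomic_ops_lock()/unlock() */
static inline void sketch_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	/* UP: just IRQ-off; SMP: IRQ-off plus the shared arch spinlock */
	atomic_ops_lock(flags);
	v->counter += i;
	atomic_ops_unlock(flags);
}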
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 000000000000..f158197ac5b0
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (tmp)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x) ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ smp_mb();
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
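
The lock/trylock routines above lean entirely on the ARC "ex" instruction, an atomic register<->memory exchange. A plain-C model of the same algorithm follows; the exchange is written as ordinary C here and is therefore not actually atomic, on hardware the single ex insn provides the atomicity:

#define SL_UNLOCKED	0	/* __ARCH_SPIN_LOCK_UNLOCKED__ */
#define SL_LOCKED	1	/* __ARCH_SPIN_LOCK_LOCKED__ */

/* models the "ex" instruction: swap @val with *@mem, return the old value */
static unsigned int ex_model(volatile unsigned int *mem, unsigned int val)
{
	unsigned int old = *mem;	/* on real hardware this pair is one atomic insn */
	*mem = val;
	return old;
}

static void spin_lock_model(volatile unsigned int *slock)
{
	/* keep swapping LOCKED in; stop once we swapped out UNLOCKED */
	while (ex_model(slock, SL_LOCKED) == SL_LOCKED)
		;
}

static int spin_trylock_model(volatile unsigned int *slock)
{
	return ex_model(slock, SL_LOCKED) == SL_UNLOCKED;	/* 1 if taken */
}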
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 000000000000..8276bfd61704
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked: 0x01_00_00_00
+ * Read lock(s): 0x00_FF_FF_FF down to 0x01 (each reader decrements the count)
+ * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000
+ */
+typedef struct {
+ volatile unsigned int counter;
+ arch_spinlock_t lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
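
A quick worked example of the counter encoding above, assuming all updates happen under lock_mutex as the spinlock.h code does; the counter only ever moves between the states shown:

#include <stdio.h>

#define RW_UNLOCKED 0x01000000U		/* __ARCH_RW_LOCK_UNLOCKED__ */

int main(void)
{
	unsigned int counter = RW_UNLOCKED;

	counter--;	/* first reader  */
	counter--;	/* second reader */
	printf("two readers : 0x%08x\n", counter);	/* 0x00fffffe */

	counter += 2;	/* both readers unlock -> back to 0x01000000 */
	counter = 0;	/* writer takes it (only legal from 0x01000000) */
	printf("write locked: 0x%08x\n", counter);
	return 0;
}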
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 000000000000..87676c8f1412
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 000000000000..1b171ab5fec0
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ ARC_FPU_PREV(prev, next); \
+ last = __switch_to(prev, next);\
+ ARC_FPU_NEXT(next); \
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 000000000000..33ab3048e9b2
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->orig_r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+ regs->r8 = regs->orig_r8;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ inside_ptregs -= i;
+
+ BUG_ON((i + n) > 6);
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
+
+#endif
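
syscall_get_arguments() above exploits the pt_regs layout: r0 is declared last of the r7..r0 group, so &regs->r0 - k is exactly &regs->rk, and walking the pointer downwards yields argument k, k+1, and so on. A tiny layout demo with a stand-in struct (names and values are illustrative only, mirroring the pointer walk in the kernel code):

#include <stdio.h>

/* stand-in for the tail of pt_regs: later members live at higher addresses */
struct fake_regs { long r3, r2, r1, r0; };

int main(void)
{
	struct fake_regs regs = { .r0 = 10, .r1 = 11, .r2 = 12, .r3 = 13 };
	long *p = &regs.r0;
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("arg%u = %ld\n", i, *(p - i));	/* 10, 11, 12, 13 */
	return 0;
}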
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 000000000000..e53a5340ba4f
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_fork_wrapper(void);
+int sys_vfork_wrapper(void);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 000000000000..2d50a4cdd7f3
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ * No need for ARC specific thread_info allocator (kmalloc/free). This is
+ * anyway a one-page allocation, thus slab alloc can be short-circuited and
+ * the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ struct task_struct *task; /* main task structure */
+ mm_segment_t addr_limit; /* thread address space */
+ struct exec_domain *exec_domain;/* execution domain */
+ __u32 cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+ struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+
+/* set when the OOM killer has chosen this task */
+#define TIF_MEMDIE 16
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
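
current_thread_info() above works because thread_info lives at the bottom of the THREAD_SIZE-aligned block it shares with the kernel stack, so masking the stack pointer recovers it. A small demonstration of the masking with a made-up sp (8 KB THREAD_SIZE assumed):

#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed: PAGE_SIZE << THREAD_SIZE_ORDER */

int main(void)
{
	unsigned long sp = 0x8f813e94UL;		/* any kernel-mode sp */
	unsigned long ti = sp & ~(THREAD_SIZE - 1);	/* thread_info at block base */

	printf("thread_info: 0x%08lx\n", ti);			/* 0x8f812000 */
	printf("stack top  : 0x%08lx\n", ti + THREAD_SIZE);	/* 0x8f814000 */
	return 0;
}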
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 000000000000..0a82960a75e9
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 000000000000..a5ff961b1efc
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+; and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+; didn't want to change setup.c
+; hence extra instruction to clean
+;
+; -- should be in cache since in same line
+; as r0/r1 saves above
+;
+ld r0,[jh_ex_way_sel] ; victim pointer
+and r0,r0,1 ; clean
+xor.f r0,r0,1 ; flip
+st r0,[jh_ex_way_sel] ; store back
+asr r0,r1,12 ; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1 ; set way bit
+and r0,r0,0xff ; clean
+sr r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr r0,[eret] /* instruction address */
+xor r0,r0,r1 /* compare set # */
+and.f r0,r0,0x000fe000 /* 2-way MMU mask */
+bne 88f /* not in same set - no need to probe */
+
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and r1,r1,0xff /* Data ASID */
+or r0,r0,r1 /* Instruction address + Data ASID */
+
+lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+xor r0,r0,1 /* flip bottom bit of data index */
+b.d 89f
+sr r0,[ARC_REG_TLBINDEX] /* and put it back */
+88:
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+bmsk r1,r3,7 /* Data ASID, bits 7-0 */
+or_s r0,r0,r1 /* Instruction address + Data ASID */
+
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
+cmp r0,r1 /* if no match on indices, go around */
+xor.eq r1,r1,1 /* flip bottom bit of data index */
+sr r1,[ARC_REG_TLBINDEX] /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 000000000000..3eb2ce0bdfa3
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
+ _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part - implemented via tlb_start_vma( ) can be NOP (for now)
+ *    as we don't support aliasing configs in our VIPT D$.
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) can be NOP as well -
+ *    albeit for different reasons - it's better handled by moving to new ASID
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#define tlb_start_vma(tlb, vma)
+#define tlb_end_vma(tlb, vma)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 000000000000..b2f9bc7f68c8
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* XXX: Revisit for SMP */
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 000000000000..32420824375b
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ * -__clear_user( ), called multiple times during elf load, was a byte loop;
+ *  converted to do as much word clearing as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h> /* for generic string functions */
+
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ * (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ * (len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * Because it essentially checks if buffer end is within limit and @len is
+ * non-negative, which implies that the buffer start will be within the limit too.
+ *
+ * The reason for rewriting being, for the majority of cases, @len is generally
+ * compile time constant, causing first sub-expression to be compile time
+ * subsumed.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+ (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+ * Note the '&' earlyclobber operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code size of the
+ * laddered copy 16x,8,4,2,1
+ */
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /* Note the '&' earlyclobber operand, to make sure the
+ * temporary register inside the loop is not the same as
+ * FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = count;
+ char val;
+ unsigned int hw_count;
+
+ if (count == 0)
+ return 0;
+
+ __asm__ __volatile__(
+ " lp 2f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " stb.ab %3, [%1, 1] \n"
+ "2: sub %0, %6, %4 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+ : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+ : "memory");
+
+ return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+ long res, tmp1, cnt;
+ char val;
+
+ __asm__ __volatile__(
+ " mov %2, %1 \n"
+ "1: ldb.ab %3, [%0, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " sub.f %2, %2, 1 \n"
+ " bnz 1b \n"
+ " sub %2, %2, 1 \n"
+ "2: sub %0, %1, %2 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, 0 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+ : "0"(s), "1"(n)
+ : "memory");
+
+ return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n) __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n) arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
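
The inline copy routines above all implement the same "laddered" scheme the comments describe: move 16 bytes per loop iteration, then mop up 8/4/2/1-byte stragglers, returning how many bytes could not be copied. Stripped of the fixup machinery, the shape of the algorithm looks roughly like this (plain C, purely illustrative; the real code does each access in asm so faults can be fixed up and the uncopied count returned):

#include <string.h>

/* illustrative only: the 16/8/4/2/1 ladder used by __arc_copy_{from,to}_user() */
static unsigned long ladder_copy(void *to, const void *from, unsigned long n)
{
	unsigned char *d = to;
	const unsigned char *s = from;
	unsigned long left = n;

	while (left >= 16) {			/* bulk: four words per iteration */
		memcpy(d, s, 16);
		d += 16; s += 16; left -= 16;
	}
	if (left >= 8) { memcpy(d, s, 8); d += 8; s += 8; left -= 8; }
	if (left >= 4) { memcpy(d, s, 4); d += 4; s += 4; left -= 4; }
	if (left >= 2) { memcpy(d, s, 2); d += 2; s += 2; left -= 2; }
	if (left)      { *d = *s;                         left -= 1; }

	return left;	/* bytes NOT copied: 0 here, non-zero after a fault in the real code */
}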
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 000000000000..5dbe63f17b66
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 000000000000..7ca628b6ee2a
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..18fefaea73fd
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,12 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+header-y += elf.h
+header-y += page.h
+header-y += setup.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += swab.h
+header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000000..9da71d415c38
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 000000000000..51c73f0255b3
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's fine-grained cacheflush requirements
+ */
+#define CF_I_INV 0x0002
+#define CF_D_FLUSH 0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 000000000000..0f99ac8fcbb2
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h> /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK 0x00000f00
+#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
+#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 000000000000..e5d41e08240c
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad-hoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
+ * including the correct uClibc header, and our autoconf.h is not available
+ * in their build
+ */
+#define PAGE_SHIFT 13
+#endif
+
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_OFFSET (0x80000000)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
+#endif
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000000..6afa4f702075
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ * -ptrace (gdbserver)
+ * -sigcontext (SA_SIGNINFO signal frame)
+ *
+ * This is to decouple pt_regs from user-space ABI, to be able to change it
+ * w/o affecting the ABI.
+ * The layout (initial padding) is kept similar to pt_regs to allow some
+ * optimizations when copying pt_regs to/from user_regs_struct.
+ *
+ * Also, sigcontext only cares about the scratch regs as that is what we really
+ * save/restore for signal handling.
+*/
+struct user_regs_struct {
+
+ struct scratch {
+ long pad;
+ long bta, lp_start, lp_end, lp_count;
+ long status32, ret, blink, fp, gp;
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ long sp;
+ } scratch;
+ struct callee {
+ long pad;
+ long r25, r24, r23, r22, r21, r20;
+ long r19, r18, r17, r16, r15, r14, r13;
+ } callee;
+ long efa; /* break pt addr, for break points in delay slots */
+ long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
+};
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..a6d4e44938be
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of userspace header ABI so UAPI scripts have to generate it
+ * even if there's nothing to export - causing empty <uapi/asm/setup.h>
+ * However, to prevent "patch" from discarding it, we add this placeholder
+ * comment
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000000..9678a11fc158
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+ struct user_regs_struct regs;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 000000000000..fad62f7f42d6
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for ARC sigreturn optimization.
+ * This allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
+ * which allows sigreturn based re-entry into kernel after handling signal.
+ * W/o this, the kernel needs to "synthesize" the sigreturn trampoline on user
+ * mode stack which in turn forces the following:
+ * -TLB Flush (after making the stack page executable)
+ * -Cache line Flush (to make I/D Cache lines coherent)
+ */
+#define SA_RESTORER 0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 000000000000..095599a73195
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ * -Better htonl implementation (5 instead of 9 ALU instructions)
+ * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " swape %0, %1 \n" \
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ * return x<<24 | x>>24 |
+ * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
+ * 8051fd98: asl r5,r3,24 ; get 3rd Byte
+ * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
+ * 8051fda0: and r4,r3,0xff00
+ * 8051fda8: asl r4,r4,8 ; get 1st Byte
+ * 8051fdac: and r3,r3,0x00ff0000
+ * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
+ * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of ARC ISA (rotate intrns)
+ */
+
+#define __arch_swab32(x) \
+({ unsigned long __in = (x), __tmp; \
+ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
+ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
+ __tmp ^= __in; \
+ __tmp &= 0xff00ff; \
+ __tmp ^ __in; \
+})
+
+#elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+ " bswap %0, %1 \n"\
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..6f30484f34b7
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_sysfs, sys_sysfs)
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 000000000000..c242ef07ba70
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Pass UTS_MACHINE for user_regset definition
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y += devtree.o
+
+obj-$(CONFIG_MODULES) += arcksyms.o module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+CFLAGS_fpu.o += -mdpfp
+
+ifdef CONFIG_ARC_DW2_UNWIND
+CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
+obj-y += ctx_sw.o
+else
+obj-y += ctx_sw_asm.o
+endif
+
+extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 000000000000..47b2a17cc52a
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,58 @@
+/*
+ * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h> /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h> /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ pr_warn("Hostlink buffer mmap ERROR\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ /* we only support returning the physical addr to mmap in user space */
+ put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+ return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+ .unlocked_ioctl = arc_hl_ioctl,
+ .mmap = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hostlink",
+ .fops = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+ pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+ return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 000000000000..4d9e77724bed
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,56 @@
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..0dc148ebce74
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ BLANK();
+
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+#ifdef CONFIG_ARC_CURR_IN_REG
+ DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
+#endif
+ DEFINE(THREAD_FAULT_ADDR,
+ offsetof(struct thread_struct, fault_address));
+
+ BLANK();
+
+ DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT,
+ offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+
+ DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+ DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+ BLANK();
+
+ DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+ DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
+ DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+ DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+
+ return 0;
+}
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644
index 000000000000..66ce0dc917fb
--- /dev/null
+++ b/arch/arc/kernel/clk.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/clk.h>
+
+unsigned long core_freq = 800000000;
+
+/*
+ * As of now we default to the device-tree provided clock
+ * In the future we can determine this in early boot
+ */
+int arc_set_core_freq(unsigned long freq)
+{
+ core_freq = freq;
+ return 0;
+}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 000000000000..60844dac6132
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -"C" version of lowest level context switch asm macro called by schedular
+ * gcc doesn't generate the dward CFI info for hand written asm, hence can't
+ * backtrace out of it (e.g. tasks sleeping in kernel).
+ * So we cheat a bit by writing almost similar code in inline-asm.
+ * -This is a hacky way of doing things, but there is no other simple way.
+ * I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+ unsigned int tmp;
+ unsigned int prev = (unsigned int)prev_task;
+ unsigned int next = (unsigned int)next_task;
+ int num_words_to_skip = 1;
+#ifdef CONFIG_ARC_CURR_IN_REG
+ num_words_to_skip++;
+#endif
+
+ __asm__ __volatile__(
+ /* FP/BLINK save generated by gcc (standard function prologue) */
+ "st.a r13, [sp, -4] \n\t"
+ "st.a r14, [sp, -4] \n\t"
+ "st.a r15, [sp, -4] \n\t"
+ "st.a r16, [sp, -4] \n\t"
+ "st.a r17, [sp, -4] \n\t"
+ "st.a r18, [sp, -4] \n\t"
+ "st.a r19, [sp, -4] \n\t"
+ "st.a r20, [sp, -4] \n\t"
+ "st.a r21, [sp, -4] \n\t"
+ "st.a r22, [sp, -4] \n\t"
+ "st.a r23, [sp, -4] \n\t"
+ "st.a r24, [sp, -4] \n\t"
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "st.a r25, [sp, -4] \n\t"
+#endif
+ "sub sp, sp, %4 \n\t" /* create gutter at top */
+
+ /* set ksp of outgoing task in tsk->thread.ksp */
+ "st.as sp, [%3, %1] \n\t"
+
+ "sync \n\t"
+
+ /*
+ * setup _current_task with incoming tsk.
+ * optionally, set r25 to that as well
+ * For SMP extra work to get to &_current_task[cpu]
+ * (open coded SET_CURR_TASK_ON_CPU)
+ */
+#ifndef CONFIG_SMP
+ "st %2, [@_current_task] \n\t"
+#else
+ "lr r24, [identity] \n\t"
+ "lsr r24, r24, 8 \n\t"
+ "bmsk r24, r24, 7 \n\t"
+ "add2 r24, @_current_task, r24 \n\t"
+ "st %2, [r24] \n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+ "mov r25, %2 \n\t"
+#endif
+
+ /* get ksp of incoming task from tsk->thread.ksp */
+ "ld.as sp, [%2, %1] \n\t"
+
+ /* start loading its CALLEE reg file */
+
+ "add sp, sp, %4 \n\t" /* skip gutter at top */
+
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "ld.ab r25, [sp, 4] \n\t"
+#endif
+ "ld.ab r24, [sp, 4] \n\t"
+ "ld.ab r23, [sp, 4] \n\t"
+ "ld.ab r22, [sp, 4] \n\t"
+ "ld.ab r21, [sp, 4] \n\t"
+ "ld.ab r20, [sp, 4] \n\t"
+ "ld.ab r19, [sp, 4] \n\t"
+ "ld.ab r18, [sp, 4] \n\t"
+ "ld.ab r17, [sp, 4] \n\t"
+ "ld.ab r16, [sp, 4] \n\t"
+ "ld.ab r15, [sp, 4] \n\t"
+ "ld.ab r14, [sp, 4] \n\t"
+ "ld.ab r13, [sp, 4] \n\t"
+
+ /* last (ret value) = prev : although for ARC it is mov r0, r0 */
+ "mov %0, %3 \n\t"
+
+ /* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+ : "=r"(tmp)
+ : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+ "n"(num_words_to_skip * 4)
+ : "blink"
+ );
+
+ return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 000000000000..d8972345e4c2
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h> /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+ .section .sched.text,"ax",@progbits
+ .align 4
+ .global __switch_to
+ .type __switch_to, @function
+__switch_to:
+
+ /* Save regs on kernel mode stack of task */
+ st.a blink, [sp, -4]
+ st.a fp, [sp, -4]
+ SAVE_CALLEE_SAVED_KERNEL
+
+ /* Save the now-updated KSP in task->thread.ksp */
+ st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+ /*
+ * Return last task in r0 (return reg)
+ * On ARC, Return reg = First Arg reg = r0.
+ * Since we already have last task in r0,
+ * don't need to do anything special to return it
+ */
+
+ /* hardware memory barrier */
+ sync
+
+ /*
+ * switch to new task, contained in r1
+ * Temp reg r3 is required to get the ptr to store val
+ */
+ SET_CURR_TASK_ON_CPU r1, r3
+
+ /* reload SP with kernel mode stack pointer in task->thread.ksp */
+ ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+ /* restore the registers */
+ RESTORE_CALLEE_SAVED_KERNEL
+ ld.ab fp, [sp, 4]
+ ld.ab blink, [sp, 4]
+ j [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 000000000000..bdee3a812052
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/prom.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+/* called from unflatten_device_tree() to bootstrap devicetree itself */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+ struct boot_param_header *devtree = dt;
+ struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model, *compat;
+ void *clk;
+ char manufacturer[16];
+ unsigned long len;
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+
+ /*
+ * The kernel could be multi-platform enabled, thus could have many
+ * "baked-in" machine descriptors. Search thru all for the best
+ * "compatible" string match.
+ */
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+
+ machine_halt();
+ }
+
+ /* compat = "<manufacturer>,<model>" */
+ compat = mdesc_best->dt_compat[0];
+
+ model = strchr(compat, ',');
+ if (model)
+ model++;
+
+ strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
+
+ pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
+ if (clk)
+ arc_set_core_freq(of_read_ulong(clk, len/4));
+
+ return mdesc_best;
+}
+
+/*
+ * Copy the flattened DT out of .init since unflattening doesn't copy strings
+ * and the normal DT APIs reference them from the orig flat DT
+ */
+void __init copy_devtree(void)
+{
+ void *alloc = early_init_dt_alloc_memory_arch(
+ be32_to_cpu(initial_boot_params->totalsize), 64);
+ if (alloc) {
+ memcpy(alloc, initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+ initial_boot_params = alloc;
+ }
+}
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 000000000000..2f390289a792
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,538 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/disasm.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
+ defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+ int fieldA = 0;
+ int fieldC = 0, fieldCisReg = 0;
+ uint16_t word1 = 0, word0 = 0;
+ int subopcode, is_linked, op_format;
+ uint16_t *ins_ptr;
+ uint16_t ins_buf[4];
+ int bytes_not_copied = 0;
+
+ memset(state, 0, sizeof(struct disasm_state));
+
+ /* This fetches the upper part of the 32 bit instruction
+ * in both Little Endian and Big Endian configurations. */
+ if (userspace) {
+ bytes_not_copied = copy_from_user(ins_buf,
+ (const void __user *) addr, 8);
+ if (bytes_not_copied > 6)
+ goto fault;
+ ins_ptr = ins_buf;
+ } else {
+ ins_ptr = (uint16_t *) addr;
+ }
+
+ word1 = *((uint16_t *)addr);
+
+ state->major_opcode = (word1 >> 11) & 0x1F;
+
+ /* Check if the instruction is 32 bit or 16 bit instruction */
+ if (state->major_opcode < 0x0B) {
+ if (bytes_not_copied > 4)
+ goto fault;
+ state->instr_len = 4;
+ word0 = *((uint16_t *)(addr+2));
+ state->words[0] = (word1 << 16) | word0;
+ } else {
+ state->instr_len = 2;
+ state->words[0] = word1;
+ }
+
+ /* Read the second word in case of limm */
+ word1 = *((uint16_t *)(addr + state->instr_len));
+ word0 = *((uint16_t *)(addr + state->instr_len + 2));
+ state->words[1] = (word1 << 16) | word0;
+
+ switch (state->major_opcode) {
+ case op_Bcc:
+ state->is_branch = 1;
+
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 16)) ?
+ FIELD_s25(state->words[0]) :
+ FIELD_s21(state->words[0]);
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->flow = direct_jump;
+ break;
+
+ case op_BLcc:
+ if (IS_BIT(state->words[0], 16)) {
+ /* Branch and Link*/
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 17)) ?
+ (FIELD_s25(state->words[0]) & ~0x3) :
+ FIELD_s21(state->words[0]);
+
+ state->flow = direct_call;
+ } else {
+ /*Branch On Compare */
+ fieldA = FIELD_s9(state->words[0]) & ~0x3;
+ state->flow = direct_jump;
+ }
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->is_branch = 1;
+ break;
+
+ case op_LD: /* LD<zz> a,[b,s9] */
+ state->write = 0;
+ state->di = BITS(state->words[0], 11, 11);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 7, 8);
+ state->aa = BITS(state->words[0], 9, 10);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->aa = 0;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src2 = FIELD_s9(state->words[0]);
+ state->dest = FIELD_A(state->words[0]);
+ state->pref = (state->dest == REG_LIMM);
+ break;
+
+ case op_ST:
+ state->write = 1;
+ state->di = BITS(state->words[0], 5, 5);
+ if (state->di)
+ break;
+ state->aa = BITS(state->words[0], 3, 4);
+ state->zz = BITS(state->words[0], 1, 2);
+ state->src1 = FIELD_C(state->words[0]);
+ if (state->src1 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->src1, regs, cregs);
+ }
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->aa = 0;
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src3 = FIELD_s9(state->words[0]);
+ break;
+
+ case op_MAJOR_4:
+ subopcode = MINOR_OPCODE(state->words[0]);
+ switch (subopcode) {
+ case 32: /* Jcc */
+ case 33: /* Jcc.D */
+ case 34: /* JLcc */
+ case 35: /* JLcc.D */
+ is_linked = 0;
+
+ if (subopcode == 33 || subopcode == 35)
+ state->delay_slot = 1;
+
+ if (subopcode == 34 || subopcode == 35)
+ is_linked = 1;
+
+ fieldCisReg = 0;
+ op_format = BITS(state->words[0], 22, 23);
+ if (op_format == 0 || ((op_format == 3) &&
+ (!IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+
+ if (fieldC == REG_LIMM) {
+ fieldC = state->words[1];
+ state->instr_len += 4;
+ } else {
+ fieldCisReg = 1;
+ }
+ } else if (op_format == 1 || ((op_format == 3)
+ && (IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+ } else {
+ /* op_format == 2 */
+ fieldC = FIELD_s12(state->words[0]);
+ }
+
+ if (!fieldCisReg) {
+ state->target = fieldC;
+ state->flow = is_linked ?
+ direct_call : direct_jump;
+ } else {
+ state->target = get_reg(fieldC, regs, cregs);
+ state->flow = is_linked ?
+ indirect_call : indirect_jump;
+ }
+ state->is_branch = 1;
+ break;
+
+ case 40: /* LPcc */
+ if (BITS(state->words[0], 22, 23) == 3) {
+ /* Conditional LPcc u7 */
+ fieldC = FIELD_C(state->words[0]);
+
+ fieldC = fieldC << 1;
+ fieldC += (addr & ~0x03);
+ state->is_branch = 1;
+ state->flow = direct_jump;
+ state->target = fieldC;
+ }
+ /* For Unconditional lp, next pc is the fall through
+ * which is updated */
+ break;
+
+ case 48 ... 55: /* LD a,[b,c] */
+ state->di = BITS(state->words[0], 15, 15);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 16, 16);
+ state->zz = BITS(state->words[0], 17, 18);
+ state->aa = BITS(state->words[0], 22, 23);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs,
+ cregs);
+ }
+ state->src2 = FIELD_C(state->words[0]);
+ if (state->src2 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->src2, regs,
+ cregs);
+ }
+ state->dest = FIELD_A(state->words[0]);
+ if (state->dest == REG_LIMM)
+ state->pref = 1;
+ break;
+
+ case 10: /* MOV */
+ /* still need to check for limm to extract instr len */
+ /* MOV is special case because it only takes 2 args */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if (FIELD_C(state->words[0]) == REG_LIMM)
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+
+
+ default:
+ /* Not a Load, Jump or Loop instruction */
+ /* still need to check for limm to extract instr len */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM)))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+ }
+ break;
+
+ /* 16 Bit Instructions */
+ case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+ state->zz = BITS(state->words[0], 3, 4);
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->dest = FIELD_S_A(state->words[0]);
+ break;
+
+ case op_ADD_MOV_CMP:
+ /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+ if ((BITS(state->words[0], 3, 4) < 3) &&
+ (FIELD_S_H(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+
+ case op_S:
+ subopcode = BITS(state->words[0], 5, 7);
+ switch (subopcode) {
+ case 0: /* j_s */
+ case 1: /* j_s.d */
+ case 2: /* jl_s */
+ case 3: /* jl_s.d */
+ state->target = get_reg(FIELD_S_B(state->words[0]),
+ regs, cregs);
+ state->delay_slot = subopcode & 1;
+ state->flow = (subopcode >= 2) ?
+ direct_call : indirect_jump;
+ break;
+ case 7:
+ switch (BITS(state->words[0], 8, 10)) {
+ case 4: /* jeq_s [blink] */
+ case 5: /* jne_s [blink] */
+ case 6: /* j_s [blink] */
+ case 7: /* j_s.d [blink] */
+ state->delay_slot = (subopcode == 7);
+ state->flow = indirect_jump;
+ state->target = get_reg(31, regs, cregs);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case op_LD_S: /* LD_S c, [b, u7] */
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_LDB_S:
+ case op_STB_S:
+ /* no further handling required as byte accesses should not
+ * cause an unaligned access exception */
+ state->zz = 1;
+ break;
+
+ case op_LDWX_S: /* LDWX_S c, [b, u6] */
+ state->x = 1;
+ /* intentional fall-through */
+
+ case op_LDW_S: /* LDW_S c, [b, u6] */
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u6(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_ST_S: /* ST_S c, [b, u7] */
+ state->write = 1;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ break;
+
+ case op_STW_S: /* STW_S c,[b,u6] */
+ state->write = 1;
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u6(state->words[0]);
+ break;
+
+ case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+ /* note: we are ignoring possibility of:
+ * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+ * cause an unaligned exception anyway */
+ state->write = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 5, 5);
+ if (state->zz)
+ break; /* byte accesses should not come here */
+ if (!state->write) {
+ state->src1 = get_reg(28, regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ } else {
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+ cregs);
+ state->src2 = get_reg(28, regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ }
+ break;
+
+ case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+ /* note: ADD_S r0, gp, s11 is ignored */
+ state->zz = BITS(state->words[0], 9, 10);
+ state->src1 = get_reg(26, regs, cregs);
+ state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+ FIELD_S_s11(state->words[0]);
+ state->dest = 0;
+ break;
+
+ case op_Pcl: /* LD_S b,[pcl,u10] */
+ state->src1 = regs->ret & ~3;
+ state->src2 = FIELD_S_u10(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ break;
+
+ case op_BR_S:
+ state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_B_S:
+ fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+ FIELD_S_s7(state->words[0]) :
+ FIELD_S_s10(state->words[0]);
+ state->target = fieldA + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_BL_S:
+ state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_call;
+ state->is_branch = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bytes_not_copied <= (8 - state->instr_len))
+ return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ if (reg <= 12) {
+ p = &regs->r0;
+ return p[-reg];
+ }
+
+ if (cregs && (reg <= 25)) {
+ p = &cregs->r13;
+ return p[13-reg];
+ }
+
+ if (reg == 26)
+ return regs->r26;
+ if (reg == 27)
+ return regs->fp;
+ if (reg == 28)
+ return regs->sp;
+ if (reg == 31)
+ return regs->blink;
+
+ return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ switch (reg) {
+ case 0 ... 12:
+ p = &regs->r0;
+ p[-reg] = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13-reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
+ * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
+ *
+ * If @pc is a branch
+ * -@tgt_if_br is set to branch target.
+ * -If branch has delay slot, @next_pc updated with actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+ struct callee_regs *cregs,
+ unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+ struct disasm_state instr;
+
+ memset(&instr, 0, sizeof(struct disasm_state));
+ disasm_instr(pc, &instr, 0, regs, cregs);
+
+ *next_pc = pc + instr.instr_len;
+
+ /* Instruction with possible two targets branch, jump and loop */
+ if (instr.is_branch)
+ *tgt_if_br = instr.target;
+
+ /* For the instructions with delay slots, the fall through is the
+ * instruction following the instruction in delay slot.
+ */
+ if (instr.delay_slot) {
+ struct disasm_state instr_d;
+
+ disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
+
+ *next_pc += instr_d.instr_len;
+ }
+
+ /* Zero Overhead Loop - end of the loop */
+ if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
+ && (regs->lp_count > 1)) {
+ *next_pc = regs->lp_start;
+ }
+
+ return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 000000000000..ef6800ba2f03
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,839 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ * -traced syscall return code (r0) was not saved into pt_regs for restoring
+ * into user reg-file when traced task rets to user space.
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ * were not invoking post-syscall trace hook (jumping directly into
+ * ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ * -To maintain the slot size of 8 bytes/vector, added nop, which is
+ * not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
+ * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ * need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ * active (AE bit enabled). This causes a double fault for a subseq valid
+ * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ * Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather then "SET". Since it is Loop INHIBIT Bit,
+ * setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ * - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ * Minor Surgery of Low Level ISR to make it SMP safe
+ * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ * - _current_task is made an array of NR_CPUS
+ * - Access of _current_task wrapped inside a macro so that if hardware
+ * team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+/*------------------------------------------------------------------
+ * Function ABI
+ *------------------------------------------------------------------
+ *
+ * Arguments r0 - r7
+ * Caller Saved Registers r0 - r12
+ * Callee Saved Registers r13- r25
+ * Global Pointer (gp) r26
+ * Frame Pointer (fp) r27
+ * Stack Pointer (sp) r28
+ * Interrupt link register (ilink1) r29
+ * Interrupt link register (ilink2) r30
+ * Branch link register (blink) r31
+ *------------------------------------------------------------------
+ */
+
+ .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR lbl
+#if 1 /* Just in case the build breaks */
+ j \lbl
+#else
+ b \lbl
+ nop
+#endif
+.endm
+
+ .section .vector, "ax",@progbits
+ .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR res_service ; 0x0, Restart Vector (0x0)
+VECTOR mem_service ; 0x8, Mem exception (0x1)
+VECTOR instr_service ; 0x10, Instrn Error (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+VECTOR handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+.rept 25
+VECTOR handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
+VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
+VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
+VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
+ ; or Misaligned Access
+VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
+VECTOR EV_Trap ; 0x128, Trap exception (0x25)
+VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
+
+.rept 24
+VECTOR reserved ; Reserved Exceptions
+.endr
+
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+ .align 32
+ .type int1_saved_reg, @object
+ .size int1_saved_reg, 4
+int1_saved_reg:
+ .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+ .type int2_saved_reg, @object
+ .size int2_saved_reg, 4
+int2_saved_reg:
+ .zero 4
+
+#endif
+
+; ---------------------------------------------
+ .section .text, "ax",@progbits
+
+res_service: ; processor restart
+ flag 0x1 ; not implemented
+ nop
+ nop
+
+reserved: ; processor restart
+ rtie ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+; Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level2
+
+ ; TODO-vineetg for SMP this won't work
+ ; free up r9 as scratchpad
+ st r9, [@int2_saved_reg]
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l2]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT2
+
+ ;------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;------------------------------------------------------
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; bump thread_info->preempt_count (Disable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+ add r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+ ;------------------------------------------------------
+ ; setup params for Linux common ISR and invoke it
+ ;------------------------------------------------------
+ lr r0, [icause2]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x2
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+
+ARC_EXIT handle_interrupt_level2
+
+#endif
+
+; ---------------------------------------------
+; Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level1
+
+ /* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+ sr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ st r9, [@int1_saved_reg]
+#endif
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l1]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT1
+
+ lr r0, [icause1]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x1
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+ARC_EXIT handle_interrupt_level1
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY instr_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_insterror_or_kprobe
+ b ret_from_exception
+ARC_EXIT instr_service
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY mem_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_memory_error
+ b ret_from_exception
+ARC_EXIT mem_service
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_MachineCheck
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ brne r0, 0x200100, 1f
+ bl do_tlb_overlap_fault
+ b ret_from_exception
+
+1:
+ ; DEAD END: can't do much, display Regs and HALT
+ SAVE_CALLEE_SAVED_USER
+
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ j do_machine_check_fault
+
+ARC_EXIT EV_MachineCheck
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_TLBProtV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ ;Which mode (user/kernel) was the system in when Exception occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ;---------(3) Save some more regs-----------------
+ ; vineetg: Mar 6th: Random Seg Fault issue #1
+ ; ecr and efa were not saved in case an Intr sneaks in
+ ; after fake rtie
+ ;
+ lr r3, [ecr]
+ lr r4, [efa]
+
+ ; --------(4) Return from CPU Exception Mode ---------
+ ; Fake a rtie, but rtie to next label
+ ; That way, subsequently, do_page_fault ( ) executes in pure kernel
+ ; mode with further Exceptions enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ ;------ (5) Type of Protection Violation? ----------
+ ;
+ ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+ ; -Access Violation (WRITE to READ ONLY Page) - for linux COW
+ ; -Unaligned Access (READ/WRITE on odd boundary)
+ ;
+ cmp r3, 0x230400 ; Misaligned data access ?
+ beq 4f
+
+ ;========= (6a) Access Violation Processing ========
+ cmp r3, 0x230100
+ mov r1, 0x0 ; if LD exception ? write = 0
+ mov.ne r1, 0x1 ; else write = 1
+
+ mov r2, r4 ; faulting address
+ mov r0, sp ; pt_regs
+ bl do_page_fault
+ b ret_from_exception
+
+ ;========== (6b) Non aligned access ============
+4:
+ mov r0, r3 ; cause code
+ mov r1, r4 ; faulting address
+ mov r2, sp ; pt_regs
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ SAVE_CALLEE_SAVED_USER
+ mov r3, sp ; callee_regs
+#endif
+
+ bl do_misaligned_access
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ DISCARD_CALLEE_SAVED_USER
+#endif
+
+ b ret_from_exception
+
+ARC_EXIT EV_TLBProtV
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_PrivilegeV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_privilege_fault
+ b ret_from_exception
+ARC_EXIT EV_PrivilegeV
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_Extension
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_extension_fault
+ b ret_from_exception
+ARC_EXIT EV_Extension
+
+;######################### System Call Tracing #########################
+
+tracesys:
+ ; save EFA in case tracer wants the PC of traced task
+ ; using ERET won't work since next-PC has already committed
+ lr r12, [efa]
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR]
+
+ ; PRE Sys Call Ptrace hook
+ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_entry
+
+	; Tracing code now returns the syscall num (orig or modified)
+ mov r8, r0
+
+ ; Do the Sys Call as we normally would.
+ ; Validate the Sys Call number
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+	; Restore the sys-call args. Mere invocation of the hook above could have
+ ; clobbered them (since they are in scratch regs). The tracer could also
+ ; have deliberately changed the syscall args: r0-r7
+ ld r0, [sp, PT_r0]
+ ld r1, [sp, PT_r1]
+ ld r2, [sp, PT_r2]
+ ld r3, [sp, PT_r3]
+ ld r4, [sp, PT_r4]
+ ld r5, [sp, PT_r5]
+ ld r6, [sp, PT_r6]
+ ld r7, [sp, PT_r7]
+ ld.as r9, [sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+tracesys_exit:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ;POST Sys Call Ptrace Hook
+ bl @syscall_trace_exit
+	b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
+			     ; we already did before calling the post hook above
+
+;################### Break Point TRAP ##########################
+
+ ; ======= (5b) Trap is due to Break-Point =========
+
+trap_with_param:
+
+	; gdb needs this info to report stop_pc of the traced task
+ stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+ mov r0, r12
+ lr r1, [efa]
+ mov r2, sp
+
+	; Now that we have read EFA, it's safe to do "fake" rtie
+ ; and get out of CPU exception mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; Save callee regs in case gdb wants to have a look
+ ; SP will grow up by size of CALLEE Reg-File
+ ; NOTE: clobbers r12
+ SAVE_CALLEE_SAVED_USER
+
+	; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ ; Call the trap handler
+ bl do_non_swi_trap
+
+ ; unwind stack to discard Callee saved Regs
+ DISCARD_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+;##################### Trap Handling ##############################
+;
+; EV_Trap caused by TRAP_S and TRAP0 instructions.
+;------------------------------------------------------------------
+; (1) System Calls
+; :parameters in r0-r7.
+; :r8 has the system call number
+; (2) Break Points
+;------------------------------------------------------------------
+
+ARC_ENTRY EV_Trap
+
+ ; Need at least 1 reg to code the early exception prolog
+ EXCPN_PROLOG_FREEUP_REG r9
+
+	; Which mode (user/kernel) was the system in when the trap occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_TRAP
+
+ ;------- (4) What caused the Trap --------------
+ lr r12, [ecr]
+ and.f 0, r12, ECR_PARAM_MASK
+ bnz trap_with_param
+
+ ; ======= (5a) Trap is due to System Call ========
+
+ ; Before doing anything, return from CPU Exception Mode
+ FAKE_RET_FROM_EXCPN r11
+
+	; If syscall tracing ongoing, invoke pre/post hooks
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys ; this never comes back
+
+ ;============ This is normal System Call case ==========
+	; Sys-call num should not exceed the total system calls available
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi ret_from_system_call
+
+ ; Offset into the syscall_table and call handler
+ ld.as r9,[sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+ ; fall through to ret_from_system_call
+ARC_EXIT EV_Trap
+
+ARC_ENTRY ret_from_system_call
+
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ; fall through yet again to ret_from_exception
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If returning to user mode, check whether we need to handle signals, schedule() et al.
+
+ARC_ENTRY ret_from_exception
+
+ ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+ ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
+
+#ifdef CONFIG_PREEMPT
+ bbit0 r8, STATUS_U_BIT, resume_kernel_mode
+#else
+ bbit0 r8, STATUS_U_BIT, restore_regs
+#endif
+
+ ; Before returning to User mode check-for-and-complete any pending work
+ ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+	; Disable IRQs to ensure that the check for pending work itself is atomic
+ ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+ ; interim IRQ).
+ IRQ_DISABLE r10
+
+ ; Fast Path return to user mode if no pending work
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_WORK_MASK
+ bz restore_regs
+
+ ; --- (Slow Path #1) task preemption ---
+ bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+ mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
+ b @schedule ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+ IRQ_ENABLE r10
+
+ ; --- (Slow Path #2) pending signal ---
+ mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+
+ bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
+
+ ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+	; in pt_regs since the "C" ABI (kernel code) will automatically
+ ; save/restore callee-saved regs.
+ ;
+ ; However, here we need to explicitly save callee regs because
+ ; (i) If this signal causes coredump - full regfile needed
+ ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+ ; tracer might call PEEKUSR(CALLEE reg)
+ ;
+ ; NOTE: SP will grow up by size of CALLEE Reg-File
+ SAVE_CALLEE_SAVED_USER ; clobbers r12
+
+ ; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ bl @do_signal
+
+ ; Ideally we want to discard the Callee reg above, however if this was
+ ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+ RESTORE_CALLEE_SAVED_USER
+
+ b resume_user_mode_begin ; loop back to start of U mode ret
+
+ ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+ btst r9, TIF_NOTIFY_RESUME
+ blnz @do_notify_resume
+ b resume_user_mode_begin ; unconditionally back to U mode ret chks
+ ; for single exit point from this block
+
+#ifdef CONFIG_PREEMPT
+
+resume_kernel_mode:
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+ brne r8, 0, restore_regs
+
+ ; check if this task's NEED_RESCHED flag set
+ ld r9, [r10, THREAD_INFO_FLAGS]
+ bbit0 r9, TIF_NEED_RESCHED, restore_regs
+
+ IRQ_DISABLE r9
+
+ ; Invoke PREEMPTION
+ bl preempt_schedule_irq
+
+ ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+ ; fall through
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ should definitely not happen between now and rtie
+
+restore_regs :
+
+ ; Disable Interrupts while restoring reg-file back
+ ; XXX can this be optimised out
+	IRQ_DISABLE_SAVE r9, r10	; @r10 has pristine (pre-disable) copy
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Restore User R25
+ ; Earlier this used to be only for returning to user mode
+ ; However with 2 levels of IRQ this can also happen even if
+ ; in kernel mode
+ ld r9, [sp, PT_sp]
+ brhs r9, VMALLOC_START, 8f
+ RESTORE_USER_R25
+8:
+#endif
+
+ ; Restore REG File. In case multiple Events outstanding,
+	; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+ ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+ ; decide that.
+
+ ; if Returning from Exception
+ bbit0 r10, STATUS_AE_BIT, not_exception
+ RESTORE_ALL_SYS
+ rtie
+
+ ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+not_exception:
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
+
+ ;------------------------------------------------------------------
+	; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
+	; so that sched doesn't move to a new task, causing L1 to be delayed
+	; non-deterministically. Now that we've achieved that, let's reset
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+ ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+	brne r9, orig_r8_IS_IRQ2, 149f	; in fact an L2 ISR ret path
+
+	ld r9, [sp, PT_status32]	; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; decrement thread_info->preempt_count (re-enable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+	; paranoid check: given A1 was active when A2 happened, preempt count
+	; must not be 0 because we would have incremented it.
+ ; If this does happen we simply HALT as it means a BUG !!!
+ cmp r9, 0
+ bnz 2f
+ flag 1
+
+2:
+ sub r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+ ;return from level 2
+ RESTORE_ALL_INT2
+debug_marker_l2:
+ rtie
+
+not_level2_interrupt:
+
+#endif
+
+ bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
+
+ ;return from level 1
+
+ RESTORE_ALL_INT1
+debug_marker_l1:
+ rtie
+
+not_level1_interrupt:
+
+ ;this case is for syscalls or Exceptions (with fake rtie)
+
+ RESTORE_ALL_SYS
+debug_marker_syscall:
+ rtie
+
+ARC_EXIT ret_from_exception
+
+ARC_ENTRY ret_from_fork
+ ; when the forked child comes here from the __switch_to function
+ ; r0 has the last task pointer.
+ ; put last task in scheduler queue
+ bl @schedule_tail
+
+	; If kernel thread, jump to its entry-point
+ ld r9, [sp, PT_status32]
+ brne r9, 0, 1f
+
+ jl.d [r14]
+ mov r0, r13 ; arg to payload
+
+1:
+ ; special case of kernel_thread entry point returning back due to
+ ; kernel_execve() - pretend return from syscall to ret to userland
+ b ret_from_exception
+ARC_EXIT ret_from_fork
+
+;################### Special Sys Call Wrappers ##########################
+
+; TBD: call do_fork directly from here
+ARC_ENTRY sys_fork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_fork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_fork_wrapper
+
+ARC_ENTRY sys_vfork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_vfork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_vfork_wrapper
+
+ARC_ENTRY sys_clone_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_clone_wrapper
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
+#endif
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 000000000000..f352e512cbd1
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ * mov_s r3, 0
+ * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
+ * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
+ * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+ unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+ const unsigned int zero = 0;
+
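+	/*
+	 * For each 64-bit DPFP reg: DADDH reads out the outgoing task's "hi"
+	 * word, while DEXCL writes the incoming task's value into the reg and
+	 * hands back the outgoing "low" word - one exchange per reg (dpfp1/2)
+	 */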
+ __asm__ __volatile__(
+ "daddh11 %0, %2, %2\n"
+ "dexcl1 %1, %3, %4\n"
+ : "=&r" (*(saveto + 1)), /* early clobber must here */
+ "=&r" (*(saveto))
+ : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+ );
+
+ __asm__ __volatile__(
+ "daddh22 %0, %2, %2\n"
+ "dexcl2 %1, %3, %4\n"
+ : "=&r"(*(saveto + 3)), /* early clobber must here */
+ "=&r"(*(saveto + 2))
+ : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+ );
+}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 000000000000..006dec3fc353
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,111 @@
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Dec 2007
+ * -Check if we are running on Simulator or on real hardware
+ * to skip certain things during boot on simulator
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <linux/linkage.h>
+#include <asm/arcregs.h>
+
+ .cpu A7
+
+ .section .init.text, "ax",@progbits
+ .type stext, @function
+ .globl stext
+stext:
+ ;-------------------------------------------------------------------
+	; Don't clobber r0-r4 yet. They might hold bootloader provided info
+ ;-------------------------------------------------------------------
+
+#ifdef CONFIG_SMP
+ ; Only Boot (Master) proceeds. Others wait in platform dependent way
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (cpu-id) ^^^ => Zero for UP ARC700
+ ; => #Core-ID if SMP (Master 0)
+ GET_CPU_ID r5
+ cmp r5, 0
+ jnz arc_platform_smp_wait_to_boot
+#endif
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
+ mov r5, __bss_start
+ mov r6, __bss_stop
+1:
+ st.ab 0, [r5,4]
+ brlt r5, r6, 1b
+
+#ifdef CONFIG_CMDLINE_UBOOT
+ ; support for bootloader provided cmdline
+ ; If cmdline passed by u-boot, then
+ ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
+ ; r1 = magic number (board identity)
+ ; r2 = addr of cmdline string (somewhere in memory/flash)
+
+ brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
+ breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
+
+ mov r5, @command_line
+1:
+ ldb.ab r6, [r2, 1]
+ breq r6, 0, .Lother_bootup_chores
+ b.d 1b
+ stb.ab r6, [r5, 1]
+#endif
+
+.Lother_bootup_chores:
+
+ ; Identify if running on ISS vs Silicon
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (chip-id) ^^^^^ ==> 0xffff for ISS
+ lr r0, [identity]
+ lsr r3, r0, 16
+ cmp r3, 0xffff
+ mov.z r4, 0
+ mov.nz r4, 1
+ st r4, [@running_on_hw]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+ mov r9, @init_task
+ SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; tsk->thread_info is really a PAGE, whose bottom hoists stack
+ GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
+
+ j start_kernel ; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+; First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+ .section .init.text, "ax",@progbits
+ .type first_lines_of_secondary, @function
+ .globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+ SET_CURR_TASK_ON_CPU r0, r1
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+	; set its stack base to tsk->thread_info bottom
+ GET_TSK_STACK_BASE r0, sp
+
+ j start_kernel_secondary
+
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 000000000000..551c10dff481
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/mach_desc.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC700)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ *
+ * What it does:
+ * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
+ * -Disable all IRQs (on CPU side)
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void __init arc_init_IRQ(void)
+{
+ int level_mask = 0;
+
+ write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
+
+ /* Disable all IRQs: enable them as devices request */
+ write_aux_reg(AUX_IENABLE, 0);
+
+ /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+#ifdef CONFIG_ARC_IRQ3_LV2
+ level_mask |= (1 << 3);
+#endif
+#ifdef CONFIG_ARC_IRQ5_LV2
+ level_mask |= (1 << 5);
+#endif
+#ifdef CONFIG_ARC_IRQ6_LV2
+ level_mask |= (1 << 6);
+#endif
+
+ if (level_mask) {
+ pr_info("Level-2 interrupts bitset %x\n", level_mask);
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+ }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_mask_irq(struct irq_data *data)
+{
+ arch_mask_irq(data->irq);
+}
+
+static void arc_unmask_irq(struct irq_data *data)
+{
+ arch_unmask_irq(data->irq);
+}
+
+static struct irq_chip onchip_intc = {
+ .name = "ARC In-core Intc",
+ .irq_mask = arc_mask_irq,
+ .irq_unmask = arc_unmask_irq,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (irq == TIMER0_IRQ)
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+ else
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+void __init init_onchip_IRQ(void)
+{
+ struct device_node *intc = NULL;
+
+ intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
+	if (!intc)
+ panic("DeviceTree Missing incore intc\n");
+
+ root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ &arc_intc_domain_ops, NULL);
+
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /* with this we don't need to export root_domain */
+ irq_set_default_host(root_domain);
+}
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+ init_onchip_IRQ();
+
+ /* Any external intc can be setup here */
+ if (machine_desc->init_irq)
+ machine_desc->init_irq();
+
+#ifdef CONFIG_SMP
+	/* Master CPU can initialize its side of IPI */
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+#endif
+}
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
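+/*
+ * Number of interrupt lines supported by the core intc, as encoded in the
+ * low 2 bits of the VECBASE Build Config Register
+ */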
+int __init get_hw_config_num_irq(void)
+{
+ uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
+
+ switch (val & 0x03) {
+ case 0:
+ return 16;
+ case 1:
+ return 32;
+ case 2:
+ return 8;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ *    which may be in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ * e.g. suppose TIMER is high priority (Level 2) IRQ
+ *    In the Timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
+ *    Here local_irq_enable( ) should not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ *    soft ISRs are low priority jobs which can be very slow, thus all IRQs
+ * must be enabled while they run.
+ * Now hardware context wise we may still be in L2 ISR (not done rtie)
+ * still we must re-enable both L1 and L2 IRQs
+ * Another twist is prev scenario with flow being
+ * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
+ *    here we must not re-enable L1 as the prev L1 Interrupt's h/w context will get
+ *    over-written (this is a deficiency in the ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+ unsigned long flags;
+ flags = arch_local_save_flags();
+
+ /* Allow both L1 and L2 at the onset */
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Called from hard ISR (between irq_enter and irq_exit) */
+ if (in_irq()) {
+
+ /* If in L2 ISR, don't re-enable any further IRQs as this can
+ * cause IRQ priorities to get upside down. e.g. it could allow
+		 * L1 to be taken while in L2 hard ISR, which is wrong not only in
+ * theory, it can also cause the dreaded L1-L2-L1 scenario
+ */
+ if (flags & STATUS_A2_MASK)
+ flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+		/* Even if in L1 ISR, allow Higher prio L2 IRQs */
+ else if (flags & STATUS_A1_MASK)
+ flags &= ~(STATUS_E1_MASK);
+ }
+
+ /* called from soft IRQ, ideally we want to re-enable all levels */
+
+ else if (in_softirq()) {
+
+		/* However if this is a case of L1 interrupted by L2,
+		 * re-enabling both may cause the whacky L1-L2-L1 scenario
+ * because ARC700 allows level 1 to interrupt an active L2 ISR
+ * Thus we disable both
+ * However some code, executing in soft ISR wants some IRQs
+ * to be enabled so we re-enable L2 only
+ *
+		 * How do we determine L1 interrupted by L2?
+ * -A2 is set (means in L2 ISR)
+ * -E1 is set in this ISR's pt_regs->status32 which is
+ * saved copy of status32_l2 when l2 ISR happened
+ */
+ struct pt_regs *pt = get_irq_regs();
+ if ((flags & STATUS_A2_MASK) && pt &&
+ (pt->status32 & STATUS_A1_MASK)) {
+ /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+ flags &= ~(STATUS_E1_MASK);
+ }
+ }
+
+ arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only Worry about Level 1 Bits
+ */
+void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+
+ /*
+	 * The ARC IDE driver tries to re-enable interrupts from hard-isr
+ * context which is simply wrong
+ */
+ if (in_irq()) {
+ WARN_ONCE(1, "IRQ enabled from hard-isr");
+ return;
+ }
+
+ flags = arch_local_save_flags();
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+ arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 000000000000..2888ba5be47e
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,205 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kgdb.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+ for (regno = 27; regno < GDB_MAX_REGS; regno++)
+ gdb_regs[regno] = 0;
+
+ gdb_regs[_FP] = kernel_regs->fp;
+ gdb_regs[__SP] = kernel_regs->sp;
+ gdb_regs[_BLINK] = kernel_regs->blink;
+ gdb_regs[_RET] = kernel_regs->ret;
+ gdb_regs[_STATUS32] = kernel_regs->status32;
+ gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
+ gdb_regs[_LP_END] = kernel_regs->lp_end;
+ gdb_regs[_LP_START] = kernel_regs->lp_start;
+ gdb_regs[_BTA] = kernel_regs->bta;
+ gdb_regs[_STOP_PC] = kernel_regs->ret;
+}
+
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+ kernel_regs->fp = gdb_regs[_FP];
+ kernel_regs->sp = gdb_regs[__SP];
+ kernel_regs->blink = gdb_regs[_BLINK];
+ kernel_regs->ret = gdb_regs[_RET];
+ kernel_regs->status32 = gdb_regs[_STATUS32];
+ kernel_regs->lp_count = gdb_regs[_LP_COUNT];
+ kernel_regs->lp_end = gdb_regs[_LP_END];
+ kernel_regs->lp_start = gdb_regs[_LP_START];
+ kernel_regs->bta = gdb_regs[_BTA];
+}
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ if (task)
+ to_gdb_regs(gdb_regs, task_pt_regs(task),
+ (struct callee_regs *) task->thread.callee_reg);
+}
+
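+/*
+ * Software single-step state: original opcodes/addresses of the one (or
+ * two, if the stepped instruction is a branch) locations where a trap is
+ * planted; 'armed' flags that traps are currently in place
+ */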
+struct single_step_data_t {
+ uint16_t opcode[2];
+ unsigned long address[2];
+ int is_branch;
+ int armed;
+} single_step_data;
+
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (single_step_data.armed) {
+ int i;
+
+ for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+ memcpy((void *) single_step_data.address[i],
+ &single_step_data.opcode[i],
+ BREAK_INSTR_SIZE);
+
+ flush_icache_range(single_step_data.address[i],
+ single_step_data.address[i] +
+ BREAK_INSTR_SIZE);
+ }
+ single_step_data.armed = 0;
+ }
+}
+
+static void place_trap(unsigned long address, void *save)
+{
+ memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+ memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+static void do_single_step(struct pt_regs *regs)
+{
+ single_step_data.is_branch = disasm_next_pc((unsigned long)
+ regs->ret, regs, (struct callee_regs *)
+ current->thread.callee_reg,
+ &single_step_data.address[0],
+ &single_step_data.address[1]);
+
+ place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+ if (single_step_data.is_branch) {
+ place_trap(single_step_data.address[1],
+ &single_step_data.opcode[1]);
+ }
+
+ single_step_data.armed++;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ undo_single_step(regs);
+
+ switch (remcomInBuffer[0]) {
+ case 's':
+ case 'c':
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ret = addr;
+
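+		/* fall through: 's'/'c' share the single-step bookkeeping below */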
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(regs);
+ atomic_set(&kgdb_cpu_doing_single_step,
+ smp_processor_id());
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int kgdb_arch_init(void)
+{
+ single_step_data.armed = 0;
+ return 0;
+}
+
+void kgdb_trap(struct pt_regs *regs, int param)
+{
+ /* trap_s 3 is used for breakpoints that overwrite existing
+ * instructions, while trap_s 4 is used for compiled breakpoints.
+ *
+ * with trap_s 3 breakpoints the original instruction needs to be
+ * restored and continuation needs to start at the location of the
+ * breakpoint.
+ *
+ * with trap_s 4 (compiled) breakpoints, continuation needs to
+ * start after the breakpoint.
+ */
+ if (param == 3)
+ instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ instruction_pointer(regs) = ip;
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* breakpoint instruction: TRAP_S 0x3 */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = {0x78, 0x7e},
+#else
+ .gdb_bpt_instr = {0x7e, 0x78},
+#endif
+};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 000000000000..3bfeacb674de
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
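+/*
+ * Bytes of kernel stack from @addr up to the top of the thread's stack,
+ * capped at MAX_STACK_SIZE - this much is saved/restored by the jprobe
+ * handlers below
+ */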
+#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Attempt to probe at unaligned address */
+ if ((unsigned long)p->addr & 0x01)
+ return -EINVAL;
+
+ /* Address should not be in exception handling code */
+
+ p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+ p->opcode = *p->addr;
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = UNIMP_S_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ arch_disarm_kprobe(p);
+
+ /* Can we remove the kprobe in the middle of kprobe handling? */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+ struct pt_regs *regs)
+{
+ /* Remove the trap instructions inserted for single step and
+ * restore the original instructions
+ */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+
+ return;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long next_pc;
+ unsigned long tgt_if_br = 0;
+ int is_branch;
+ unsigned long bta;
+
+ /* Copy the opcode back to the kprobe location and execute the
+ * instruction. Because of this we will not be able to get into the
+ * same kprobe until this kprobe is done
+ */
+ *(p->addr) = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+ /* Now we insert the trap at the next location after this instruction to
+ * single step. If it is a branch we insert the trap at possible branch
+ * targets
+ */
+
+ bta = regs->bta;
+
+ if (regs->status32 & 0x40) {
+ /* We are in a delay slot with the branch taken */
+
+ next_pc = bta & ~0x01;
+
+ if (!p->ainsn.is_short) {
+ if (bta & 0x01)
+ regs->blink += 2;
+ else {
+ /* Branch not taken */
+ next_pc += 2;
+
+ /* next pc is taken from bta after executing the
+ * delay slot instruction
+ */
+ regs->bta += 2;
+ }
+ }
+
+ is_branch = 0;
+ } else
+ is_branch =
+ disasm_next_pc((unsigned long)p->addr, regs,
+ (struct callee_regs *) current->thread.callee_reg,
+ &next_pc, &tgt_if_br);
+
+ p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+ p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+ *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ if (is_branch) {
+ p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+ p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+ *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+ }
+}
+
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)addr);
+
+ if (p) {
+ /*
+ * We have reentered the kprobe_handler, since another kprobe
+ * was hit while within the handler, we save the original
+ * kprobes and single step on the instruction of the new probe
+ * without calling any user handlers to avoid recursive
+ * kprobes.
+ */
+ if (kprobe_running()) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /* If we have no pre-handler or it returned 0, we continue with
+ * normal processing. If we have a pre-handler and it returned
+ * non-zero - which is expected from setjmp_pre_handler for
+ * jprobe, we return without single stepping and leave that to
+ * the break-handler which is invoked by a kprobe from
+ * jprobe_return
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ return 1;
+ } else if (kprobe_running()) {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+ }
+ }
+
+ /* no_kprobe: */
+ preempt_enable_no_resched();
+ return 0;
+}
+
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+ struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ resume_execution(cur, addr, regs);
+
+ /* Rearm the kprobe */
+ arch_arm_kprobe(cur);
+
+ /*
+ * When we return from trap instruction we go to the next instruction
+	 * We restored the actual instruction in resume_execution and we need to
+	 * return to the same address and execute it
+ */
+ regs->ret = addr;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+/*
+ * Fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This is applicable for applications like user probes, where we have the
+ * probe in user space and the handlers in the kernel
+ */
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single stepped
+		 * caused the fault. We reset the current kprobe and let the
+		 * exception handler run as if it were a regular exception. In our
+ * case it doesn't matter because the system will be halted
+ */
+ resume_execution(cur, (unsigned long)cur->addr, regs);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ preempt_enable_no_resched();
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We are here because the instructions in the pre/post handler
+ * caused the fault.
+ */
+
+ /* We increment the nmissed count for accounting,
+		 * we can also use npre/npostfault count for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned zero,
+ * try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ unsigned long addr = args->err;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_IERR:
+ if (arc_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ case DIE_TRAP:
+ if (arc_post_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr = regs->sp;
+
+ kcb->jprobe_saved_regs = *regs;
+ memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+ regs->ret = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ __asm__ __volatile__("unimp_s");
+ return;
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr;
+
+ *regs = kcb->jprobe_saved_regs;
+ sp_addr = regs->sp;
+ memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static void __used kretprobe_trampoline_holder(void)
+{
+ __asm__ __volatile__(".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n" "nop\n");
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+
+ ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ regs->ret = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ /* By returning a non zero value, we are telling the kprobe handler
+ * that we don't want the post_handler to run
+ */
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ /* Registering the trampoline code for the kret probe */
+ return register_kprobe(&trampoline_p);
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
+}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 000000000000..cdd359352c0a
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
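+/*
+ * ARC instructions encode 32-bit literals "middle-endian": the upper
+ * half-word is stored first, then the lower half-word. Patch a relocation
+ * target in that format (used for R_ARC_32_ME below)
+ */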
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+ *addr = (value & 0xffff0000) >> 16;
+ *(addr + 1) = (value & 0xffff);
+}
+
+/* ARC specific section quirks - before relocation loop in generic loader
+ *
+ * For dwarf unwinding out of modules, this needs to
+ * 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
+ * -fasynchronous-unwind-tables it doesn't).
+ * 2. Since we are iterating thru sec hdr tbl anyways, make a note of
+ * the exact section index, for later use.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int i;
+
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
+ sechdrs[i].sh_flags |= SHF_ALLOC;
+ mod->arch.unw_sec_idx = i;
+ break;
+ }
+ }
+#endif
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (mod->arch.unw_info)
+ unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex, /* sec index for sym tbl */
+ unsigned int relsec, /* sec index for relo sec */
+ struct module *module)
+{
+ int i, n;
+ Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym_entry, *sym_sec;
+ Elf32_Addr relocation;
+ Elf32_Addr location;
+ Elf32_Addr sec_to_patch;
+ int relo_type;
+
+ sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+ pr_debug("\n========== Module Sym reloc ===========================\n");
+ pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("=========================================================\n");
+ pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("=========================================================\n");
+
+ /* Loop thru entries in relocation section */
+ for (i = 0; i < n; i++) {
+
+ /* This is where to make the change */
+ location = sec_to_patch + rel_entry[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+ relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+ pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation,
+ strtab + sym_entry->st_name);
+
+ /* This assumes modules are built with -mlong-calls
+ * so any branches/jumps are absolute 32 bit jmps
+ * global data access again is abs 32 bit.
+ * Both of these are handled by same relocation type
+ */
+ relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+ if (likely(R_ARC_32_ME == relo_type))
+ arc_write_me((unsigned short *)location, relocation);
+ else if (R_ARC_32 == relo_type)
+ *((Elf32_Addr *) location) = relocation;
+ else
+ goto relo_err;
+
+ }
+ return 0;
+
+relo_err:
+ pr_err("%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+ return -ENOEXEC;
+
+}
+
+/* Just before lift off: After sections have been relocated, we add the
+ * dwarf section to unwinder table pool
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not been applied by then
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw;
+ int unwsec = mod->arch.unw_sec_idx;
+
+ if (unwsec) {
+ unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+ sechdrs[unwsec].sh_size);
+ mod->arch.unw_info = unw;
+ }
+#endif
+ return 0;
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 000000000000..0a7531d99294
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+ task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+ return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as sys-call return code.
+ * Ideally it should be copied to user space.
+ * However we can cheat because some sys-calls legitimately return
+ * absurdly high values.
+ * Since the TLS data ptr is not going to be in the range of 0xFFFF_xxxx
+ * it won't be considered a sys-call error,
+ * and it is loads better than copy-to-user, which is a definite
+ * D-TLB Miss
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+ return task_thread_info(current)->thr_ptr;
+}
+
+static inline void arch_idle(void)
+{
+ /* sleep, but enable all interrupts before committing */
+ __asm__("sleep 0x3");
+}
+
+void cpu_idle(void)
+{
+ /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+
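+		/*
+		 * Sleep with IRQs disabled; "sleep 0x3" re-enables them
+		 * atomically. On wakeup, loop back to re-check need_resched()
+		 * before dozing again
+		 */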
+doze:
+ local_irq_disable();
+ if (!need_resched()) {
+ arch_idle();
+ goto doze;
+ } else {
+ local_irq_enable();
+ }
+
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+
+ schedule_preempt_disabled();
+ }
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * | ... |
+ * | ... |
+ * | unused |
+ * | |
+ * ------------------ <==== top of Stack (thread.ksp)
+ * | UNUSED 1 word|
+ * ------------------
+ * | r25 |
+ * ~ ~
+ * | --to-- | (CALLEE Regs of user mode)
+ * | r13 |
+ * ------------------
+ * | fp |
+ * | blink | @ret_from_fork
+ * ------------------
+ * | |
+ * ~ ~
+ * ~ ~
+ * | |
+ * ------------------
+ * | r12 |
+ * ~ ~
+ * | --to-- | (scratch Regs of user mode)
+ * | r0 |
+ * ------------------
+ * | UNUSED 1 word|
+ * ------------------ <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long arg,
+ struct task_struct *p)
+{
+ struct pt_regs *c_regs; /* child's pt_regs */
+ unsigned long *childksp; /* to unwind out of __switch_to() */
+ struct callee_regs *c_callee; /* child's callee regs */
+	struct callee_regs *parent_callee;	/* parent's callee */
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Mark the specific anchors to begin with (see pic above) */
+ c_regs = task_pt_regs(p);
+ childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
+ c_callee = ((struct callee_regs *)childksp) - 1;
+
+ /*
+ * __switch_to() uses thread.ksp to start unwinding stack
+ * For kernel threads we don't need to create callee regs, the
+ * stack layout nevertheless needs to remain the same.
+ * Also, since __switch_to anyways unwinds callee regs, we use
+ * this to populate kernel thread entry-pt/args into callee regs,
+ * so that ret_from_kernel_thread() becomes simpler.
+ */
+ p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top */
+ childksp[0] = 0; /* fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(c_regs, 0, sizeof(struct pt_regs));
+
+ c_callee->r13 = arg; /* argument to kernel thread */
+ c_callee->r14 = usp; /* function */
+
+ return 0;
+ }
+
+ /*--------- User Task Only --------------*/
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+ childksp[0] = 0; /* for POP fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+	/* Copy parent's pt_regs onto child's kernel mode stack */
+ *c_regs = *regs;
+
+ if (usp)
+ c_regs->sp = usp;
+
+ c_regs->r0 = 0; /* fork returns 0 in child */
+
+ parent_callee = ((struct callee_regs *)regs) - 1;
+ *c_callee = *parent_callee;
+
+ if (unlikely(clone_flags & CLONE_SETTLS)) {
+ /*
+ * set task's userland tls data ptr from 4th arg
+		 * clone C-lib call is different from clone sys-call
+ */
+ task_thread_info(p)->thr_ptr = regs->r3;
+ } else {
+ /* Normal fork case: set parent's TLS ptr in child */
+ task_thread_info(p)->thr_ptr =
+ task_thread_info(current)->thr_ptr;
+ }
+
+ return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ */
+void exit_thread(void)
+{
+}
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0;
+}
+
+/*
+ * API expected by scheduler code: if a thread is sleeping, where is it sleeping?
+ * What is this good for? It will always be the scheduler or ret_from_fork,
+ * so we hard code that anyways.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ struct pt_regs *regs = task_pt_regs(t);
+ unsigned long blink = 0;
+
+ /*
+	 * If the thread being queried for is not itself calling this, then it
+	 * implies it is not executing, which in turn implies it is sleeping,
+	 * which in turn implies it got switched OUT by the scheduler.
+	 * In that case, its kernel mode blink can be reliably retrieved as per
+ * the picture above (right above pt_regs).
+ */
+ if (t != current && t->state != TASK_RUNNING)
+ blink = *((unsigned int *)regs - 1);
+
+ return blink;
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ if (x->e_machine != EM_ARCOMPACT)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ pr_err("ABI mismatch - you need newer toolchain\n");
+ force_sigsegv(SIGSEGV, current);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 000000000000..c6a81c58d0f3
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+ struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
+ return tmp;
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+ unsigned int stop_pc_val;
+
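+	/*
+	 * Helpers to copy out contiguous [START, END) slices of
+	 * user_regs_struct, sourcing them from pt_regs, callee_regs or the
+	 * thread struct as appropriate
+	 */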
+#define REG_O_CHUNK(START, END, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, START), \
+ offsetof(struct user_regs_struct, END));
+
+#define REG_O_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ REG_O_CHUNK(scratch, callee, ptregs);
+ REG_O_CHUNK(callee, efa, cregs);
+ REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+ if (!ret) {
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
+ }
+
+ REG_O_ONE(stop_pc, &stop_pc_val);
+ }
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, FIRST), \
+ offsetof(struct user_regs_struct, NEXT));
+
+#define REG_IN_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+#define REG_IGNORE_ONE(LOC) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ /* TBD: disallow updates to STATUS32, orig_r8 etc*/
+ REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
+ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+
+ return ret;
+}
+
+enum arc_getset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset arc_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view user_arc_view = {
+ .name = UTS_MACHINE,
+ .e_machine = EM_ARCOMPACT,
+ .regsets = arc_regsets,
+ .n = ARRAY_SIZE(arc_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_arc_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EIO;
+
+	pr_debug("REQ=%ld: ADDR=0x%lx, DATA=0x%lx\n", request, addr, data);
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+{
+ if (tracehook_report_syscall_entry(regs))
+ return ULONG_MAX;
+
+ return regs->r8;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 000000000000..e227a2b1c943
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+ /* Halt the processor */
+ __asm__ __volatile__("flag 1\n");
+}
+
+void machine_restart(char *__unused)
+{
+ /* Soft reset : jump to reset vector */
+ pr_info("Put your restart handler here\n");
+ machine_halt();
+}
+
+void machine_power_off(void)
+{
+ /* FIXME :: power off ??? */
+ machine_halt();
+}
+
+void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 000000000000..dc0f968dae0a
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+#include <asm/cache.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/prom.h>
+#include <asm/unwind.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
+
+int running_on_hw = 1; /* vs. on ISS */
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
+
+struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+
+
+void __init read_arc_build_cfg_regs(void)
+{
+ struct bcr_perip uncached_space;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ FIX_PTR(cpu);
+
+ READ_BCR(AUX_IDENTITY, cpu->core);
+
+ cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
+
+ cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+ if (cpu->vec_base == 0)
+ cpu->vec_base = (unsigned int)_int_vec_base_lds;
+
+ READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+ cpu->uncached_base = uncached_space.start << 24;
+
+ cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
+ cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
+ cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
+ cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
+ cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
+ READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
+
+ cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
+ cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
+
+	/* Note that we read the CCM BCRs independently of the kernel config.
+	 * This is to catch the cases where the user doesn't know that
+	 * CCMs are present in the hardware build.
+	 */
+ {
+ struct bcr_iccm iccm;
+ struct bcr_dccm dccm;
+ struct bcr_dccm_base dccm_base;
+ unsigned int bcr_32bit_val;
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
+ if (bcr_32bit_val) {
+ iccm = *((struct bcr_iccm *)&bcr_32bit_val);
+ cpu->iccm.base_addr = iccm.base << 16;
+ cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
+ }
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
+ if (bcr_32bit_val) {
+ dccm = *((struct bcr_dccm *)&bcr_32bit_val);
+ cpu->dccm.sz = 0x800 << (dccm.sz);
+
+ READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
+ cpu->dccm.base_addr = dccm_base.addr << 8;
+ }
+ }
+
+ READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+
+ read_decode_mmu_bcr();
+ read_decode_cache_bcr();
+
+ READ_BCR(ARC_REG_FP_BCR, cpu->fp);
+ READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
+}
+
+static const struct cpuinfo_data arc_cpu_tbl[] = {
+ { {0x10, "ARCTangent A5"}, 0x1F},
+ { {0x20, "ARC 600" }, 0x2F},
+ { {0x30, "ARC 700" }, 0x33},
+ { {0x34, "ARC 700 R4.10"}, 0x34},
+ { {0x00, NULL } }
+};
+
+char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+ struct bcr_identity *core = &cpu->core;
+ const struct cpuinfo_data *tbl;
+ int be = 0;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ be = 1;
+#endif
+ FIX_PTR(cpu);
+
+ n += scnprintf(buf + n, len - n,
+ "\nARC IDENTITY\t: Family [%#02x]"
+ " Cpu-id [%#02x] Chip-id [%#4x]\n",
+ core->family, core->cpu_id,
+ core->chip_id);
+
+ for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
+ if ((core->family >= tbl->info.id) &&
+ (core->family <= tbl->up_range)) {
+ n += scnprintf(buf + n, len - n,
+ "processor\t: %s %s\n",
+ tbl->info.str,
+ be ? "[Big Endian]" : "");
+ break;
+ }
+ }
+
+ if (tbl->info.id == 0)
+ n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+
+	n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u MHz\n",
+ (unsigned int)(arc_get_core_freq() / 1000000),
+ (unsigned int)(arc_get_core_freq() / 10000) % 100);
+
+ n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
+ (cpu->timers & 0x200) ? "TIMER1" : "",
+ (cpu->timers & 0x100) ? "TIMER0" : "");
+
+ n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
+ cpu->vec_base);
+
+ n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
+ cpu->uncached_base);
+
+ return buf;
+}
+
+static const struct id_to_str mul_type_nm[] = {
+ { 0x0, "N/A"},
+ { 0x1, "32x32 (spl Result Reg)" },
+ { 0x2, "32x32 (ANY Result Reg)" }
+};
+
+static const struct id_to_str mac_mul_nm[] = {
+ {0x0, "N/A"},
+ {0x1, "N/A"},
+ {0x2, "Dual 16 x 16"},
+ {0x3, "N/A"},
+ {0x4, "32x16"},
+ {0x5, "N/A"},
+ {0x6, "Dual 16x16 and 32x16"}
+};
+
+char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+
+ FIX_PTR(cpu);
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
+#define IS_USED(var) ((var) ? "(in-use)" : "(not used)")
+
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-Base]\t: %s %s %s %s %s %s\n",
+ IS_AVAIL2(cpu->extn.norm, "norm,"),
+ IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
+ IS_AVAIL1(cpu->extn.swap, "swap,"),
+ IS_AVAIL2(cpu->extn.minmax, "minmax,"),
+ IS_AVAIL1(cpu->extn.crc, "crc,"),
+ IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
+
+ n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
+ mul_type_nm[cpu->extn.mul].str);
+
+ n += scnprintf(buf + n, len - n, " MAC MPY: %s\n",
+ mac_mul_nm[cpu->extn_mac_mul.type].str);
+
+ if (cpu->core.family == 0x34) {
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
+ IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
+ IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
+ IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
+ }
+
+ n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
+ !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
+
+ if (cpu->dccm.sz)
+ n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
+ cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
+
+ if (cpu->iccm.sz)
+ n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
+ cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+
+ n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
+ !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
+
+ if (cpu->fp.ver)
+ n += scnprintf(buf + n, len - n, "SP [v%d] %s",
+ cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
+
+ if (cpu->dpfp.ver)
+ n += scnprintf(buf + n, len - n, "DP [v%d] %s",
+ cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
+
+ n += scnprintf(buf + n, len - n, "\n");
+
+#ifdef _ASM_GENERIC_UNISTD_H
+ n += scnprintf(buf + n, len - n,
+ "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
+#endif
+
+ return buf;
+}
+
+void __init arc_chk_ccms(void)
+{
+#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ /*
+	 * DCCM can be placed anywhere in hardware.
+	 * Make sure its placement/size matches what Linux is built with
+ */
+ if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ panic("Linux built with incorrect DCCM Base address\n");
+
+ if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+ panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+ panic("Linux built with incorrect ICCM Size\n");
+#endif
+#endif
+}
+
+/*
+ * Ensure that FP hardware and kernel config match
+ * -If hardware contains DPFP, kernel needs to save/restore FPU state
+ * across context switches
+ * -If hardware lacks DPFP, but the kernel is configured to save FPU state,
+ *   then the kernel trying to access non-existent DPFP regs will crash
+ *
+ * We only check for Dbl precision Floating Point, because only DPFP
+ * hardware has dedicated regs which need to be saved/restored on ctx-sw
+ * (Single Precision uses core regs), thus the kernel is essentially oblivious to it
+ */
+void __init arc_chk_fpu(void)
+{
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+ if (cpu->dpfp.ver) {
+#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
+ pr_warn("DPFP support broken in this kernel...\n");
+#endif
+ } else {
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ panic("H/w lacks DPFP support, apps won't work\n");
+#endif
+ }
+}
+
+/*
+ * Initialize and set up the processor core
+ * This is called by all the CPUs, so it should not do special-case work
+ * that applies only to the boot CPU, etc.
+ */
+
+void __init setup_processor(void)
+{
+ char str[512];
+ int cpu_id = smp_processor_id();
+
+ read_arc_build_cfg_regs();
+ arc_init_IRQ();
+
+ printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+
+ arc_mmu_init();
+ arc_cache_init();
+ arc_chk_ccms();
+
+ printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
+
+#ifdef CONFIG_SMP
+ printk(arc_platform_smp_cpuinfo());
+#endif
+
+ arc_chk_fpu();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_CMDLINE_UBOOT
+ /* Make sure that a whitespace is inserted before */
+ strlcat(command_line, " ", sizeof(command_line));
+#endif
+ /*
+ * Append .config cmdline to base command line, which might already
+ * contain u-boot "bootargs" (handled by head.S, if so configured)
+ */
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+ /* To force early parsing of things like mem=xxx */
+ parse_early_param();
+
+ /* Platform/board specific: e.g. early console registration */
+ if (machine_desc->init_early)
+ machine_desc->init_early();
+
+ setup_processor();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ setup_arch_memory();
+
+ /* copy flat DT out of .init and then unflatten it */
+ copy_devtree();
+ unflatten_device_tree();
+
+	/* Can be an issue if someone passes the cmd line arg "ro",
+	 * but that is unlikely, so keep it as it is
+ */
+ root_mountflags &= ~MS_RDONLY;
+
+ console_verbose();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+ arc_unwind_init();
+ arc_unwind_setup();
+}
+
+static int __init customize_machine(void)
+{
+ /* Add platform devices */
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
+
+ return 0;
+}
+arch_initcall(customize_machine);
+
+static int __init init_late_machine(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+
+ return 0;
+}
+late_initcall(init_late_machine);
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *str;
+ int cpu_id = ptr_to_cpu(v);
+
+ str = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!str)
+ goto done;
+
+ seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+#ifdef CONFIG_SMP
+ seq_printf(m, arc_platform_smp_cpuinfo());
+#endif
+
+ free_page((unsigned long)str);
+done:
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
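The BogoMIPS line above prints loops_per_jiffy scaled as lpj * HZ / 500000, split into an integer and a two-digit fractional part. A quick worked check of the arithmetic (values picked only for illustration, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned long loops_per_jiffy = 100000, HZ = 100;

	/* same split as the seq_printf above: 100000 * 100 / 500000 = 20.00 BogoMIPS */
	printf("Bogo MIPS : \t%lu.%02lu\n",
	       loops_per_jiffy / (500000 / HZ),
	       (loops_per_jiffy / (5000 / HZ)) % 100);
	return 0;
}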
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * Callback returns cpu-id to iterator for show routine, NULL to stop.
+	 * However, since NULL is also a valid cpu-id (0), we use a roundabout
+	 * way to pass it w/o having to kmalloc/free a 2-byte string.
+	 * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
+ */
+ return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+}
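The cpu_to_ptr()/ptr_to_cpu() helpers above make even cpu 0 encode to a non-NULL token, which is what lets c_start() use NULL purely as the stop marker. A minimal user-space sketch of the round trip (it mirrors the macros for a 32-bit build such as ARC; illustrative only, not part of the patch):

#include <stdio.h>

/* mirrors the kernel macros above (assumes 32-bit unsigned long, as on ARC) */
#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))

int main(void)
{
	void *tok = cpu_to_ptr(0);	/* 0xFFFF0000: non-NULL even for cpu 0 */

	printf("cpu %u\n", (unsigned int)ptr_to_cpu(tok));	/* prints "cpu 0" */
	return 0;
}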
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 000000000000..ee6ef2f60a28
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,360 @@
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() supports TIF_RESTORE_SIGMASK
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard arch implementation
+ * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ * the job to do_signal()
+ *
+ * vineetg: July 2009
+ * -Modified Code to support the uClibc provided userland sigreturn stub
+ *   to avoid the kernel synthesizing it on the user stack at runtime, costing TLB
+ * probes and Cache line flushes.
+ *
+ * vineetg: July 2009
+ * -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
+ *   is done as a block copy rather than one word at a time.
+ * This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ * - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ * -ViXS were still seeing crashes when using insmod to load drivers.
+ *   It turned out that the code to change Execute permissions for TLB entries
+ *   of user was not guarded for interrupts (mod_tlb_permission).
+ *   This was causing TLB entries to be overwritten on unrelated indexes
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ * -Exception happens in Delay slot of a JMP, and before user space resumes,
+ *   Signal is delivered (Ctrl + C) => SIGINT.
+ * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ * to run, but doesn't clear the Delay slot bit from status32. As a result,
+ * on resuming user mode, signal handler branches off to BTA of orig JMP
+ * -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#define MAGIC_SIGALTSTK 0x07302004
+ unsigned int sigret_magic;
+};
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err;
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (!err)
+ set_current_blocked(&set);
+
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ return err;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+ if (MAGIC_SIGALTSTK == magic)
+ return 1;
+ else
+ return 0;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+ int err;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* Since we stacked the signal on a word boundary,
+ * then 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 3)
+ goto badframe;
+
+ sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto badframe;
+
+ err = restore_usr_regs(regs, sf);
+ err |= __get_user(magic, &sf->sigret_magic);
+ if (err)
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+ return regs->r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = regs->sp;
+ void __user *frame;
+
+ /* This is the X/Open sanctioned signal stack switching */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* No matter what happens, 'sp' must be word
+ * aligned otherwise nasty things could happen
+ */
+
+ /* ATPCS B01 mandates 8-byte alignment */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /* Check that we can actually write to the signal frame */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
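The frame address computed above carves framesize bytes below sp and then rounds down to an 8-byte boundary with the & ~7 mask. A small worked example of that rounding (addresses are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x1000, framesize = 0x5c;

	/* 0x1000 - 0x5c = 0xfa4, rounded down to an 8-byte boundary -> 0xfa0 */
	unsigned long frame = (sp - framesize) & ~7UL;

	printf("frame = 0x%lx\n", frame);	/* 0xfa0 */
	return 0;
}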
+
+/*
+ * translate the signal
+ */
+static inline int map_sig(int sig)
+{
+ struct thread_info *thread = current_thread_info();
+ if (thread->exec_domain && thread->exec_domain->signal_invmap
+ && sig < 32)
+ sig = thread->exec_domain->signal_invmap[sig];
+ return sig;
+}
+
+static int
+setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic = 0;
+ int err = 0;
+
+ sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
+ if (!sf)
+ return 1;
+
+ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+ * #3: struct ucontext (completely populated)
+ */
+ if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
+ err |= copy_siginfo_to_user(&sf->info, info);
+ err |= __put_user(0, &sf->uc.uc_flags);
+ err |= __put_user(NULL, &sf->uc.uc_link);
+ err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+ /* setup args 2 and 3 for user mode handler */
+ regs->r1 = (unsigned long)&sf->info;
+ regs->r2 = (unsigned long)&sf->uc;
+
+ /*
+		 * small optim to avoid unconditionally calling do_sigaltstack
+ * in sigreturn path, now that we only have rt_sigreturn
+ */
+ magic = MAGIC_SIGALTSTK;
+ }
+
+ /*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+	 * inspected by userland: but are they allowed to fiddle with it?)
+ */
+ err |= stash_usr_regs(sf, regs, set);
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+
+ /* #1 arg to the user Signal handler */
+ regs->r0 = map_sig(signo);
+
+ /* setup PC of user space signal handler */
+ regs->ret = (unsigned long)ka->sa.sa_handler;
+
+ /*
+	 * handler returns using the sigreturn stub already provided by userspace
+ */
+ BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+ regs->blink = (unsigned long)ka->sa.sa_restorer;
+
+ /* User Stack for signal handler will be above the frame just carved */
+ regs->sp = (unsigned long)sf;
+
+ /*
+ * Bug 94183, Clear the DE bit, so that when signal handler
+ * starts to run, it doesn't use BTA
+ */
+ regs->status32 &= ~STATUS_DE_MASK;
+ regs->status32 |= STATUS_L_MASK;
+
+ return err;
+}
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /*
+ * ERESTARTNOHAND means that the syscall should
+ * only be restarted if there was no handler for
+ * the signal, and since we only get here if there
+ * is a handler, we don't restart
+ */
+ regs->r0 = -EINTR; /* ERESTART_xxx is internal */
+ break;
+
+ case -ERESTARTSYS:
+ /*
+ * ERESTARTSYS means to restart the syscall if
+ * there is no handler or the handler was
+ * registered with SA_RESTART
+ */
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+
+ case -ERESTARTNOINTR:
+ /*
+ * ERESTARTNOINTR means that the syscall should
+ * be called again after the signal handler returns.
+		 * Set up reg state just as it was before doing the trap.
+		 * r0 has been clobbered with the syscall return code, thus it
+		 * needs to be reloaded with the orig first arg to the syscall
+		 * from orig_r0. Rest of the relevant reg-file:
+		 * r8 (syscall num) and (r1 - r7) will be reset to
+		 * their orig user space values when we ret from the kernel
+ */
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ret)
+ force_sigsegv(sig, current);
+ else
+ signal_delivered(sig, info, ka, regs, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+ int restart_scall;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+ if (signr > 0) {
+ if (restart_scall) {
+ arc_restart_syscall(&ka, regs);
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+ handle_signal(signr, &ka, &info, regs);
+ return;
+ }
+
+ if (restart_scall) {
+ /* No handler for syscall: restart it */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r8 = __NR_restart_syscall;
+ regs->ret -= 4;
+ }
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+
+ /* If there's no signal to deliver, restore the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ /*
+	 * ASM glue guarantees that this is only called when returning to
+ * user mode
+ */
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 000000000000..3af3e06dcf02
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ * -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ * -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/mach_desc.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+struct plat_smp_ops plat_smp_ops;
+
+/* XXX: per cpu? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
+
+/* called from init ( ) => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non-Master CPU needs to wait for the Master to kick-start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform-specific support allowing the Master to directly set
+ * this CPU's PC (to be @first_lines_of_secondary()) and kick-start it.
+ *
+ * In the absence of such h/w assist, platforms can override this function
+ * - make this function busy-spin on a token, eventually set by Master
+ * (from arc_platform_smp_wakeup_cpu())
+ * - Once token is available, jump to @first_lines_of_secondary
+ * (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ * If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+ /*
+ * As a hack for debugging - since debugger will single-step over the
+	 * FLAG insn - wrap the halt itself in a self loop
+ */
+ __asm__ __volatile__(
+ "1: \n"
+ " flag 1 \n"
+ " b 1b \n");
+}
+
+const char *arc_platform_smp_cpuinfo(void)
+{
+ return plat_smp_ops.info;
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* MMU, Caches, Vector Table, Interrupts etc */
+ setup_processor();
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+
+ arc_local_timer_setup(cpu);
+
+ local_irq_enable();
+ preempt_disable();
+ cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, Secondary Processor is "HALT"ed:
+ * -It booted, but was halted in head.S
+ * -It was configured to halt-on-reset
+ * So need to wake it up.
+ *
+ * Essential requirements being where to run from (PC) and stack (SP)
+*/
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long wait_till;
+
+ secondary_idle_tsk = idle;
+
+ pr_info("Idle Task [%d] %p", cpu, idle);
+ pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+ if (plat_smp_ops.cpu_kick)
+ plat_smp_ops.cpu_kick(cpu,
+ (unsigned long)first_lines_of_secondary);
+
+ /* wait for 1 sec after kicking the secondary */
+ wait_till = jiffies + HZ;
+ while (time_before(jiffies, wait_till)) {
+ if (cpu_online(cpu))
+ break;
+ }
+
+ if (!cpu_online(cpu)) {
+		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
+ return -1;
+ }
+
+ secondary_idle_tsk = NULL;
+
+ return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/* Inter Processor Interrupt Handling */
+/*****************************************************************************/
+
+/*
+ * structures for inter-processor calls
+ * A collection of single-bit IPI messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+ IPI_NOP = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP
+};
+
+struct ipi_data {
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, callmap) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ set_bit(msg, &ipi->bits);
+ }
+
+ /* Call the platform specific cross-CPU call function */
+ if (plat_smp_ops.ipi_send)
+ plat_smp_ops.ipi_send((void *)callmap);
+
+ local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ machine_halt();
+}
+
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+ unsigned long msg = 0;
+
+ do {
+ msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+ switch (msg) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+}
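The IPI scheme above encodes each pending message as one bit in the per-cpu ipi_data.bits word: senders set_bit() their message and ring the platform IPI, while the receiver drains the whole word with xchg() and dispatches every set bit. A small stand-alone sketch of that coalescing pattern (generic C, not the kernel code; message numbering is illustrative):

#include <stdio.h>

enum { MSG_RESCHEDULE = 1, MSG_CALL_FUNC = 2 };	/* bit positions; bit 0 (NOP) unused */

int main(void)
{
	unsigned long pending = 0;

	/* sender side: two IPIs raised back to back simply OR in their bits */
	pending |= 1UL << MSG_RESCHEDULE;
	pending |= 1UL << MSG_CALL_FUNC;

	/* receiver side: grab-and-clear the word, then walk the set bits */
	unsigned long ops = __atomic_exchange_n(&pending, 0UL, __ATOMIC_SEQ_CST);

	for (unsigned int msg = 1; msg < sizeof(ops) * 8; msg++)
		if (ops & (1UL << msg))
			printf("handling IPI msg %u\n", msg);

	return 0;
}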
+
+/*
+ * arch-common ISR to handle inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned long ops;
+
+ if (plat_smp_ops.ipi_clear)
+ plat_smp_ops.ipi_clear(cpu, irq);
+
+ /*
+	 * XXX: is this loop really needed?
+	 * And do we need to move ipi_clear inside?
+ */
+ while ((ops = xchg(&ipi->bits, 0)) != 0)
+ __do_IPI(&ops, ipi, cpu);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+ int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+ return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 000000000000..a63ff842564b
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,254 @@
+/*
+ * stacktrace.c : stacktracing APIs needed by rest of kernel
+ * (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: aug 2009
+ * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ * for displaying task's kernel mode call stack in /proc/<pid>/stack
+ * -Iterator based approach to have single copy of unwinding core and APIs
+ * needing unwinding, implement the logic in iterator regarding:
+ * = which frame onwards to start capture
+ * = which frame to stop capturing (wchan)
+ * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ * vineetg: March 2009
+ * -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *
+ * rajeshwarr: 2008
+ * -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ * Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static void seed_unwind_frame_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
+{
+ if (tsk == NULL && regs == NULL) {
+ unsigned long fp, sp, blink, ret;
+ frame_info->task = current;
+
+ __asm__ __volatile__(
+ "mov %0,r27\n\t"
+ "mov %1,r28\n\t"
+ "mov %2,r31\n\t"
+ "mov %3,r63\n\t"
+ : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+ );
+
+ frame_info->regs.r27 = fp;
+ frame_info->regs.r28 = sp;
+ frame_info->regs.r31 = blink;
+ frame_info->regs.r63 = ret;
+ frame_info->call_frame = 0;
+ } else if (regs == NULL) {
+
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = KSTK_FP(tsk);
+ frame_info->regs.r28 = KSTK_ESP(tsk);
+ frame_info->regs.r31 = KSTK_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+		/* In the prologue of __switch_to, first FP is saved on stack
+		 * and then SP is copied to FP. Dwarf assumes the cfa to be FP based,
+		 * but we didn't save FP. The value retrieved above is FP's
+		 * state in the previous frame.
+		 * As a workaround for this, we unwind from the start of __switch_to
+		 * and adjust SP accordingly. The other limitation is that dwarf
+		 * rules are not generated for the inline assembly in the
+		 * __switch_to macro.
+		 */
+ frame_info->regs.r27 = 0;
+ frame_info->regs.r28 += 64;
+ frame_info->call_frame = 0;
+
+ } else {
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ }
+}
+
+#endif
+
+static noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int ret = 0;
+ unsigned int address;
+ struct unwind_frame_info frame_info;
+
+ seed_unwind_frame_info(tsk, regs, &frame_info);
+
+ while (1) {
+ address = UNW_PC(&frame_info);
+
+ if (address && __kernel_text_address(address)) {
+ if (consumer_fn(address, arg) == -1)
+ break;
+ }
+
+ ret = arc_unwind(&frame_info);
+
+ if (ret == 0) {
+ frame_info.regs.r63 = frame_info.regs.r31;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return address; /* return the last address it saw */
+#else
+	/* On ARC, only the dwarf based unwinder works. fp based backtracing is
+	 * not possible (-fno-omit-frame-pointer) because of the way the function
+	 * prologue is set up (callee regs saved and then fp set, not the other
+	 * way around)
+ */
+ pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *unused)
+{
+ __print_symbol(" %s\n", address);
+ return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (in_sched_functions(address))
+ return 0;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+ if (in_sched_functions(address))
+ return 0;
+
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+{
+ pr_info("\nStack Trace:\n");
+ arc_unwind_core(tsk, regs, __print_sym, NULL);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ show_stacktrace(tsk, NULL);
+}
+
+/* Expected by Rest of kernel code */
+void dump_stack(void)
+{
+ show_stacktrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
+ * Of course just returning schedule( ) would be pointless, so unwind until
+ * the function is outside scheduler code
+ */
+unsigned int get_wchan(struct task_struct *tsk)
+{
+ return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ arc_unwind_core(current, NULL, __collect_all, trace);
+}
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 000000000000..f6bdd07583f3
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone sys_clone_wrapper
+#define sys_fork sys_fork_wrapper
+#define sys_vfork sys_vfork_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[NR_syscalls] = {
+ [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
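sys_call_table above is filled by a designated-initializer trick: the GCC range initializer defaults every slot to sys_ni_syscall, and re-including asm/unistd.h with __SYSCALL redefined emits a `[nr] = (call),` override for each implemented syscall. A stand-alone sketch of the same pattern (hypothetical handler names, not the ARC table):

#include <stdio.h>

static long ni(void)     { return -1; }	/* stand-in for sys_ni_syscall */
static long do_foo(void) { return 42; }	/* hypothetical handler */

#define __SYSCALL(nr, call) [nr] = (call),

static long (*table[8])(void) = {
	[0 ... 7] = ni,		/* GCC range initializer: default every slot */
	__SYSCALL(3, do_foo)	/* later designator overrides slot 3 */
};

int main(void)
{
	printf("%ld %ld\n", table[2](), table[3]());	/* -1 42 */
	return 0;
}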
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644
index 000000000000..f13f72807aa5
--- /dev/null
+++ b/arch/arc/kernel/time.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2011
+ * -sched_clock( ) no longer jiffies based. Uses the same clocksource
+ * as gtod
+ *
+ * Rajeshwarr/Vineetg: Mar 2008
+ * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
+ * for arch independent gettimeofday()
+ * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
+ *
+ * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
+ * Each can be programmed to go from @count to @limit and optionally
+ * interrupt when that happens.
+ * A write to Control Register clears the Interrupt
+ *
+ * We've designated TIMER0 for events (clockevents)
+ * while TIMER1 for free running (clocksource)
+ *
+ * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define ARC_TIMER_MAX 0xFFFFFFFF
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_RTSC
+
+int __cpuinit arc_counter_setup(void)
+{
+ /* RTSC insn taps into cpu clk, needs no setup */
+
+ /* For SMP, only allowed if cross-core-sync, hence usable as cs */
+ return 1;
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ struct { u32 high, low; };
+#else
+ struct { u32 low, high; };
+#endif
+ cycle_t full;
+ } stamp;
+
+ flags = arch_local_irq_save();
+
+ __asm__ __volatile(
+ " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
+ " rtsc %0, 0 \n"
+ " mov %1, 0 \n"
+ : "=r" (stamp.low), "=r" (stamp.high));
+
+ arch_local_irq_restore(flags);
+
+ return stamp.full;
+}
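The endian-dependent union in arc_counter_read() is just a shift-free way of landing the two 32-bit halves in the right ends of the 64-bit cycle_t; on either endianness, the value read back from stamp.full matches what the explicit-shift form below would build (a sketch of the equivalence, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* what the union yields: high word in the upper 32 bits, low word in the lower 32 */
static uint64_t make_stamp(uint32_t low, uint32_t high)
{
	return ((uint64_t)high << 32) | low;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)make_stamp(0xdeadbeef, 0));	/* 0xdeadbeef */
	return 0;
}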
+
+static struct clocksource arc_counter = {
+ .name = "ARC RTSC",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTSC */
+
+static bool is_usable_as_clocksource(void)
+{
+#ifdef CONFIG_SMP
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/*
+ * set 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+int __cpuinit arc_counter_setup(void)
+{
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return is_usable_as_clocksource();
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#endif
+
+/********** Clock Event Device *********/
+
+/*
+ * Arm the timer to interrupt after @limit cycles
+ * The distinction for oneshot/periodic is done in arc_timer_event_ack() below
+ */
+static void arc_timer_event_setup(unsigned int limit)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+/*
+ * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
+ * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
+ * -Rearming is done by setting the IE bit
+ *
+ * Small optimisation: Normal code would have been
+ * if (irq_reenable)
+ * CTRL_REG = (IE | NH);
+ * else
+ * CTRL_REG = NH;
+ * However since IE is BIT0 we can fold the branch
+ */
+static void arc_timer_event_ack(unsigned int irq_reenable)
+{
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+}
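Spelling out the fold described in the comment: with TIMER_CTRL_IE at bit 0 (per the comment; the exact register bit values here are assumptions, not taken from arcregs.h), OR-ing the 0/1 irq_reenable flag straight into the control word produces the same two values the commented-out if/else would have:

#include <stdio.h>

#define TIMER_CTRL_IE	0x1	/* assumed: interrupt enable, bit 0 */
#define TIMER_CTRL_NH	0x2	/* assumed: count only when not halted */

static unsigned int ctrl_word(unsigned int irq_reenable)
{
	return irq_reenable | TIMER_CTRL_NH;	/* folds: 1 -> IE|NH, 0 -> NH */
}

int main(void)
{
	printf("periodic 0x%x, oneshot 0x%x\n", ctrl_word(1), ctrl_word(0));	/* 0x3, 0x2 */
	return 0;
}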
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static void arc_clkevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .mode = CLOCK_EVT_MODE_UNUSED,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_mode = arc_clkevent_set_mode,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+
+ arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
+ clk->event_handler(clk);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction arc_timer_irq = {
+ .name = "Timer0 (clock-evt-dev)",
+ .flags = IRQF_TIMER | IRQF_PERCPU,
+ .handler = timer_irq_handler,
+};
+
+/*
+ * Setup the local event timer for @cpu
+ * N.B. weak so that some exotic ARC SoCs can completely override it
+ */
+void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
+
+ clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
+
+ clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+
+ /*
+ * setup the per-cpu timer IRQ handler - for all cpus
+	 * For non-boot CPUs, explicitly unmask at the intc;
+ * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
+ */
+ if (!cpu)
+ setup_irq(TIMER0_IRQ, &arc_timer_irq);
+ else
+ arch_unmask_irq(TIMER0_IRQ);
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ *
+ * -Sets up h/w timers as applicable on boot cpu
+ * -Also sets up any global state needed for timer subsystem:
+ * - for "counting" timer, registers a clocksource, usable across CPUs
+ * (provided that underlying counter h/w is synchronized across cores)
+ * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
+ */
+void __init time_init(void)
+{
+ /*
+ * sets up the timekeeping free-flowing counter which also returns
+ * whether the counter is usable as clocksource
+ */
+ if (arc_counter_setup())
+ /*
+		 * CLK up to 4.29 GHz can be safely represented in 32 bits
+ * because Max 32 bit number is 4,294,967,295
+ */
+ clocksource_register_hz(&arc_counter, arc_get_core_freq());
+
+ /* sets up the periodic event timer */
+ arc_local_timer_setup(smp_processor_id());
+
+ if (machine_desc->init_time)
+ machine_desc->init_time();
+}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 000000000000..7496995371e8
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,170 @@
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/kprobes.h>
+#include <asm/unaligned.h>
+#include <asm/kgdb.h>
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg)
+{
+ show_kernel_fault_diag(str, regs, address, cause_reg);
+
+ /* DEAD END */
+ __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ * -for user faults enqueues requested signal
+ * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
+ */
+static noinline int handle_exception(unsigned long cause, char *str,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ if (user_mode(regs)) {
+ struct task_struct *tsk = current;
+
+ tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+ tsk->thread.cause_code = cause;
+
+ force_sig_info(info->si_signo, info, tsk);
+
+ } else {
+ /* If not due to copy_(to|from)_user, we are doomed */
+ if (fixup_exception(regs))
+ return 0;
+
+ die(str, regs, (unsigned long)info->si_addr, cause);
+ }
+
+ return 1;
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
+{ \
+ siginfo_t info = { \
+ .si_signo = signr, \
+ .si_errno = 0, \
+ .si_code = sicode, \
+ .si_addr = (void __user *)address, \
+ }; \
+ return handle_exception(cause, str, regs, &info);\
+}
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+
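For reference, the first DO_ERROR_INFO() line above expands to an ordinary entry point, roughly as follows (macro expansion shown for illustration only):

int do_privilege_fault(unsigned long cause, unsigned long address,
		       struct pt_regs *regs)
{
	siginfo_t info = {
		.si_signo = SIGILL,
		.si_errno = 0,
		.si_code  = ILL_PRVOPC,
		.si_addr  = (void __user *)address,
	};

	return handle_exception(cause, "Priv Op/Disabled Extn", regs, &info);
}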
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+int do_misaligned_access(unsigned long cause, unsigned long address,
+ struct pt_regs *regs, struct callee_regs *cregs)
+{
+ if (misaligned_fixup(address, regs, cause, cregs) != 0) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ return handle_exception(cause, "Misaligned Access", regs,
+ &info);
+ }
+ return 0;
+}
+
+#else
+DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
+#endif
+
+/*
+ * Entry point for misc errors such as Nested Exceptions
+ *  -Duplicate TLB entry is handled separately though
+ */
+void do_machine_check_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ die("Machine Check Exception", regs, address, cause);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is the same family as the TRAP0/SWI insn (uses the same vector).
+ * The only difference is that the SWI insn takes no operand, while TRAP_S does,
+ * which is reflected in the ECR Reg as an 8 bit param.
+ * Thus TRAP_S <n> can be used for specific purposes:
+ * -1 used for software breakpointing (gdb)
+ * -2 used by kprobes
+ */
+void do_non_swi_trap(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ unsigned int param = cause & 0xff;
+
+ switch (param) {
+ case 1:
+ trap_is_brkpt(cause, address, regs);
+ break;
+
+ case 2:
+ trap_is_kprobe(param, address, regs);
+ break;
+
+ case 3:
+ case 4:
+ kgdb_trap(regs, param);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ * -For a corner case, ARC kprobes implementation resorts to using
+ * this exception, hence the check
+ */
+void do_insterror_or_kprobe(unsigned long cause,
+ unsigned long address,
+ struct pt_regs *regs)
+{
+ /* Check if this exception is caused by kprobes */
+ if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
+ cause, SIGILL) == NOTIFY_STOP)
+ return;
+
+ insterror_is_error(cause, address, regs);
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 000000000000..7c10873c311f
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <asm/arcregs.h>
+
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ * -Prints 3 regs per line and a CR.
+ * -To continue, callee regs right after scratch, special handling of CR
+ */
+static noinline void print_reg_file(long *reg_rev, int start_num)
+{
+ unsigned int i;
+ char buf[512];
+ int n = 0, len = sizeof(buf);
+
+	/* weird loop because pt_regs stores regs reversed: r12..r0, r25..r13 */
+ for (i = start_num; i < start_num + 13; i++) {
+ n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
+ i, (unsigned long)*reg_rev);
+
+ if (((i + 1) % 3) == 0)
+ n += scnprintf(buf + n, len - n, "\n");
+
+ reg_rev--;
+ }
+
+ if (start_num != 0)
+ n += scnprintf(buf + n, len - n, "\n\n");
+
+ pr_info("%s", buf);
+}
+
+static void show_callee_regs(struct callee_regs *cregs)
+{
+ print_reg_file(&(cregs->r13), 13);
+}
+
+void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+{
+ struct path path;
+ char *path_nm = NULL;
+ struct mm_struct *mm;
+ struct file *exe_file;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ mmput(mm);
+
+ if (exe_file) {
+ path = exe_file->f_path;
+ path_get(&exe_file->f_path);
+ fput(exe_file);
+ path_nm = d_path(&path, buf, 255);
+ path_put(&path);
+ }
+
+done:
+ pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+}
+EXPORT_SYMBOL(print_task_path_n_nm);
+
+static void show_faulting_vma(unsigned long address, char *buf)
+{
+ struct vm_area_struct *vma;
+ struct inode *inode;
+ unsigned long ino = 0;
+ dev_t dev = 0;
+ char *nm = buf;
+
+ vma = find_vma(current->active_mm, address);
+
+ /* check against the find_vma( ) behaviour which returns the next VMA
+	 * if the containing VMA is not found
+ */
+ if (vma && (vma->vm_start <= address)) {
+ struct file *file = vma->vm_file;
+ if (file) {
+ struct path *path = &file->f_path;
+ nm = d_path(path, buf, PAGE_SIZE - 1);
+ inode = vma->vm_file->f_path.dentry->d_inode;
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ }
+ pr_info(" @off 0x%lx in [%s]\n"
+ " VMA: 0x%08lx to 0x%08lx\n\n",
+ address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
+ } else
+ pr_info(" @No matching VMA found\n");
+}
+
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+ unsigned int vec, cause_code, cause_reg;
+ unsigned long address;
+
+ cause_reg = current->thread.cause_code;
+ pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+
+ /* For Data fault, this is data address not instruction addr */
+ address = current->thread.fault_address;
+
+ vec = cause_reg >> 16;
+ cause_code = (cause_reg >> 8) & 0xFF;
+
+ /* For DTLB Miss or ProtV, display the memory involved too */
+ if (vec == ECR_V_DTLB_MISS) {
+ pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+ (cause_code == 0x01) ? "Read From" :
+ ((cause_code == 0x02) ? "Write to" : "EX"),
+ address, regs->ret);
+ } else if (vec == ECR_V_ITLB_MISS) {
+ pr_cont("Insn could not be fetched\n");
+ } else if (vec == ECR_V_MACH_CHK) {
+ pr_cont("%s\n", (cause_code == 0x0) ?
+ "Double Fault" : "Other Fatal Err");
+
+ } else if (vec == ECR_V_PROTV) {
+ if (cause_code == ECR_C_PROTV_INST_FETCH)
+ pr_cont("Execute from Non-exec Page\n");
+ else if (cause_code == ECR_C_PROTV_LOAD)
+ pr_cont("Read from Non-readable Page\n");
+ else if (cause_code == ECR_C_PROTV_STORE)
+ pr_cont("Write to Non-writable Page\n");
+ else if (cause_code == ECR_C_PROTV_XCHG)
+ pr_cont("Data exchange protection violation\n");
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+ } else if (vec == ECR_V_INSN_ERR) {
+ pr_cont("Illegal Insn\n");
+ } else {
+ pr_cont("Check Programmer's Manual\n");
+ }
+}
+
+/************************************************************************
+ * API called by rest of kernel
+ ***********************************************************************/
+
+void show_regs(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct callee_regs *cregs;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!buf)
+ return;
+
+ print_task_path_n_nm(tsk, buf);
+
+ if (current->thread.cause_code)
+ show_ecr_verbose(regs);
+
+ pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
+ pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+
+ show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+
+ /* can't use print_vma_addr() yet as it doesn't check for
+ * non-inclusive vma
+ */
+
+ /* print special regs */
+ pr_info("status32: 0x%08lx\n", regs->status32);
+ pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
+ pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
+ regs->bta, regs->blink);
+ pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
+
+ /* print regs->r0 thru regs->r12
+ * Sequential printing was generating horrible code
+ */
+ print_reg_file(&(regs->r0), 0);
+
+ /* If Callee regs were saved, display them too */
+ cregs = (struct callee_regs *)current->thread.callee_reg;
+ if (cregs)
+ show_callee_regs(cregs);
+
+ free_page((unsigned long)buf);
+}
+
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg)
+{
+ current->thread.fault_address = address;
+ current->thread.cause_code = cause_reg;
+
+ /* Caller and Callee regs */
+ show_regs(regs);
+
+ /* Show stack trace if this Fatality happened in kernel mode */
+ if (!user_mode(regs))
+ show_stacktrace(current, regs);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+static struct dentry *test_dentry;
+static struct dentry *test_dir;
+static struct dentry *test_u32_dentry;
+
+static u32 clr_on_read = 1;
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+u32 numitlb, numdtlb, num_pte_not_present;
+
+static int fill_display_data(char *kbuf)
+{
+ size_t num = 0;
+ num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
+ num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
+ num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
+
+ if (clr_on_read)
+ numitlb = numdtlb = num_pte_not_present = 0;
+
+ return num;
+}
+
+static int tlb_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)__get_free_page(GFP_KERNEL);
+ return 0;
+}
+
+/* called on user read(): display the counters */
+static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
+{
+ size_t num;
+ char *kbuf = (char *)file->private_data;
+
+	/* All of the data can be shoved in one iteration */
+ if (*offset != 0)
+ return 0;
+
+ num = fill_display_data(kbuf);
+
+	/* simple_read_from_buffer() is a helper for copying to user space.
+	 * It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
+	 * @3 (offset) into the user space address starting at @1 (user_buf).
+	 * @5 (len) is the max size of the user buffer.
+	 */
+ return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
+}
+
+/* called on user write: clears the counters */
+static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
+{
+ numitlb = numdtlb = num_pte_not_present = 0;
+ return length;
+}
+
+static int tlb_stats_close(struct inode *inode, struct file *file)
+{
+ free_page((unsigned long)(file->private_data));
+ return 0;
+}
+
+static const struct file_operations tlb_stats_file_ops = {
+ .read = tlb_stats_output,
+ .write = tlb_stats_clear,
+ .open = tlb_stats_open,
+ .release = tlb_stats_close
+};
+#endif
+
+static int __init arc_debugfs_init(void)
+{
+ test_dir = debugfs_create_dir("arc", NULL);
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
+ &tlb_stats_file_ops);
+#endif
+
+ test_u32_dentry =
+ debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
+
+ return 0;
+}
+
+module_init(arc_debugfs_init);
+
+static void __exit arc_debugfs_exit(void)
+{
+ debugfs_remove(test_u32_dentry);
+ debugfs_remove(test_dentry);
+ debugfs_remove(test_dir);
+}
+module_exit(arc_debugfs_exit);
+
+#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 000000000000..4cd81633febd
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg : May 2011
+ * -Adapted (from .26 to .35)
+ * -original contribution by Tim.yao@amlogic.com
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
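+/*
+ * Byte-wise accessors with exception table fixup: if the ldb.ab/stb.ab at a
+ * numbered label faults, the corresponding .fixup stub sets @err to 1 and
+ * resumes after the access. The .ab (post-increment) addressing lets the
+ * 16/32 bit variants below assemble/emit the value one byte at a time.
+ */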
+#define __get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.ab %1, [%2, 1]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, 1\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v ; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << 0; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb %1, [%2]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: mov %0, 1\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ __asm__( \
+ \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "3: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "4: stb %1, [%2]\n" \
+ "5:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "6: mov %0, 1\n" \
+ " b 5b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
+
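+/*
+ * Emulate the faulting load: apply any address write-back implied by the
+ * addressing mode (state->aa), fetch the data byte-wise via the checked
+ * helpers above, sign-extend a halfword if the .x flag was set, and write
+ * the result to the destination register unless it was only a prefetch.
+ */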
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ int val;
+
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+ if (state->aa == 2)
+ state->src2 = 0;
+ }
+
+ if (state->zz == 0) {
+ get32_unaligned_check(val, state->src1 + state->src2);
+ } else {
+ get16_unaligned_check(val, state->src1 + state->src2);
+
+ if (state->x)
+ val = (val << 16) >> 16;
+ }
+
+ if (state->pref == 0)
+ set_reg(state->dest, val, regs, cregs);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+ if (state->aa == 3)
+ state->src3 = 0;
+ } else if (state->aa == 3) {
+ if (state->zz == 2) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+ regs, cregs);
+ } else if (!state->zz) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+ regs, cregs);
+ } else {
+ goto fault;
+ }
+ }
+
+ /* write fix-up */
+ if (!state->zz)
+ put32_unaligned_check(state->src1, state->src2 + state->src3);
+ else
+ put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ struct disasm_state state;
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+ if (!user_mode(regs) || !unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
+		pr_warn_once("%s(%d) made unaligned access which was emulated"
+			     " by kernel assist.\n This can degrade application"
+			     " performance significantly.\n To enable further"
+			     " logging of such instances, please\n"
+			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+ get_task_comm(buf, current), task_pid_nr(current));
+ } else {
+ /* Add rate limiting if it gets down to it */
+ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+ get_task_comm(buf, current), task_pid_nr(current),
+ address, regs->ret);
+
+ }
+
+ disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* ldb/stb should not have unaligned exception */
+ if ((state.zz == 1) || (state.di))
+ goto fault;
+
+ if (!state.write)
+ fixup_load(&state, regs, cregs);
+ else
+ fixup_store(&state, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ if (delay_mode(regs)) {
+ regs->ret = regs->bta;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+ }
+
+ return 0;
+
+fault:
+ pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+ state.words[0], address);
+
+ return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 000000000000..a8d02223da44
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...) \
+do { \
+ if (dbg_unw) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0);
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+UNW_REGISTER_INFO};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end, signed ptrType);
+
+static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+ const void *table_start, unsigned long table_size,
+ const u8 *header_start, unsigned long header_size)
+{
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+
+ table->hdrsz = header_size;
+ smp_wmb();
+ table->header = header_start;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init arc_unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind,
+ NULL, 0);
+ /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+ const struct eh_frame_hdr_table_entry *e1 = p1;
+ const struct eh_frame_hdr_table_entry *e2 = p2;
+
+ return (e1->start > e2->start) - (e1->start < e2->start);
+}
+
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+ struct eh_frame_hdr_table_entry *e1 = p1;
+ struct eh_frame_hdr_table_entry *e2 = p2;
+ unsigned long v;
+
+ v = e1->start;
+ e1->start = e2->start;
+ e2->start = v;
+ v = e1->fde;
+ e1->fde = e2->fde;
+ e2->fde = v;
+}
+
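+/*
+ * Build a binary-search index over the FDEs when the linker did not provide
+ * a usable .eh_frame_hdr: a header (version, encodings, frame pointer, FDE
+ * count) followed by {initial location, FDE address} pairs sorted by
+ * location, so arc_unwind() can look up PCs efficiently.
+ */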
+static void __init setup_unwind_table(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
+{
+ const u8 *ptr;
+ unsigned long tableSize = table->size, hdrSize;
+ unsigned n;
+ const u32 *fde;
+ struct {
+ u8 version;
+ u8 eh_frame_ptr_enc;
+ u8 fde_count_enc;
+ u8 table_enc;
+ unsigned long eh_frame_ptr;
+ unsigned int fde_count;
+ struct eh_frame_hdr_table_entry table[];
+ } __attribute__ ((__packed__)) *header;
+
+ if (table->header)
+ return;
+
+ if (table->hdrsz)
+ pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+ table->name);
+
+ if (tableSize & (sizeof(*fde) - 1))
+ return;
+
+ for (fde = table->address, n = 0;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = cie_for_fde(fde, table);
+ signed ptrType;
+
+ if (cie == &not_fde)
+ continue;
+ if (cie == NULL || cie == &bad_cie)
+ return;
+ ptrType = fde_pointer_type(cie);
+ if (ptrType < 0)
+ return;
+
+ ptr = (const u8 *)(fde + 2);
+ if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+ ptrType)) {
+ /* FIXME_Rajesh We have 4 instances of null addresses
+ * instead of the initial loc addr
+ * return;
+ */
+ }
+ ++n;
+ }
+
+ if (tableSize || !n)
+ return;
+
+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ + 2 * n * sizeof(unsigned long);
+ header = alloc(hdrSize);
+ if (!header)
+ return;
+ header->version = 1;
+ header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+ header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+ % __alignof(typeof(header->fde_count)));
+ header->fde_count = n;
+
+ BUILD_BUG_ON(offsetof(typeof(*header), table)
+ % __alignof(typeof(*header->table)));
+ for (fde = table->address, tableSize = table->size, n = 0;
+ tableSize;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
+ const u32 *cie = (const u32 *)(fde[1]);
+
+ if (fde[1] == 0xffffffff)
+ continue; /* this is a CIE */
+ ptr = (const u8 *)(fde + 2);
+ header->table[n].start = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde,
+ fde_pointer_type(cie));
+ header->table[n].fde = (unsigned long)fde;
+ ++n;
+ }
+ WARN_ON(n != header->fde_count);
+
+ sort(header->table,
+ n,
+ sizeof(*header->table),
+ cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+ table->hdrsz = hdrSize;
+ smp_wmb();
+ table->header = (const void *)header;
+}
+
+static void *__init balloc(unsigned long sz)
+{
+ return __alloc_bootmem_nopanic(sz,
+ sizeof(unsigned int),
+ __pa(MAX_DMA_ADDRESS));
+}
+
+void __init arc_unwind_setup(void)
+{
+ setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ init_unwind_table(table, module->name,
+ module->module_core, module->core_size,
+ module->module_init, module->init_size,
+ table_start, table_size,
+ NULL, 0);
+
+#ifdef UNWIND_DEBUG
+ unw_debug("Table added for [%s] %lx %lx\n",
+ module->name, table->core.pc, table->core.range);
+#endif
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info {
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table;
+ prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+
+ unlink_table(&info); /* XXX: SMP */
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
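+/*
+ * DWARF LEB128 decoders: 7 payload bits per byte, MSB set means another byte
+ * follows; the signed variant sign-extends from bit 6 of the final byte.
+ * On overflow the cursor is advanced past @end so callers can detect it.
+ */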
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+ const u32 *cie;
+
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ return &bad_cie;
+
+ if (fde[1] == 0xffffffff)
+ return &not_fde; /* this is a CIE */
+
+ if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+ return NULL; /* this is not a valid FDE */
+
+ /* cie = fde + 1 - fde[1] / sizeof(*fde); */
+ cie = (u32 *) fde[1];
+
+ if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || (cie[1] != 0xffffffff))
+ return NULL; /* this is not a (valid) CIE */
+ return cie;
+}
+
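+/*
+ * Decode a DWARF-encoded pointer: the low nibble of @ptrType selects the
+ * representation (native word, 2/4/8 byte or [s|u]leb128), the high nibble
+ * the adjustment (absolute, pc-relative, ...); DW_EH_PE_indirect asks for a
+ * final dereference. Returns 0 on any malformed or unsupported encoding.
+ */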
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch (ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned((u16 *) ptr.p16s++);
+ else
+ value = get_unaligned((u16 *) ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned((unsigned long *)ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch (ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long __user *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned version = *ptr;
+
+ if (version != 1)
+ return -1; /* unsupported */
+
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+
+ /* check if augmentation string is nul-terminated */
+ aug = (const void *)ptr;
+ ptr = memchr(aug, 0, end - ptr);
+ if (ptr == NULL)
+ return -1;
+
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch (*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P':{
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType)
+ || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native | DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+ return delta > 0;
+ */
+ unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+ return 1;
+}
+
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("r%lu: ", reg);
+ switch (where) {
+ case Nowhere:
+ unw_debug("s ");
+ break;
+ case Memory:
+ unw_debug("c(%lu) ", value);
+ break;
+ case Register:
+ unw_debug("r(%lu) ", value);
+ break;
+ case Value:
+ unw_debug("v(%lu) ", value);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+}
+
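+/*
+ * Interpret the Call Frame Instructions of a CIE/FDE up to @targetLoc,
+ * updating @state. The top two bits of an instruction byte select the
+ * compact forms (advance_loc, offset, restore) with the operand in the low
+ * six bits; a zero top field means the whole byte is an extended opcode.
+ */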
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+ signed ptrType, struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+ u8 opcode;
+
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result =
+ processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+ state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
+ for (ptr.p8 = start; result && ptr.p8 < end;) {
+ switch (*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ opcode = *ptr.p8++;
+
+ switch (opcode) {
+ case DW_CFA_nop:
+ unw_debug("cfa nop ");
+ break;
+ case DW_CFA_set_loc:
+ state->loc = read_pointer(&ptr.p8, end,
+ ptrType);
+ if (state->loc == 0)
+ result = 0;
+ unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+ break;
+ case DW_CFA_advance_loc1:
+ unw_debug("\ncfa advance loc1:");
+ result = ptr.p8 < end
+ && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ value = *ptr.p8++;
+ value += *ptr.p8++ << 8;
+ unw_debug("\ncfa advance loc2:");
+ result = ptr.p8 <= end + 2
+ /* && advance_loc(*ptr.p16++, state); */
+ && advance_loc(value, state);
+ break;
+ case DW_CFA_advance_loc4:
+ unw_debug("\ncfa advance loc4:");
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_offset_extended: ");
+ set_rule(value, Memory,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ unw_debug("cfa_restore_extended: ");
+ case DW_CFA_undefined:
+ unw_debug("cfa_undefined: ");
+ case DW_CFA_same_value:
+ unw_debug("cfa_same_value: ");
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+ state);
+ break;
+ case DW_CFA_register:
+ unw_debug("cfa_register: ");
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ unw_debug("cfa_remember_state: ");
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ unw_debug("cfa_restore_state: ");
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label =
+ state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA,
+ sizeof(state->cfa));
+ memset(state->regs, 0,
+ sizeof(state->regs));
+ state->stackDepth = 0;
+ result =
+ processCFI(start, end, 0, ptrType,
+ state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa_offset: 0x%lx ",
+ state->cfa.offs);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak */
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+				unw_debug("cfa_def_cfa_register: ");
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t) 0 - get_uleb128(&ptr.p8,
+ end),
+ state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+				unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ unw_debug("\ncfa_adv_loc: ");
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ unw_debug("cfa_offset: ");
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+ state);
+ break;
+ case 3:
+ unw_debug("cfa_restore: ");
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result && ptr.p8 == end && (targetLoc == 0 || (
+ /*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc && */ state->label == NULL));
+}
+
+/* Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+ unsigned long *fptr;
+ unsigned long addr;
+
+ unw_debug("\n\nUNWIND FRAME:\n");
+ unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
+ UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+ UNW_FP(frame));
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+ {
+ unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+ unw_debug("\nStack Dump:\n");
+ for (i = 0; i < 20; i++, sptr++)
+ unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
+ unw_debug("\n");
+ }
+#endif
+
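+	/*
+	 * Locate the FDE covering @pc: first via the binary-search header
+	 * built by setup_unwind_table(), then, failing that, by a linear
+	 * scan over the whole table.
+	 */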
+ table = find_table(pc);
+ if (table != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native:
+ tableSize = sizeof(unsigned long);
+ break;
+ case DW_EH_PE_data2:
+ tableSize = 2;
+ break;
+ case DW_EH_PE_data4:
+ tableSize = 4;
+ break;
+ case DW_EH_PE_data8:
+ tableSize = 8;
+ break;
+ default:
+ tableSize = 0;
+ break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize && read_pointer(&ptr, end, hdr[1])
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2])) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
+ do {
+ const u8 *cur =
+ ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3]);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3])) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr +
+ tableSize,
+ hdr[3]);
+ }
+ }
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= endLoc)
+ fde = NULL;
+ } else
+ fde = NULL;
+ }
+ if (fde == NULL) {
+ for (fde = table->address, tableSize = table->size;
+ cie = NULL, tableSize > sizeof(*fde)
+ && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ cie = cie_for_fde(fde, table);
+ if (cie == &bad_cie) {
+ cie = NULL;
+ break;
+ }
+ if (cie == NULL
+ || cie == &not_fde
+ || (ptrType = fde_pointer_type(cie)) < 0)
+ continue;
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde, ptrType);
+ if (!startLoc)
+ continue;
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= startLoc && pc < endLoc)
+ break;
+ }
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+					/* check for ignorable chars or ones
+					 * already handled in the nul-terminated
+					 * augmentation string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+		/* get code alignment factor */
+ state.codeAlign = get_uleb128(&ptr, end);
+		/* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg =
+ state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+ end);
+ unw_debug("CIE Frame Info:\n");
+ unw_debug("return Address register 0x%lx\n",
+ retAddrReg);
+ unw_debug("data Align: %ld\n", state.dataAlign);
+ unw_debug("code Align: %lu\n", state.codeAlign);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width !=
+ sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+
+ top = STACK_TOP_UNW(frame->task);
+ bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+#else
+ if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+#endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link, (unsigned long *)
+ (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+#else
+ && link > UNW_FP(frame) && link < bottom
+#endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET)))
+ {
+ UNW_SP(frame) =
+ UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+ -
+#else
+ +
+#endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+ unw_debug("\nProcess instructions\n");
+
+ /* process instructions
+	 * For ARC, we optimize by having blink (retAddrReg) retain its
+	 * value in leaf functions, so we should not check
+	 * state.regs[retAddrReg].where == Nowhere
+ */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+/* || state.regs[retAddrReg].where == Nowhere */
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("\n");
+
+ unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+ if (REG_INVALID(i))
+ continue;
+
+ switch (state.regs[i].where) {
+ case Nowhere:
+ break;
+ case Memory:
+ unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+ break;
+ case Register:
+ unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+ break;
+ case Value:
+ unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+ break;
+ }
+ }
+
+ unw_debug("\n");
+#endif
+
+ /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+#endif
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+ endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+
+ unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
+ state.cfa.reg, state.cfa.offs, cfa);
+
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width >
+ reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch (reg_info[state.regs[i].value].width) {
+ case sizeof(u8):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u8);
+ break;
+ case sizeof(u16):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u16);
+ break;
+ case sizeof(u32):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u32);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u64);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+
+ unw_debug("\nRegister state after evaluation with realtime Stack:\n");
+ fptr = (unsigned long *)(&frame->regs);
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ FRAME_REG(i, u8) = state.regs[i].value;
+ break;
+ case sizeof(u16):
+ FRAME_REG(i, u16) = state.regs[i].value;
+ break;
+ case sizeof(u32):
+ FRAME_REG(i, u32) = state.regs[i].value;
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ FRAME_REG(i, u64) = state.regs[i].value;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory:
+ addr = cfa + state.regs[i].value * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ __get_user(FRAME_REG(i, u8),
+ (u8 __user *)addr);
+ break;
+ case sizeof(u16):
+ __get_user(FRAME_REG(i, u16),
+ (u16 __user *)addr);
+ break;
+ case sizeof(u32):
+ __get_user(FRAME_REG(i, u32),
+ (u32 __user *)addr);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ __get_user(FRAME_REG(i, u64),
+ (u64 __user *)addr);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ break;
+ }
+ unw_debug("r%d: 0x%lx ", i, *fptr);
+ }
+
+ return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d3c92f52d444
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(_stext)
+
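+/*
+ * jiffies aliases the low 32 bits of jiffies_64; on big-endian that half
+ * sits at byte offset 4
+ */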
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+ /*
+	 * ICCM starts at 0x8000_0000. So if the kernel is relocated to some
+	 * other address, make sure no peripheral mapped in the 0x8xxx_xxxx
+	 * range clashes with ICCM.
+	 * Essentially the vector table is also in ICCM.
+ */
+
+ . = CONFIG_LINUX_LINK_BASE;
+
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
+ . = ALIGN(PAGE_SIZE);
+ }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ .text.arcfp : {
+ *(.text.arcfp)
+ . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+ }
+#endif
+
+ /*
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
+ *
+ * Reason for having .init.ramfs above .init is to make sure that the
+ * binary blob is tucked away to one side, reducing the displacement
+ * between .init.text and .text, avoiding any possible relocation
+ * errors because of calls from .init.text to .text
+ * Yes such calls do exist. e.g.
+ * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+ */
+
+ __init_begin = .;
+
+ .init.ramfs : { INIT_RAM_FS }
+
+ . = ALIGN(PAGE_SIZE);
+ _stext = .;
+
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+ /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(L1_CACHE_BYTES)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ }
+
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+	 * .exit.text is discarded at runtime, not link time, to deal with
+	 *  references from .debug_frame
+	 * It will be freed with init memory, being inside [__init_begin : __init_end]
+ */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+
+ /*
+ * 1. this is .data essentially
+ * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+ */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ . = ALIGN(PAGE_SIZE);
+ .debug_frame : {
+ __start_unwind = .;
+ *(.debug_frame)
+ __end_unwind = .;
+ }
+#else
+ /DISCARD/ : { *(.debug_frame) }
+#endif
+
+ NOTES
+
+ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ STABS_DEBUG
+ DISCARDS
+
+ .arcextmap 0 : {
+ *(.gnu.linkonce.arcextmap.*)
+ *(.arcextmap.*)
+ }
+
+	/* open-coded because we need .debug_frame separately for unwinding */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ . = CONFIG_ARC_DCCM_BASE;
+ __arc_dccm_base = .;
+ .data.arcfp : {
+ *(.data.arcfp)
+ }
+ . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 000000000000..db46e200baba
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+lib-y := strchr-700.o strcmp.o strcpy-700.o strlen.o
+lib-y += memcmp.o memcpy-700.o memset.o
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 000000000000..bc813d55b6c3
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* BIG ENDIAN */
+#define WORD2 r3
+#define SHIFT r2
+#endif
+
+ARC_ENTRY memcmp
+ or r12,r0,r1
+ asl_s r12,r12,30
+ sub r3,r2,1
+ brls r2,r12,.Lbytewise
+ ld r4,[r0,0]
+ ld r5,[r1,0]
+ lsr.f lp_count,r3,3
+ lpne .Loop_end
+ ld_s WORD2,[r0,4]
+ ld_s r12,[r1,4]
+ brne r4,r5,.Leven
+ ld.a r4,[r0,8]
+ ld.a r5,[r1,8]
+ brne WORD2,r12,.Lodd
+.Loop_end:
+ asl_s SHIFT,SHIFT,3
+ bhs_s .Last_cmp
+ brne r4,r5,.Leven
+ ld r4,[r0,4]
+ ld r5,[r1,4]
+#ifdef __LITTLE_ENDIAN__
+ nop_s
+ ; one more load latency cycle
+.Last_cmp:
+ xor r0,r4,r5
+ bset r0,r0,SHIFT
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ b.d .Leven_cmp
+ and r1,r1,24
+.Leven:
+ xor r0,r4,r5
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+.Leven_cmp:
+ asl r2,r4,r1
+ asl r12,r5,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+ .balign 4
+.Lodd:
+ xor r0,WORD2,r12
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+ asl_s r2,r2,r1
+ asl_s r12,r12,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+#else /* BIG ENDIAN */
+.Last_cmp:
+ neg_s SHIFT,SHIFT
+ lsr r4,r4,SHIFT
+ lsr r5,r5,SHIFT
+ ; slow track insn
+.Leven:
+ sub.f r0,r4,r5
+ mov.ne r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+.Lodd:
+ cmp_s WORD2,r12
+
+ mov_s r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+#endif /* ENDIAN */
+ .balign 4
+.Lbytewise:
+ breq r2,0,.Lnil
+ ldb r4,[r0,0]
+ ldb r5,[r1,0]
+ lsr.f lp_count,r3
+ lpne .Lbyte_end
+ ldb_s r3,[r0,1]
+ ldb r12,[r1,1]
+ brne r4,r5,.Lbyte_even
+ ldb.a r4,[r0,2]
+ ldb.a r5,[r1,2]
+ brne r3,r12,.Lbyte_odd
+.Lbyte_end:
+ bcc .Lbyte_even
+ brne r4,r5,.Lbyte_even
+ ldb_s r3,[r0,1]
+ ldb_s r12,[r1,1]
+.Lbyte_odd:
+ j_s.d [blink]
+ sub r0,r3,r12
+.Lbyte_even:
+ j_s.d [blink]
+ sub r0,r4,r5
+.Lnil:
+ j_s.d [blink]
+ mov r0,0
+ARC_EXIT memcmp
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 000000000000..b64cc10ac918
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
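+; Word-at-a-time copy when both src and dst are word aligned (byte loop
+; otherwise), software-pipelined with one load ahead; the trailing 1-3 bytes
+; are blended into the destination's last word so only whole words are stored.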
+ARC_ENTRY memcpy
+ or r3,r0,r1
+ asl_s r3,r3,30
+ mov_s r5,r0
+ brls.d r2,r3,.Lcopy_bytewise
+ sub.f r3,r2,1
+ ld_s r12,[r1,0]
+ asr.f lp_count,r3,3
+ bbit0.d r3,2,.Lnox4
+ bmsk_s r2,r2,1
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,4]
+.Lnox4:
+ lppnz .Lendloop
+ ld_s r3,[r1,4]
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,8]
+ st.ab r3,[r5,4]
+.Lendloop:
+ breq r2,0,.Last_store
+ ld r3,[r5,0]
+#ifdef __LITTLE_ENDIAN__
+ add3 r2,-1,r2
+ ; uses long immediate
+ xor_s r12,r12,r3
+ bmsk r12,r12,r2
+ xor_s r12,r12,r3
+#else /* BIG ENDIAN */
+ sub3 r2,31,r2
+ ; uses long immediate
+ xor_s r3,r3,r12
+ bmsk r3,r3,r2
+ xor_s r12,r12,r3
+#endif /* ENDIAN */
+.Last_store:
+ j_s.d [blink]
+ st r12,[r5,0]
+
+ .balign 4
+.Lcopy_bytewise:
+ jcs [blink]
+ ldb_s r12,[r1,0]
+ lsr.f lp_count,r3
+ bhs_s .Lnox1
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,1]
+.Lnox1:
+ lppnz .Lendbloop
+ ldb_s r3,[r1,1]
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,2]
+ stb.ab r3,[r5,1]
+.Lendbloop:
+ j_s.d [blink]
+ stb r12,[r5,0]
+ARC_EXIT memcpy
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 000000000000..9b2d88d2e141
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
+
+ARC_ENTRY memset
+ mov_s r4,r0
+ or r12,r0,r2
+ bmsk.f r12,r12,1
+ extb_s r1,r1
+ asl r3,r1,8
+ beq.d .Laligned
+ or_s r1,r1,r3
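+	; r1 now holds the fill byte replicated into its low 16 bits
+	; (widened to the full word at .Laligned)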
+ brls r2,SMALL,.Ltiny
+ add r3,r2,r0
+ stb r1,[r3,-1]
+ bclr_s r3,r3,0
+ stw r1,[r3,-2]
+ bmsk.f r12,r0,1
+ add_s r2,r2,r12
+ sub.ne r2,r2,4
+ stb.ab r1,[r4,1]
+ and r4,r4,-2
+ stw.ab r1,[r4,2]
+ and r4,r4,-4
+.Laligned: ; This code address should be aligned for speed.
+ asl r3,r1,16
+ lsr.f lp_count,r2,2
+ or_s r1,r1,r3
+ lpne .Loop_end
+ st.ab r1,[r4,4]
+.Loop_end:
+ j_s [blink]
+
+ .balign 4
+.Ltiny:
+ mov.f lp_count,r2
+ lpne .Ltiny_end
+ stb.ab r1,[r4,1]
+.Ltiny_end:
+ j_s [blink]
+ARC_EXIT memset
+
+; memzero: @r0 = mem, @r1 = size_t
+; memset: @r0 = mem, @r1 = char, @r2 = size_t
+
+ARC_ENTRY memzero
+ ; adjust bzero args to memset args
+ mov r2, r1
+ mov r1, 0
+	b	memset    ;tail call, so no need to tinker with blink
+ARC_EXIT memzero
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 000000000000..99c10475d477
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has a relatively long pipeline and branch prediction, so we want
+ to avoid branches that are hard to predict. On the other hand, the
+ presence of the norm instruction makes it easier to operate on whole
+ words branch-free. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strchr
+ extb_s r1,r1
+ asl r5,r1,8
+ bmsk r2,r0,1
+ or r5,r5,r1
+ mov_s r3,0x01010101
+ breq.d r2,r0,.Laligned
+ asl r4,r5,16
+ sub_s r0,r0,r2
+ asl r7,r2,3
+ ld_s r2,[r0]
+#ifdef __LITTLE_ENDIAN__
+ asl r7,r3,r7
+#else
+ lsr r7,r3,r7
+#endif
+ or r5,r5,r4
+ ror r4,r3
+ sub r12,r2,r7
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0_ua
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r7
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
+ b .Lfound_char ; Likewise this one.
+; /* We require this code address to be unaligned for speed... */
+.Laligned:
+ ld_s r2,[r0]
+ or r5,r5,r4
+ ror r4,r3
+; /* ... so that this code address is aligned, for itself and ... */
+.Loop:
+ sub r12,r2,r3
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r3
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop /* ... so that this branch is unaligned. */
+ ; Found searched-for character. r0 has already advanced to next word.
+#ifdef __LITTLE_ENDIAN__
+/* We only need the information about the first matching byte
+ (i.e. the least significant matching byte) to be exact,
+ hence there is no problem with carry effects. */
+.Lfound_char:
+ sub r3,r7,1
+ bic r3,r3,r7
+ norm r2,r3
+ sub_s r0,r0,1
+ asr_s r2,r2,3
+ j.d [blink]
+ sub_s r0,r0,r2
+
+ .balign 4
+.Lfound0_ua:
+ mov r3,r7
+.Lfound0:
+ sub r3,r6,r3
+ bic r3,r3,r6
+ and r2,r3,r4
+ or_s r12,r12,r2
+ sub_s r3,r12,1
+ bic_s r3,r3,r12
+ norm r3,r3
+ add_s r0,r0,3
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ sub_s r0,r0,r12
+ j_s.d [blink]
+ mov.pl r0,0
+#else /* BIG ENDIAN */
+.Lfound_char:
+ lsr r7,r7,7
+
+ bic r2,r7,r6
+ norm r2,r2
+ sub_s r0,r0,4
+ asr_s r2,r2,3
+ j.d [blink]
+ add_s r0,r0,r2
+
+.Lfound0_ua:
+ mov_s r3,r7
+.Lfound0:
+ asl_s r2,r2,7
+ or r7,r6,r4
+ bic_s r12,r12,r2
+ sub r2,r7,r3
+ or r2,r2,r6
+ bic r12,r2,r12
+ bic.f r3,r4,r12
+ norm r3,r3
+
+ add.pl r3,r3,1
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ add_s r0,r0,r12
+ j_s.d [blink]
+ mov.mi r0,0
+#endif /* ENDIAN */
+ARC_EXIT strchr
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 000000000000..5dc802b45cf3
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* This is optimized primarily for the ARC700.
+ It would be possible to speed up the loops by one cycle / word
+ respective one cycle / byte by forcing double source 1 alignment, unrolling
+ by a factor of two, and speculatively loading the second word / byte of
+ source 1; however, that would increase the overhead for loop setup / finish,
+ and strcmp might often terminate early. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcmp
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne r2,0,.Lcharloop
+ mov_s r12,0x01010101
+ ror r5,r12
+.Lwordloop:
+ ld.ab r2,[r0,4]
+ ld.ab r3,[r1,4]
+ nop_s
+ sub r4,r2,r12
+ bic r4,r4,r2
+ and r4,r4,r5
+ brne r4,0,.Lfound0
+ breq r2,r3,.Lwordloop
+#ifdef __LITTLE_ENDIAN__
+ xor r0,r2,r3 ; mask for difference
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+#endif /* LITTLE ENDIAN */
+ cmp_s r2,r3
+ mov_s r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+
+ .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+ xor r0,r2,r3 ; mask for difference
+ or r0,r0,r4 ; or in zero indicator
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+ sub.f r0,r2,r3
+ mov.hi r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#else /* BIG ENDIAN */
+ /* The zero-detection above can mis-detect 0x01 bytes as zeroes
+	   because of carry-propagation from a lower significant zero byte.
+ We can compensate for this by checking that bit0 is zero.
+ This compensation is not necessary in the step where we
+ get a low estimate for r2, because in any affected bytes
+ we already have 0x00 or 0x01, which will remain unchanged
+ when bit 7 is cleared. */
+ .balign 4
+.Lfound0:
+ lsr r0,r4,8
+ lsr_s r1,r2
+ bic_s r2,r2,r0 ; get low estimate for r2 and get ...
+ bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
+ or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
+ cmp_s r3,r2 ; ... be independent of trailing garbage
+ or_s r2,r2,r0 ; likewise for r3 > r2
+ bic_s r3,r3,r0
+ rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
+ cmp_s r2,r3
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#endif /* ENDIAN */
+
+ .balign 4
+.Lcharloop:
+ ldb.ab r2,[r0,1]
+ ldb.ab r3,[r1,1]
+ nop_s
+ breq r2,0,.Lcmpend
+ breq r2,r3,.Lcharloop
+.Lcmpend:
+ j_s.d [blink]
+ sub r0,r2,r3
+ARC_EXIT strcmp
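
On a mismatch, the little-endian path above isolates the least significant differing byte
before comparing, so that bytes beyond the first difference (or beyond the terminating NUL,
which .Lfound0 ORs into the difference mask) cannot influence the result. The
"sub r1,r5,r0 / xor r0,r5,r1" step, with r5 = 0x80808080, expands the lowest difference bit
into a mask covering that byte. A C sketch of just that masking step, for illustration only;
the function name is not from the patch:

    #include <stdint.h>

    /* Assuming a != b: build a mask covering the least significant byte in
     * which they differ (from its lowest differing bit up to bit 7), so that
     * comparing (a & mask) with (b & mask) orders the words by their first
     * differing byte. */
    static uint32_t first_diff_byte_mask(uint32_t a, uint32_t b)
    {
            uint32_t diff = a ^ b;                /* mask for difference     */
            uint32_t bit  = diff & ~(diff - 1);   /* lowest difference bit   */

            /* 0x80808080 - bit borrows only within that byte, so the XOR
             * lights up that byte from 'bit' upwards and nothing else. */
            return 0x80808080u ^ (0x80808080u - bit);
    }
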
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 000000000000..b7ca4ae81d88
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
+ If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+ it 8 byte aligned. Thus, we can do a little read-ahead, without
+ dereferencing a cache line that we should not touch.
+ Note that short and long instructions have been scheduled to avoid
+ branch stalls.
+ The beq_s to r3z could be made unaligned & long to avoid a stall
+   there, but it is not likely to be taken often, and it
+   would also be likely to cost an unaligned mispredict at the next call. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcpy
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne.d r2,0,charloop
+ mov_s r10,r0
+ ld_s r3,[r1,0]
+ mov r8,0x01010101
+ bbit0.d r1,2,loop_start
+ ror r12,r8
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne r3z
+ mov_s r4,r3
+ .balign 4
+loop:
+ ld.a r3,[r1,4]
+ st.ab r4,[r10,4]
+loop_start:
+ ld.a r4,[r1,4]
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne_s r3z
+ st.ab r3,[r10,4]
+ sub r2,r4,r8
+ bic r2,r2,r4
+ tst r2,r12
+ beq loop
+ mov_s r3,r4
+#ifdef __LITTLE_ENDIAN__
+r3z: bmsk.f r1,r3,7
+ lsr_s r3,r3,8
+#else
+r3z: lsr.f r1,r3,24
+ asl_s r3,r3,8
+#endif
+ bne.d r3z
+ stb.ab r1,[r10,1]
+ j_s [blink]
+
+ .balign 4
+charloop:
+ ldb.ab r3,[r1,1]
+
+
+ brne.d r3,0,charloop
+ stb.ab r3,[r10,1]
+ j [blink]
+ARC_EXIT strcpy
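
Stripped of the alignment prologue and the load scheduling described in the header comment,
the fast path above is a word-copy loop with one word of read-ahead and a byte-wise tail once
the word containing the terminating NUL shows up (the asm plays the tail out of the word it
already loaded rather than re-reading memory). A simplified C rendering of that shape,
assuming, as the asm does, that both pointers are word aligned; the name and the use of
memcpy for the word accesses are illustrative only:

    #include <stdint.h>
    #include <string.h>

    static char *word_strcpy(char *dst, const char *src)
    {
            char *d = dst;
            uint32_t w;

            for (;;) {
                    memcpy(&w, src, sizeof(w));              /* one word load   */
                    if ((w - 0x01010101u) & ~w & 0x80808080u)
                            break;                           /* word holds NUL  */
                    memcpy(d, &w, sizeof(w));                /* store all 4     */
                    src += sizeof(w);
                    d   += sizeof(w);
            }
            while ((*d++ = *src++) != '\0')                  /* byte tail (r3z) */
                    ;
            return dst;
    }
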
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 000000000000..39759e099696
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strlen
+ or r3,r0,7
+ ld r2,[r3,-7]
+ ld.a r6,[r3,-3]
+ mov r4,0x01010101
+ ; uses long immediate
+#ifdef __LITTLE_ENDIAN__
+ asl_s r1,r0,3
+ btst_s r0,2
+ asl r7,r4,r1
+ ror r5,r4
+ sub r1,r2,r7
+ bic_s r1,r1,r2
+ mov.eq r7,r4
+ sub r12,r6,r7
+ bic r12,r12,r6
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#else /* BIG ENDIAN */
+ ror r5,r4
+ btst_s r0,2
+ mov_s r1,31
+ sub3 r7,r1,r0
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ bmsk r1,r1,r7
+ sub r12,r6,r4
+ bic r12,r12,r6
+ bmsk.ne r12,r12,r7
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#endif /* ENDIAN */
+
+.Loop:
+ ld_s r2,[r3,4]
+ ld.a r6,[r3,8]
+ ; stall for load result
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ sub r12,r6,r4
+ bic r12,r12,r6
+ or r12,r12,r1
+ and r12,r12,r5
+ breq r12,0,.Loop
+.Lend:
+ and.f r1,r1,r5
+ sub.ne r3,r3,4
+ mov.eq r1,r12
+#ifdef __LITTLE_ENDIAN__
+ sub_s r2,r1,1
+ bic_s r2,r2,r1
+ norm r1,r2
+ sub_s r0,r0,3
+ lsr_s r1,r1,3
+ sub r0,r3,r0
+ j_s.d [blink]
+ sub r0,r0,r1
+#else /* BIG ENDIAN */
+ lsr_s r1,r1,7
+ mov.eq r2,r6
+ bic_s r1,r1,r2
+ norm r1,r1
+ sub r0,r3,r0
+ lsr_s r1,r1,3
+ j_s.d [blink]
+ add r0,r0,r1
+#endif /* ENDIAN */
+.Learly_end:
+ b.d .Lend
+ sub_s.ne r1,r1,r1
+ARC_EXIT strlen
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 000000000000..168dc146a8f6
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,10 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-y := extable.o ioremap.o dma.o fault.o init.o
+obj-y += tlb.o tlbex.o cache_arc700.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
new file mode 100644
index 000000000000..88d617d84234
--- /dev/null
+++ b/arch/arc/mm/cache_arc700.c
@@ -0,0 +1,768 @@
+/*
+ * ARC700 VIPT Cache Management
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
+ *
+ * vineetg: Apr 2011
+ * -Now that MMU can support larger pg sz (16K), the determination of
+ *  aliasing should not be based on assumption of 8k pg
+ *
+ * vineetg: Mar 2011
+ * -optimised version of flush_icache_range( ) for making I/D coherent
+ * when vaddr is available (agnostic of num of aliases)
+ *
+ * vineetg: Mar 2011
+ * -Added documentation about I-cache aliasing on ARC700 and the way it
+ * was handled up until MMU V2.
+ * -Spotted a three year old bug when killing the 4 aliases, which needs
+ * bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
+ * instead of paddr | {0x00, 0x01, 0x10, 0x11}
+ * (Rajesh you owe me one now)
+ *
+ * vineetg: Dec 2010
+ * -Off-by-one error when computing num_of_lines to flush
+ * This broke signal handling with bionic which uses synthetic sigret stub
+ *
+ * vineetg: Mar 2010
+ * -GCC can't generate ZOL for core cache flush loops.
+ *  Converted them into iteration based loops as opposed to while (start < end) types
+ *
+ * Vineetg: July 2009
+ * -In I-cache flush routine we used to check for aliasing for every line INV.
+ * Instead now we setup routines per cache geometry and invoke them
+ * via function pointers.
+ *
+ * Vineetg: Jan 2009
+ * -Cache Line flush routines used to flush an extra line beyond end addr
+ * because check was while (end >= start) instead of (end > start)
+ * =Some call sites had to work around by doing -1, -4 etc to end param
+ *  =Some callers didn't care. This was especially bad in case of INV routines
+ * which would discard valid data (cause of the horrible ext2 bug
+ * in ARC IDE driver)
+ *
+ * vineetg: June 11th 2008: Fixed flush_icache_range( )
+ * -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
+ * to be flushed, which it was not doing.
+ * -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
+ * however ARC cache maintenance OPs require PHY addr. Thus need to do
+ * vmalloc_to_phy.
+ * -Also added optimisation there, that for range > PAGE SIZE we flush the
+ * entire cache in one shot rather than line by line. For e.g. a module
+ * with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
+ * while cache is only 16 or 32k.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+static void __ic_line_inv_no_alias(unsigned long, int);
+static void __ic_line_inv_2_alias(unsigned long, int);
+static void __ic_line_inv_4_alias(unsigned long, int);
+
+/* Holds the ptr to flush routine, depending on size due to aliasing issues */
+static void (*___flush_icache_rtn) (unsigned long, int);
+#endif
+
+char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ unsigned int c = smp_processor_id();
+
+#define PR_CACHE(p, enb, str) \
+{ \
+ if (!(p)->ver) \
+ n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
+ else \
+ n += scnprintf(buf + n, len - n, \
+ str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
+ TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
+ enb ? "" : "DISABLED (kernel-build)"); \
+}
+
+ PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
+ PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
+
+ return buf;
+}
+
+/*
+ * Read the Cache Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation done here, simply read/convert the BCRs
+ */
+void __init read_decode_cache_bcr(void)
+{
+ struct bcr_cache ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic, *p_dc;
+ unsigned int cpu = smp_processor_id();
+
+ p_ic = &cpuinfo_arc700[cpu].icache;
+ READ_BCR(ARC_REG_IC_BCR, ibcr);
+
+ if (ibcr.config == 0x3)
+ p_ic->assoc = 2;
+ p_ic->line_len = 8 << ibcr.line_len;
+ p_ic->sz = 0x200 << ibcr.sz;
+ p_ic->ver = ibcr.ver;
+
+ p_dc = &cpuinfo_arc700[cpu].dcache;
+ READ_BCR(ARC_REG_DC_BCR, dbcr);
+
+ if (dbcr.config == 0x2)
+ p_dc->assoc = 4;
+ p_dc->line_len = 16 << dbcr.line_len;
+ p_dc->sz = 0x200 << dbcr.sz;
+ p_dc->ver = dbcr.ver;
+}
+
+/*
+ * 1. Validate the Cache Geometry (compile time config matches hardware)
+ * 2. If I-cache suffers from aliasing, set up workarounds (different flush rtn)
+ *    (aliasing D-cache configurations are not supported YET)
+ * 3. Enable the Caches, setup default flush mode for D-Cache
+ * 4. Calculate the SHMLBA used by user space
+ */
+void __init arc_cache_init(void)
+{
+ unsigned int temp;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ int way_pg_ratio = way_pg_ratio;
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ if (!ic->ver)
+ goto chk_dc;
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ /* 1. Confirm some of I-cache params which Linux assumes */
+ if ((ic->assoc != ARC_ICACHE_WAYS) ||
+ (ic->line_len != ARC_ICACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+#if (CONFIG_ARC_MMU_VER > 2)
+ if (ic->ver != 3) {
+ if (running_on_hw)
+ panic("Cache ver doesn't match MMU ver\n");
+
+ /* For ISS - suggest the toggles to use */
+ pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
+
+ }
+#endif
+
+ /*
+ * if Cache way size is <= page size then no aliasing exhibited
+ * otherwise ratio determines num of aliases.
+ * e.g. 32K I$, 2 way set assoc, 8k pg size
+ * way-sz = 32k/2 = 16k
+ * way-pg-ratio = 16k/8k = 2, so 2 aliases possible
+ * (meaning 1 line could be in 2 possible locations).
+ */
+ way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
+ switch (way_pg_ratio) {
+ case 0:
+ case 1:
+ ___flush_icache_rtn = __ic_line_inv_no_alias;
+ break;
+ case 2:
+ ___flush_icache_rtn = __ic_line_inv_2_alias;
+ break;
+ case 4:
+ ___flush_icache_rtn = __ic_line_inv_4_alias;
+ break;
+ default:
+ panic("Unsupported I-Cache Sz\n");
+ }
+#endif
+
+ /* Enable/disable I-Cache */
+ temp = read_aux_reg(ARC_REG_IC_CTRL);
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ temp &= ~IC_CTRL_CACHE_DISABLE;
+#else
+ temp |= IC_CTRL_CACHE_DISABLE;
+#endif
+
+ write_aux_reg(ARC_REG_IC_CTRL, temp);
+
+chk_dc:
+ if (!dc->ver)
+ return;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ if ((dc->assoc != ARC_DCACHE_WAYS) ||
+ (dc->line_len != ARC_DCACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+
+ /* check for D-Cache aliasing */
+ if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
+ panic("D$ aliasing not handled right now\n");
+#endif
+
+	/* Set the default Invalidate Mode to "simply discard dirty lines"
+	 * as this is more frequent than flush before invalidate
+	 * Of course we toggle this default behaviour when desired
+ */
+ temp = read_aux_reg(ARC_REG_DC_CTRL);
+ temp &= ~DC_CTRL_INV_MODE_FLUSH;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ /* Enable D-Cache: Clear Bit 0 */
+ write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+#else
+ /* Flush D cache */
+ write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+ /* Disable D cache */
+ write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
+#endif
+
+ return;
+}
+
+#define OP_INV 0x1
+#define OP_FLUSH 0x2
+#define OP_FLUSH_N_INV 0x3
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+static inline void wait_for_flush(void)
+{
+ while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+ ;
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /* Dcache provides 2 cmd: FLUSH or INV
+	 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * Default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDC;
+ else
+ aux = ARC_REG_DC_FLSH;
+
+ write_aux_reg(aux, 0x1);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+ /* Switch back the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Per Line Operation on D-Cache
+ * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
+ * Its sole purpose is to help gcc generate ZOL
+ */
+static inline void __dc_line_loop(unsigned long start, unsigned long sz,
+ int aux_reg)
+{
+ int num_lines, slack;
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @start - aligned to cache line and integral @num_lines.
+ * This however can be avoided for page sized since:
+ * -@start will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~DCACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
+
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * Just as for I$, in MMU v3, D$ ops also require
+ * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
+ * But we pass phy addr for both. This works since Linux
+ * doesn't support aliasing configs for D$, yet.
+ * Thus paddr is enough to provide both tag and index.
+ */
+ write_aux_reg(ARC_REG_DC_PTAG, start);
+#endif
+ write_aux_reg(aux_reg, start);
+ start += ARC_DCACHE_LINE_LEN;
+ }
+}
+
+/*
+ * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(unsigned long start, unsigned long sz,
+ const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /*
+ * Dcache provides 2 cmd: FLUSH or INV
+	 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * Default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDL;
+ else
+ aux = ARC_REG_DC_FLDL;
+
+ __dc_line_loop(start, sz, aux);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+ /* Switch back the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(cacheop)
+#define __dc_line_op(start, sz, cacheop)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+/*
+ * I-Cache Aliasing in ARC700 VIPT caches
+ *
+ * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
+ * to "index" into SET of cache-line and paddr from MMU to match the TAG
+ * in the WAYS of SET.
+ *
+ * However the CDU interface (to flush/inv lines from software) only takes
+ * paddr (to have simpler hardware interface). For simpler cases, using paddr
+ * alone suffices.
+ * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
+ * way_sz = cache_sz / num_ways = 16k/2 = 8k
+ * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
+ * Ignoring the bottom 5 bits corresponding to the offset within a 32b cacheline,
+ * bits req for calc set-index = bits 12:5 (0 based). Since this range fits
+ * inside the bottom 13 bits of paddr, which are same for vaddr and paddr
+ * (with 8k pg sz), paddr alone can be safely used by CDU to unambiguously
+ * locate a cache-line.
+ *
+ * However for a differently sized cache, say 32k I$, above math yields need
+ * for 14 bits of vaddr to locate a cache line, which can't be provided by
+ * paddr, since the bit 13 (0 based) might differ between the two.
+ *
+ * This lack of extra bits needed for correct line addressing, defines the
+ * classical problem of Cache aliasing with VIPT architectures
+ * num_aliases = 1 << extra_bits
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
+ * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
+ * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ *
+ * ------------------
+ * MMU v1/v2 (Fixed Page Size 8k)
+ * ------------------
+ * The solution was to provide CDU with these additional vaddr bits. These
+ * would be bits [x:13], x would depend on cache-geom.
+ * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
+ * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
+ * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
+ * represent the offset within cache-line. The adv of using this "clumsy"
+ * interface for additional info was no new reg was needed in CDU.
+ *
+ * 17:13 represented the max num of bits passable, actual bits needed were
+ * fewer, based on the num-of-aliases possible.
+ * -for 2 alias possibility, only bit 13 needed (32K cache)
+ * -for 4 alias possibility, bits 14:13 needed (64K cache)
+ *
+ * Since vaddr was not available for all instances of I$ flush req by core
+ * kernel, the only safe way (non-optimal though) was to kill all possible
+ * lines which could represent an alias (even if they didn't represent one
+ * in execution).
+ * e.g. for 64K I$, 4 aliases possible, so we did
+ * flush start
+ * flush start | 0x01
+ * flush start | 0x2
+ * flush start | 0x3
+ *
+ * The penalty was only that of invoking the operation itself: since tag match
+ * is anyway paddr based, a line which didn't represent an alias would not
+ * match the paddr, hence won't be killed
+ *
+ * Note that aliasing concerns are independent of line-sz for a given cache
+ * geometry (size + set_assoc) because the extra bits required by line-sz are
+ * reduced from the set calc.
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
+ * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
+ * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
+ *
+ * ------------------
+ * MMU v3
+ * ------------------
+ * This ver of MMU supports var page sizes (1k-16k) - Linux will support
+ * 8k (default), 16k and 4k.
+ * However from a hardware perspective, smaller page sizes aggravate aliasing,
+ * meaning more vaddr bits are needed to disambiguate the cache-line-op;
+ * the existing scheme of piggybacking won't work for certain configurations.
+ * Two new registers, IC_PTAG and DC_PTAG, were introduced.
+ * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ */
+
+/***********************************************************
+ * Machine specific helpers for per line I-Cache invalidate.
+ * 3 routines to account for 1, 2, 4 aliases possible
+ */
+
+static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+#endif
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
+ * reg to pass the "tag" bits and existing IVIL reg only looks
+ * at bits relevant for "index" (details above)
+ * Programming Notes:
+ * -when writing tag to PTAG reg, bit chopping can be avoided,
+ * CDU ignores non-tag bits.
+ * -Ideally "index" must be computed from vaddr, but it is not
+ * avail in these rtns. So to be safe, we kill the lines in all
+ * possible indexes corresp to num of aliases possible for
+ * given cache config.
+ */
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x3 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x2 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv(unsigned long start, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+
+ /*
+ * Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @start - aligned to cache line, and integral @num_lines
+ * However page sized flushes can be compile time optimised.
+ * -@start will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+ local_irq_save(flags);
+ (*___flush_icache_rtn) (start, num_lines);
+ local_irq_restore(flags);
+}
+
+/* Unlike routines above, having vaddr for flush op (along with paddr),
+ * prevents the need to speculatively kill the lines in multiple sets
+ * based on ratio of way_sz : pg_sz
+ */
+static void __ic_line_inv_vaddr(unsigned long phy_start,
+ unsigned long vaddr, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+ unsigned int addr;
+
+ slack = phy_start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ phy_start -= slack;
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ vaddr &= ~ICACHE_LINE_MASK;
+ addr = phy_start;
+#else
+ /* bits 17:13 of vaddr go as bits 4:0 of paddr */
+ addr = phy_start | ((vaddr >> 13) & 0x1F);
+#endif
+
+ local_irq_save(flags);
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /* tag comes from phy addr */
+ write_aux_reg(ARC_REG_IC_PTAG, addr);
+
+ /* index bits come from vaddr */
+ write_aux_reg(ARC_REG_IC_IVIL, vaddr);
+ vaddr += ARC_ICACHE_LINE_LEN;
+#else
+ /* this paddr contains vaddrs bits as needed */
+ write_aux_reg(ARC_REG_IC_IVIL, addr);
+#endif
+ addr += ARC_ICACHE_LINE_LEN;
+ }
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __ic_line_inv(start, sz)
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/* TBD: use pg_arch_1 to optimize this */
+void flush_dcache_page(struct page *page)
+{
+ __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH_N_INV);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_INV);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is API for making I/D Caches consistent when modifying code
+ * (loadable modules, kprobes, etc)
+ * This is called on insmod, with kernel virtual address for CODE of
+ * the module. ARC cache maintenance ops require PHY address thus we
+ * need to convert vmalloc addr to PHY addr
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+ unsigned int tot_sz, off, sz;
+ unsigned long phy, pfn;
+ unsigned long flags;
+
+	/* printk("Kernel Cache Coherency: %lx to %lx\n",kstart, kend); */
+
+ /* This is not the right API for user virtual address */
+ if (kstart < TASK_SIZE) {
+ BUG_ON("Flush icache range for user virtual addr space");
+ return;
+ }
+
+ /* Shortcut for bigger flush ranges.
+ * Here we don't care if this was kernel virtual or phy addr
+ */
+ tot_sz = kend - kstart;
+ if (tot_sz > PAGE_SIZE) {
+ flush_cache_all();
+ return;
+ }
+
+ /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+ if (likely(kstart > PAGE_OFFSET)) {
+ __ic_line_inv(kstart, kend - kstart);
+ __dc_line_op(kstart, kend - kstart, OP_FLUSH);
+ return;
+ }
+
+ /*
+ * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+ * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+ * handling of kernel vaddr.
+ *
+ * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+ * it still needs to handle a 2 page scenario, where the range
+	 *     straddles across 2 virtual pages, hence the need for the loop
+ */
+ while (tot_sz > 0) {
+ off = kstart % PAGE_SIZE;
+ pfn = vmalloc_to_pfn((void *)kstart);
+ phy = (pfn << PAGE_SHIFT) + off;
+ sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+ local_irq_save(flags);
+ __dc_line_op(phy, sz, OP_FLUSH);
+ __ic_line_inv(phy, sz);
+ local_irq_restore(flags);
+ kstart += sz;
+ tot_sz -= sz;
+ }
+}
+
+/*
+ * Optimised ver of flush_icache_range() for specific callers: ptrace/signals
+ * where vaddr is also available. This allows passing both vaddr and paddr
+ * bits to CDU for cache flush, short-circuiting the current pessimistic algo
+ * which kills all possible aliases.
+ * An added advantage of knowing that vaddr is user-vaddr avoids various checks
+ * and handling for k-vaddr, k-paddr as done in orig ver above
+ */
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len)
+{
+ __ic_line_inv_vaddr(paddr, u_vaddr, len);
+ __dc_line_op(paddr, len, OP_FLUSH);
+}
+
+/*
+ * XXX: This also needs to be optimised using pg_arch_1
+ * This is called when a page-cache page is about to be mapped into a
+ * user process' address space. It offers an opportunity for a
+ * port to ensure d-cache/i-cache coherency if necessary.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+}
+
+void flush_icache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ write_aux_reg(ARC_REG_IC_IVIC, 1);
+
+	/* lr will not complete till the icache inv operation is over */
+ read_aux_reg(ARC_REG_IC_CTRL);
+ local_irq_restore(flags);
+}
+
+noinline void flush_cache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ flush_icache_all();
+ __dc_entire_op(OP_FLUSH_N_INV);
+
+ local_irq_restore(flags);
+
+}
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+ /* TBD: optimize this */
+ flush_cache_all();
+ return 0;
+}
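
The selection among the three flush routines in arc_cache_init() follows directly from the
geometry arithmetic in the long aliasing comment above: aliases appear only when one cache
way spans more than one MMU page, and the alias count is the way-size to page-size ratio. A
standalone sketch of that calculation, reproducing the 16K/32K/64K examples from the comment
(the identifiers are illustrative, not from the patch):

    #include <stdio.h>

    /* Aliases in a VIPT I-cache: how many MMU pages one way spans */
    static unsigned int icache_aliases(unsigned int cache_sz,
                                       unsigned int ways,
                                       unsigned int page_sz)
    {
            unsigned int way_sz = cache_sz / ways;

            return (way_sz <= page_sz) ? 1 : way_sz / page_sz;
    }

    int main(void)
    {
            /* 2-way set-assoc I$ with 8K MMU pages, as in the comment above */
            printf("16K: %u\n", icache_aliases(16 << 10, 2, 8 << 10)); /* 1: no aliasing */
            printf("32K: %u\n", icache_aliases(32 << 10, 2, 8 << 10)); /* 2 */
            printf("64K: %u\n", icache_aliases(64 << 10, 2, 8 << 10)); /* 4 */
            return 0;
    }
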
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 000000000000..12cc6485b218
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA Coherent API Notes
+ *
+ * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
+ * implemented by accessing it using a kernel virtual address, with
+ * Cache bit off in the TLB entry.
+ *
+ * The default DMA address == Phy address which is 0x8000_0000 based.
+ * A platform/device can make it zero based, by over-riding
+ * plat_{dma,kernel}_addr_to_{kernel,dma}
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Helpers for Coherent DMA API.
+ */
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return paddr;
+}
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr, *kvaddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is kernel Virtual address (0x7000_0000 based) */
+ kvaddr = ioremap_nocache((unsigned long)paddr, size);
+ if (kvaddr != NULL)
+ memset(kvaddr, 0, size);
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return kvaddr;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle)
+{
+ iounmap((void __force __iomem *)kvaddr);
+
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_coherent);
+
+/*
+ * Helper for streaming DMA...
+ */
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ __inline_dma_cache_sync(paddr, size, dir);
+}
+EXPORT_SYMBOL(__arc_dma_cache_sync);
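
From a driver's point of view, the coherent API above returns two views of the same buffer:
an uncached kernel-virtual pointer for the CPU and a bus address to program into the device,
so neither side needs explicit cache maintenance afterwards. A hypothetical usage sketch that
uses only the signatures defined in this file; the device, ring size and globals are made up
for illustration:

    #include <linux/dma-mapping.h>

    static void *ring_cpu;        /* uncached kernel-virtual address for the CPU     */
    static dma_addr_t ring_bus;   /* bus address the device gets programmed with     */

    static int alloc_desc_ring(struct device *dev, size_t ring_bytes)
    {
            ring_cpu = dma_alloc_coherent(dev, ring_bytes, &ring_bus, GFP_KERNEL);
            if (!ring_cpu)
                    return -ENOMEM;
            /* CPU stores via ring_cpu bypass the D$ (uncached TLB entry),
             * so the device sees them without any dma_cache_wback() call. */
            return 0;
    }

    static void free_desc_ring(struct device *dev, size_t ring_bytes)
    {
            dma_free_coherent(dev, ring_bytes, ring_cpu, ring_bus);
    }
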
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 000000000000..014172ba8432
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->ret = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+
+long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n)
+{
+ return __arc_copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_from_user_noinline);
+
+long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n)
+{
+ return __arc_copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_to_user_noinline);
+
+unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n)
+{
+ return __arc_clear_user(to, n);
+}
+EXPORT_SYMBOL(arc_clear_user_noinline);
+
+long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count)
+{
+ return __arc_strncpy_from_user(dst, src, count);
+}
+EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
+
+long arc_strnlen_user_noinline(const char __user *src, long n)
+{
+ return __arc_strnlen_user(src, n);
+}
+EXPORT_SYMBOL(arc_strnlen_user_noinline);
+#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 000000000000..af55aab803d2
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,228 @@
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <asm/pgalloc.h>
+
+static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+{
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = pgd_offset_fast(mm, address);
+ pgd_k = pgd_offset_k(address);
+
+ if (!pgd_present(*pgd_k))
+ goto bad_area;
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto bad_area;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto bad_area;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* XXX: create the TLB entry here */
+ return 0;
+
+bad_area:
+ return 1;
+}
+
+void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+ unsigned long cause_code)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ siginfo_t info;
+ int fault, ret;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= VMALLOC_START && address <= VMALLOC_END) {
+ ret = handle_vmalloc_fault(mm, address);
+ if (unlikely(ret))
+ goto bad_area_nosemaphore;
+ else
+ return;
+ }
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* Handle protection violation, execute on heap or stack */
+
+ if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+ goto bad_area;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
+ if (unlikely(fatal_signal_pending(current))) {
+ if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
+ up_read(&mm->mmap_sem);
+ if (user_mode(regs))
+ return;
+ }
+
+ if (likely(!(fault & VM_FAULT_ERROR))) {
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ /* To avoid updating stats twice for retry case */
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+
+ /* Fault Handled Gracefully */
+ up_read(&mm->mmap_sem);
+ return;
+ }
+
+ /* TBD: switch to pagefault_out_of_memory() */
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+
+ /* no man's land */
+ BUG();
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+	 * when it accesses user-memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+ if (fixup_exception(regs))
+ return;
+
+ die("Oops", regs, address, cause_code);
+
+out_of_memory:
+ if (is_global_init(tsk)) {
+ yield();
+ goto survive;
+ }
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs))
+ do_group_exit(SIGKILL); /* This will never return */
+
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ if (!user_mode(regs))
+ goto no_context;
+
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+}
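
The no_context path above stays non-fatal for guarded user accesses because every such access
contributes an exception-table entry pairing its address with a fixup address;
fixup_exception() (extable.c earlier in the series) looks up the faulting PC and rewrites
regs->ret to the fixup, so e.g. copy_from_user() reports a short copy instead of oopsing. A
conceptual sketch of that mechanism; the entry layout shown is the conventional two-word one
and is not quoted from this patch:

    /* Conceptually, one entry per guarded kernel access to user memory */
    struct exception_table_entry {
            unsigned long insn;     /* address of the access that may fault */
            unsigned long fixup;    /* where execution resumes if it does   */
    };

    /*
     * On a kernel-mode fault with no valid mapping, do_page_fault() calls
     * fixup_exception(regs); if the faulting address matches an entry,
     * regs->ret is set to that entry's fixup and the trap returns there;
     * otherwise die("Oops", ...) is the only option left.
     */
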
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 000000000000..caf797de23fc
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLOCK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Default tot mem from .config */
+static unsigned long arc_mem_sz = 0x20000000; /* some default */
+
+/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
+static int __init setup_mem_sz(char *str)
+{
+ arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+ /* early console might not be setup yet - it will show up later */
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+
+ return 0;
+}
+early_param("mem", setup_mem_sz);
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ arc_mem_sz = size & PAGE_MASK;
+ pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
+ unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+
+ init_mm.start_code = (unsigned long)_text;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)_end;
+
+ /*
+ * We do it here, so that memory is correctly instantiated
+	 * even if "mem=xxx" cmdline over-ride is given and/or
+	 * DT has memory node. Each causes an update to @arc_mem_sz
+	 * and we finally add memory here
+ */
+ memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
+
+ /*------------- externs in mm need setting up ---------------*/
+
+ /* first page of system - kernel .vector starts here */
+ min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
+
+ /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
+ max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+
+ max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
+
+ /*------------- reserve kernel image -----------------------*/
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+ memblock_dump_all();
+
+ /*-------------- node setup --------------------------------*/
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_NORMAL] = num_physpages;
+
+ /*
+ * We can't use the helper free_area_init(zones[]) because it uses
+ * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
+ * when our kernel doesn't start at PAGE_OFFSET, i.e.
+ * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
+ */
+ free_area_init_node(0, /* node-id */
+ zones_size, /* num pages per zone */
+ min_low_pfn, /* first pfn of node */
+ NULL); /* NO holes */
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ int codesize, datasize, initsize, reserved_pages, free_pages;
+ int tmp;
+
+ high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
+
+ totalram_pages = free_all_bootmem();
+
+ /* count all reserved pages [kernel code/data/mem_map..] */
+ reserved_pages = 0;
+ for (tmp = 0; tmp < max_mapnr; tmp++)
+ if (PageReserved(mem_map + tmp))
+ reserved_pages++;
+
+ /* XXX: nr_free_pages() is equivalent */
+ free_pages = max_mapnr - reserved_pages;
+
+ /*
+	 * For the purpose of the display below, split up the "reserved mem":
+	 * kernel code/data is already reported explicitly (codesize/datasize),
+	 * so show only the other reservations (mem_map[ ] et al) here
+ */
+ reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
+ PAGE_SHIFT);
+
+ codesize = _etext - _text;
+ datasize = _end - _etext;
+ initsize = __init_end - __init_begin;
+
+ pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
+ PAGES_TO_MB(free_pages),
+ TO_MB(arc_mem_sz),
+ TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
+ PAGES_TO_KB(reserved_pages));
+}
+
+static void __init free_init_pages(const char *what, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long addr;
+
+ pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
+ what, TO_KB(end - begin), begin, end);
+
+ /* need to check that the page we free is not a partial page */
+ for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+}
+
+/*
+ * free_initmem: Free all the __init memory.
+ */
+void __init_refok free_initmem(void)
+{
+ free_init_pages("unused kernel memory",
+ (unsigned long)__init_begin,
+ (unsigned long)__init_end);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory", start, end);
+}
+#endif
+
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ pr_err("%s(%lx, %lx)\n", __func__, start, end);
+}
+#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 000000000000..3e5c92c79936
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/cache.h>
+
+void __iomem *ioremap(unsigned long paddr, unsigned long size)
+{
+ unsigned long end;
+
+ /* Don't allow wraparound or zero size */
+ end = paddr + size - 1;
+ if (!size || (end < paddr))
+ return NULL;
+
+ /* If the region is h/w uncached, avoid MMU mappings */
+ if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+ return (void __iomem *)paddr;
+
+ return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+/*
+ * ioremap with access flags
+ * Cache-semantics-wise it is the same as ioremap - "forced" uncached.
+ * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses in
+ * ARC hardware uncached region, this one still goes thru the MMU as caller
+ * might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ unsigned long flags)
+{
+ void __iomem *vaddr;
+ struct vm_struct *area;
+ unsigned long off, end;
+ pgprot_t prot = __pgprot(flags);
+
+ /* Don't allow wraparound, zero size */
+ end = paddr + size - 1;
+ if ((!size) || (end < paddr))
+ return NULL;
+
+ /* An early platform driver might end up here */
+ if (!slab_is_available())
+ return NULL;
+
+ /* force uncached */
+ prot = pgprot_noncached(prot);
+
+ /* Mappings have to be page-aligned */
+ off = paddr & ~PAGE_MASK;
+ paddr &= PAGE_MASK;
+ size = PAGE_ALIGN(end + 1) - paddr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ area->phys_addr = paddr;
+ vaddr = (void __iomem *)area->addr;
+ if (ioremap_page_range((unsigned long)vaddr,
+ (unsigned long)vaddr + size, paddr, prot)) {
+ vunmap((void __force *)vaddr);
+ return NULL;
+ }
+ return (void __iomem *)(off + (char __iomem *)vaddr);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+
+void iounmap(const void __iomem *addr)
+{
+ if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+ return;
+
+ vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+}
+EXPORT_SYMBOL(iounmap);
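
ioremap_prot() above maps whole pages but returns the caller's exact byte offset within the
first page. A small worked example of that alignment arithmetic, assuming 8K pages; the
request values are invented purely for illustration:

    #include <stdio.h>

    #define PG_SZ        0x2000UL                       /* 8K page, as in the examples */
    #define PG_MASK      (~(PG_SZ - 1))
    #define PG_ALIGN(x)  (((x) + PG_SZ - 1) & PG_MASK)

    int main(void)
    {
            unsigned long paddr = 0xc0001f00UL, size = 0x300UL;
            unsigned long off   = paddr & ~PG_MASK;       /* 0x1f00                  */
            unsigned long end   = paddr + size - 1;       /* 0xc00021ff              */

            paddr &= PG_MASK;                             /* 0xc0000000              */
            size   = PG_ALIGN(end + 1) - paddr;           /* 0x4000: two pages mapped */

            printf("map %#lx bytes at %#lx, hand back mapping + %#lx\n",
                   size, paddr, off);
            return 0;
    }
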
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 000000000000..9b9ce23f4ec3
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,645 @@
+/*
+ * TLB Management (flush/create/diagnostics) for ARC700
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Aug 2011
+ * -Reintroduce duplicate PD fixup - some customer chips still have the issue
+ *
+ * vineetg: May 2011
+ * -No need to flush_cache_page( ) for each call to update_mmu_cache()
+ * some of the LMBench tests improved amazingly
+ * = page-fault thrice as fast (75 usec to 28 usec)
+ * = mmap twice as fast (9.6 msec to 4.6 msec),
+ * = fork (5.3 msec to 3.7 msec)
+ *
+ * vineetg: April 2011 :
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * vineetg: April 2011 : Preparing for MMU V3
+ * -MMU v2/v3 BCRs decoded differently
+ * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
+ * -tlb_entry_erase( ) can be void
+ * -local_flush_tlb_range( ):
+ * = need not "ceil" @end
+ * = walks MMU only if range spans < 32 entries, as opposed to 256
+ *
+ * Vineetg: Sept 10th 2008
+ * -Changes related to MMU v2 (Rel 4.8)
+ *
+ * Vineetg: Aug 29th 2008
+ * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
+ *  flush Micro-TLBs. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
+ * it fails. Thus need to load it with ANY valid value before invoking
+ * TLBIVUTLB cmd
+ *
+ * Vineetg: Aug 21th 2008:
+ * -Reduced the duration of IRQ lockouts in TLB Flush routines
+ * -Multiple copies of TLB erase code separated into a "single" function
+ * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
+ * in interrupt-safe region.
+ *
+ * Vineetg: April 23rd Bug #93131
+ * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
+ * flush is more than the size of TLB itself.
+ *
+ * Rahul Trivedi : Codito Technologies 2004
+ */
+
+#include <linux/module.h>
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* Need for ARC MMU v2
+ *
+ * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
+ * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
+ * map into same set, there would be contention for the 2 ways causing severe
+ * Thrashing.
+ *
+ * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
+ * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
+ * Given this, the thrashing problem should never happen because once the 3
+ * J-TLB entries are created (even though 3rd will knock out one of the prev
+ * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
+ *
+ * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
+ * This is a simple design for keeping them in sync. So what do we do?
+ * The solution which James came up with was pretty neat. It utilised the assoc
+ * of uTLBs by not invalidating always but only when absolutely necessary.
+ *
+ * - Existing TLB commands work as before
+ * - New command (TLBWriteNI) for TLB write without clearing uTLBs
+ * - New command (TLBIVUTLB) to invalidate uTLBs.
+ *
+ * The uTLBs need only be invalidated when pages are being removed from the
+ * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
+ * as a result of a miss, the removed entry is still allowed to exist in the
+ * uTLBs as it is still valid and present in the OS page table. This allows the
+ * full associativity of the uTLBs to hide the limited associativity of the main
+ * TLB.
+ *
+ * During a miss handler, the new "TLBWriteNI" command is used to load
+ * entries without clearing the uTLBs.
+ *
+ * When the OS page table is updated, TLB entries that may be associated with a
+ * removed page are removed (flushed) from the TLB using TLBWrite. In this
+ * circumstance, the uTLBs must also be cleared. This is done by using the
+ * existing TLBWrite command. An explicit IVUTLB is also required for those
+ * corner cases when TLBWrite was not executed at all because the corresponding
+ * J-TLB entry got evicted/replaced.
+ */
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+int asid_cache = FIRST_ASID;
+
+/* ASID to mm struct mapping. We have one extra entry corresponding to
+ * NO_ASID to save us a compare when clearing the mm entry for old asid
+ * see get_new_mmu_context (asm-arc/mmu_context.h)
+ */
+struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * The procedure is to look it up in the MMU. If found, ERASE it by
+ * issuing a TlbWrite CMD with PD0 = PD1 = 0
+ */
+
+static void __tlb_entry_erase(void)
+{
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ unsigned int idx;
+
+ /* Locate the TLB entry for this vaddr + ASID */
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* No error means entry found, zero it out */
+ if (likely(!(idx & TLB_LKUP_ERR))) {
+ __tlb_entry_erase();
+ } else { /* Some sort of Error */
+
+ /* Duplicate entry error */
+ if (idx & 0x1) {
+ /* TODO we need to handle this case too */
+ pr_emerg("unhandled Duplicate flush for %x\n",
+ vaddr_n_asid);
+ }
+ /* else entry not found so nothing to do */
+ }
+}
+
+/****************************************************************************
+ * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
+ *
+ * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
+ *
+ * utlb_invalidate ( )
+ * -For v2 MMU calls Flush uTLB Cmd
+ * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
+ * This is because in v1 TLBWrite itself invalidate uTLBs
+ *    This is because in v1 TLBWrite itself invalidates uTLBs
+
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER < 3)
+ /* MMU v2 introduced the uTLB Flush command.
+ * There was however an obscure hardware bug, where uTLB flush would
+ * fail when a prior probe for J-TLB (both totally unrelated) would
+	 * return lkup err - because the entry didn't exist in MMU.
+	 * The workaround was to set Index reg with some valid value, prior to
+ * flush. This was fixed in MMU v3 hence not needed any more
+ */
+ unsigned int idx;
+
+ /* make sure INDEX Reg is valid */
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* If not write some dummy val */
+ if (unlikely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
+/*
+ * Un-conditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned int entry;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* Load PD0 and PD1 with template for a Blank Entry */
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+
+ for (entry = 0; entry < mmu->num_tlb; entry++) {
+ /* write this entry to the TLB */
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /*
+ * Small optimisation courtesy IA64
+	 * flush_mm is called during fork, exit, munmap etc., multiple times as well.
+ * Only for fork( ) do we need to move parent to a new MMU ctxt,
+ * all other cases are NOPs, hence this check.
+ */
+ if (atomic_read(&mm->mm_users) == 0)
+ return;
+
+ /*
+ * Workaround for Android weirdism:
+ * A binder VMA could end up in a task such that vma->mm != tsk->mm
+	 * The old code would cause the h/w and s/w ASIDs to get out of sync
+ */
+ if (current->mm != mm)
+ destroy_context(mm);
+ else
+ get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a Range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive
+ * Difference between this and Kernel Range Flush is
+ * -Here the fastest way (if range is too large) is to move to next ASID
+ * without doing any explicit Shootdown
+ * -In case of kernel Flush, the entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+ unsigned int asid;
+
+	/* If the range @start to @end is more than 32 TLB entries deep,
+	 * it's better to move to a new ASID rather than searching for
+	 * individual entries and then shooting them down
+	 *
+	 * The calculation above is rough and doesn't account for unaligned parts,
+	 * since this is heuristics based anyway
+ */
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_mm(vma->vm_mm);
+ return;
+ }
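+
+	/* Illustration (assuming the default 8 KB ARC page size): any range of
+	 * 256 KB or more rolls over to a new ASID instead of a per-page shootdown
+	 */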
+
+ /*
+ * @start moved to page start: this alone suffices for checking
+	 * loop end condition below, w/o need for aligning @end up to a page boundary
+	 * e.g. 2000 to 4001 will anyhow loop twice
+ */
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ asid = vma->vm_mm->context.asid;
+
+ if (asid != NO_ASID) {
+ while (start < end) {
+ tlb_entry_erase(start | (asid & 0xff));
+ start += PAGE_SIZE;
+ }
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ * @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start,@end alone (interpreted as user vaddr), although technically SASID
+ * is also needed. However our smart TLBProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /* exactly same as above, except for TLB entry not taking ASID */
+
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ while (start < end) {
+ tlb_entry_erase(start);
+ start += PAGE_SIZE;
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Delete the TLB entry in the MMU for a given page (virtual address)
+ * NOTE: one TLB entry contains the translation for a single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long flags;
+
+ /* Note that it is critical that interrupts are DISABLED between
+	 * checking the ASID and using it to flush the TLB entry
+ */
+ local_irq_save(flags);
+
+ if (vma->vm_mm->context.asid != NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) |
+ (vma->vm_mm->context.asid & 0xff));
+ utlb_invalidate();
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Routine to create a TLB entry
+ */
+void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ unsigned long flags;
+ unsigned int idx, asid_or_sasid;
+ unsigned long pd0_flags;
+
+ /*
+ * create_tlb() assumes that current->mm == vma->mm, since
+	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
+ * -completes the lazy write to SASID reg (again valid for curr tsk)
+ *
+ * Removing the assumption involves
+ * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
+ * -Fix the TLB paranoid debug code to not trigger false negatives.
+ * -More importantly it makes this handler inconsistent with fast-path
+ * TLB Refill handler which always deals with "current"
+ *
+ * Lets see the use cases when current->mm != vma->mm and we land here
+ * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+ * Here VM wants to pre-install a TLB entry for user stack while
+ * current->mm still points to pre-execve mm (hence the condition).
+ * However the stack vaddr is soon relocated (randomization) and
+ * move_page_tables() tries to undo that TLB entry.
+ * Thus not creating TLB entry is not any worse.
+ *
+ * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+ * breakpoint in debugged task. Not creating a TLB now is not
+ * performance critical.
+ *
+ * Both the cases above are not good enough for code churn.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ tlb_paranoid_check(vma->vm_mm->context.asid, address);
+
+ address &= PAGE_MASK;
+
+	/* update this PTE's credentials */
+ pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+ /* Create HW TLB entry Flags (in PD0) from PTE Flags */
+#if (CONFIG_ARC_MMU_VER <= 2)
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
+#else
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
+#endif
+
+ /* ASID for this task */
+ asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+
+ /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
+ write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+
+ /* First verify if entry for this vaddr+ASID already exists */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /*
+	 * If not already present, get a free slot from the MMU.
+	 * Otherwise, the Probe would have located the entry and set the INDEX
+	 * Reg with the existing location. This will cause the Write CMD to
+	 * overwrite the existing entry with the new PD0 and PD1
+ */
+ if (likely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+ /*
+ * Commit the Entry to MMU
+	 * It doesn't sound safe to use the TLBWriteNI cmd here
+ * which doesn't flush uTLBs. I'd rather be safe than sorry.
+ */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+
+ local_irq_restore(flags);
+}
+
+/* arch hook called by core VM at the end of handle_mm_fault( ),
+ * when a new PTE is entered in Page Tables or an existing one
+ * is modified. We aggressively pre-install a TLB entry
+ */
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+ pte_t *ptep)
+{
+
+ create_tlb(vma, vaddress, ptep);
+}
+
+/* Read the MMU Build Configuration Register, decode it and save into
+ * the cpuinfo structure for later use.
+ * No validation is done here, simply read/convert the BCR
+ */
+void __init read_decode_mmu_bcr(void)
+{
+ unsigned int tmp;
+ struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
+ struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ tmp = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (tmp >> 24);
+
+ if (mmu->ver <= 2) {
+ mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+ mmu->pg_sz = PAGE_SIZE;
+ mmu->sets = 1 << mmu2->sets;
+ mmu->ways = 1 << mmu2->ways;
+ mmu->u_dtlb = mmu2->u_dtlb;
+ mmu->u_itlb = mmu2->u_itlb;
+ } else {
+ mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu->pg_sz = 512 << mmu3->pg_sz;
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ mmu->u_dtlb = mmu3->u_dtlb;
+ mmu->u_itlb = mmu3->u_itlb;
+ }
+
+ mmu->num_tlb = mmu->sets * mmu->ways;
+}
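+
+/* Illustrative decode (field values assumed): an MMU v3 BCR with pg_sz=4,
+ * sets=7, ways=2 yields pg_sz = 512 << 4 = 8 KB, 1 << 7 = 128 sets and
+ * 1 << 2 = 4 ways, i.e. a 512 entry J-TLB
+ */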
+
+char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
+ p_mmu->ver, TO_KB(p_mmu->pg_sz));
+
+ n += scnprintf(buf + n, len - n,
+ "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
+ p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+ p_mmu->u_dtlb, p_mmu->u_itlb,
+ __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
+
+ return buf;
+}
+
+void __init arc_mmu_init(void)
+{
+ char str[256];
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
+
+	/* For efficiency's sake, the kernel is built at compile time for one MMU ver.
+	 * This must match the hardware it is running on.
+	 * Linux built for MMU V2, if run on MMU V1, will break down because V1
+	 * hardware doesn't understand cmds such as WriteNI or IVUTLB.
+	 * On the other hand, Linux built for V1, if run on MMU V2, will do
+	 * unneeded workarounds to prevent memcpy thrashing.
+	 * Similarly, MMU V3 has new features which won't work on an older MMU
+ */
+ if (mmu->ver != CONFIG_ARC_MMU_VER) {
+ panic("MMU ver %d doesn't match kernel built for %d...\n",
+ mmu->ver, CONFIG_ARC_MMU_VER);
+ }
+
+ if (mmu->pg_sz != PAGE_SIZE)
+ panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+ /*
+ * ASID mgmt data structures are compile time init
+ * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
+ */
+
+ local_flush_tlb_all();
+
+ /* Enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+
+ /* In smp we use this reg for interrupt 1 scratch */
+#ifndef CONFIG_SMP
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+#endif
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ * --------------------- -----------
+ * |way0|way1|way2|way3| |way0|way1|
+ * --------------------- -----------
+ * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
+ * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
+ * ~ ~ ~ ~
+ * [set127] | 508| 509| 510| 511| | 254| 255|
+ * --------------------- -----------
+ * For normal operations we don't (must not) care how the above works since
+ * MMU cmd getIndex(vaddr) abstracts that out.
+ * However for walking WAYS of a SET, we need to know this
+ */
+#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
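+/* e.g. (illustrative) for the 4-way config in the table above,
+ * SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6, i.e. way2 of set1
+ */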
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -The MMU complains not at the time of duplicate PD installation, but at the
+ *  time of a lookup which matches multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ *  the duplicate one.
+ * -Knob to be verbose about it. (TODO: hook it up to debugfs)
+ */
+volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ int set, way, n;
+ unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
+ unsigned long flags, is_valid;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* re-enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+ /* read out all the ways of current set */
+ for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+ pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+ pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
+ is_valid |= pd0[way] & _PAGE_PRESENT;
+ }
+
+ /* If all the WAYS in SET are empty, skip to next SET */
+ if (!is_valid)
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+ for (way = 0; way < mmu->ways; way++) {
+ if (!pd0[way])
+ continue;
+
+ for (n = way + 1; n < mmu->ways; n++) {
+ if ((pd0[way] & PAGE_MASK) ==
+ (pd0[n] & PAGE_MASK)) {
+
+ if (dup_pd_verbose) {
+ pr_info("Duplicate PD's @"
+ "[%d:%d]/[%d:%d]\n",
+ set, way, set, n);
+ pr_info("TLBPD0[%u]: %08x\n",
+ way, pd0[way]);
+ }
+
+ /*
+ * clear entry @way and not @n. This is
+ * critical to our optimised loop
+ */
+ pd0[way] = pd1[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ __tlb_entry_erase();
+ }
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/***********************************************************************
+ * Diagnostic Routines
+ * -Called from Low Level TLB Handlers if things don't look good
+ **********************************************************************/
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+/*
+ * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
+ * don't match
+ */
+void print_asid_mismatch(int is_fast_path)
+{
+ int pid_sw, pid_hw;
+ pid_sw = current->active_mm->context.asid;
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
+ is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+
+ __asm__ __volatile__("flag 1");
+}
+
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+{
+ unsigned int pid_hw;
+
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+ print_asid_mismatch(0);
+}
+#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 000000000000..9df765dc7c3a
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,408 @@
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: April 2011 :
+ * -MMU v1: moved out legacy code into a separate file
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out it's I-TLB entry
+ * -Some more fine tuning:
+ * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ * -Practically rewrote the I/D TLB Miss handlers
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ * Hence Leaner by 1.5 K
+ * Used Conditional arithmetic to replace excessive branching
+ * Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ * more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ * -Added Debug Code to check if sw-ASID == hw-ASID
+ *
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+ .cpu A7
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/tlb.h>
+#include <asm/pgtable.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+#if (CONFIG_ARC_MMU_VER == 1)
+#include <asm/tlb-mmu1.h>
+#endif
+
+;--------------------------------------------------------------------------
+; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
+; For details refer to comments before TLBMISS_FREEUP_REGS below
+;--------------------------------------------------------------------------
+
+ARCFP_DATA ex_saved_reg1
+ .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned
+ .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+ .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+ .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+ .size ex_saved_reg1, 16
+ex_saved_reg1:
+ .zero 16
+#endif
+
+;============================================================================
+; Troubleshooting Stuff
+;============================================================================
+
+; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
+; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
+; we use the MMU PID Reg to get current ASID.
+; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
+; So we try to detect this in the TLB Miss handler
+
+
+.macro DBG_ASID_MISMATCH
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+ ; make sure h/w ASID is same as s/w ASID
+
+ GET_CURR_TASK_ON_CPU r3
+ ld r0, [r3, TASK_ACT_MM]
+ ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+
+ lr r1, [ARC_REG_PID]
+ and r1, r1, 0xFF
+ breq r1, r0, 5f
+
+ ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
+ lr r0, [erstatus]
+ bbit0 r0, STATUS_U_BIT, 5f
+
+ ; We sure are in troubled waters, Flag the error, but to do so
+ ; need to switch to kernel mode stack to call error routine
+ GET_TSK_STACK_BASE r3, sp
+
+ ; Call printk to shoutout aloud
+ mov r0, 1
+ j print_asid_mismatch
+
+5: ; ASIDs match so proceed normally
+ nop
+
+#endif
+
+.endm
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+ lr r2, [efa]
+
+#ifndef CONFIG_SMP
+ lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+ GET_CURR_TASK_ON_CPU r1
+ ld r1, [r1, TASK_ACT_MM]
+ ld r1, [r1, MM_PGD]
+#endif
+
+ lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
+ ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
+ and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
+ ; contains Ptr to Page Table
+ bz.d do_slow_path_pf ; if no Page Table, do page fault
+
+ ; Get the PTE entry: The idea is
+ ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
+ ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+ ; (3) z = pgtbl[y]
+	; To avoid the multiply by 4 (PTE size) at the end, we fold it in via the -2 / <<2 below
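+	; Illustration (values assumed, e.g. 8 KB pages i.e. PAGE_SHIFT = 13, and
+	; 4-byte PTEs): steps (1)-(3) collapse to (vaddr >> 11) & ((PTRS_PER_PTE-1) << 2),
+	; which is exactly what the lsr/and below compute - the index is kept
+	; pre-scaled to a byte offset, so no separate multiply is needed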
+
+ lsr r0, r2, (PAGE_SHIFT - 2)
+ and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
+ ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ and.f 0, r0, _PAGE_PRESENT
+ bz 1f
+ ld r2, [num_pte_not_present]
+ add r2, r2, 1
+ st r2, [num_pte_not_present]
+1:
+#endif
+
+.endm
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+ and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
+ sr r3, [ARC_REG_TLBPD1] ; these go in PD1
+
+ and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+#if (CONFIG_ARC_MMU_VER <= 2)	/* Need not be done for v3 onwards */
+ lsr r2, r2 ; shift PTE flags to match layout in PD0
+#endif
+
+ lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
+
+ or r3, r3, r2 ; S | vaddr | {sasid|asid}
+ sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+
+ /* Get free TLB slot: Set = computed from vaddr, way = random */
+ sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+ /* Commit the Write */
+#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
+ sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+#else
+ sr TLBWrite, [ARC_REG_TLBCOMMAND]
+#endif
+.endm
+
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+ lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+#else
+ st r0, [@ex_saved_reg1]
+ mov_s r0, @ex_saved_reg1
+#endif
+ st_s r1, [r0, 4]
+ st_s r2, [r0, 8]
+ st_s r3, [r0, 12]
+
+ ; VERIFY if the ASID in MMU-PID Reg is same as
+ ; one in Linux data structures
+
+ DBG_ASID_MISMATCH
+.endm
+
+;-----------------------------------------------------------------
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+ lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ lr r0, [ARC_REG_SCRATCH_DATA0]
+#else
+ mov_s r0, @ex_saved_reg1
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ ld_s r0, [r0]
+#endif
+.endm
+
+ARCFP_CODE ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissI
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numitlb]
+ add r0, r0, 1
+ st r0, [@numitlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Check if PTE permissions approp for executing code
+ cmp_s r2, VMALLOC_START
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
+
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
+ bnz do_slow_path_pf
+
+ ; Let Linux VM know that the page was accessed
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+ARC_EXIT EV_TLBMissI
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissD
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numdtlb]
+ add r0, r0, 1
+ st r0, [@numdtlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+	; If the PTE exists, it will set up r0 = PTE, r1 = Ptr to PTE
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+ mov_s r2, 0
+ lr r3, [ecr]
+ btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
+ or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
+ or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ ; Above laddering takes care of XCHG access
+ ; which is both Read and Write
+
+	; If kernel mode access, make _PAGE_xx flags as _PAGE_K_xx
+ ; For copy_(to|from)_user, despite exception taken in kernel mode,
+ ; this code is not hit, because EFA would still be the user mode
+ ; address (EFA < 0x6000_0000).
+ ; This code is for legit kernel mode faults, vmalloc specifically
+ ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
+
+ lr r3, [efa]
+ cmp r3, VMALLOC_START - 1 ; If kernel mode access
+ asl.hi r2, r2, 3 ; make _PAGE_xx flags as _PAGE_K_xx
+ or r2, r2, _PAGE_PRESENT ; Common flag for K/U mode
+
+ ; By now, r2 setup with all the Flags we need to check in PTE
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+ ;----------------------------------------------------------------
+ ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+ lr r3, [ecr]
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
+ or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+
+#if (CONFIG_ARC_MMU_VER == 1)
+ ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
+	; memcpy where 3 parties contend for 2 ways, causing a livelock.
+ ; But only for old MMU or one with Metal Fix
+ TLB_WRITE_HEURISTICS
+#endif
+
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+ ; Restore the 4-scratch regs saved by fast path miss handler
+ TLBMISS_RESTORE_REGS
+
+ ; Slow path TLB Miss handled as a regular ARC Exception
+ ; (stack switching / save the complete reg-file).
+ ; That requires freeing up r9
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+	; ------- setup args for Linux Page fault Handler ---------
+ mov_s r0, sp
+ lr r2, [efa]
+ lr r3, [ecr]
+
+ ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
+ ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
+ ; DTLB-ld Miss
+ ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
+	; The following code uses the fact that st/ex have one bit in common
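+	; (Illustration: the bit tested below is clear for the ld cause code 0x01
+	;  and set for st 0x02 / ex 0x03, so r1 ends up 0 for loads and 1 for
+	;  stores/exchanges)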
+
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS
+ mov.z r1, 0
+ mov.nz r1, 1
+
+ ; We don't want exceptions to be disabled while the fault is handled.
+	; Now that we have saved the context we return from the exception, hence
+	; exceptions get re-enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_page_fault
+ b ret_from_exception
+
+ARC_EXIT EV_TLBMissD
+
+ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
new file mode 100644
index 000000000000..ce417a6e70b8
--- /dev/null
+++ b/arch/arc/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
new file mode 100644
index 000000000000..c80fcad4a5a7
--- /dev/null
+++ b/arch/arc/oprofile/common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on orig code from @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ /*
+ * A failure here, forces oprofile core to switch to Timer based PC
+ * sampling, which will happen if say perf is not enabled/available
+ */
+ return oprofile_perf_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+ oprofile_perf_exit();
+}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
new file mode 100644
index 000000000000..b41e786cdbc0
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_FPGA_LEGACY
+ bool "\"Legacy\" ARC FPGA dev Boards"
+ select ISS_SMP_EXTN if SMP
+ help
+ Support for ARC development boards, provided by Synopsys.
+ These are based on FPGA or ISS. e.g.
+ - ARCAngel4
+ - ML509
+ - MetaWare ISS
+
+if ARC_PLAT_FPGA_LEGACY
+
+config ARC_BOARD_ANGEL4
+ bool "ARC Angel4"
+ default y
+ help
+ ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
+
+config ARC_BOARD_ML509
+ bool "ML509"
+ help
+ ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
+
+config ISS_SMP_EXTN
+ bool "ARC SMP Extensions (ISS Models only)"
+ default n
+ depends on SMP
+ select ARC_HAS_COH_RTSC
+ help
+ SMP Extensions to ARC700, in a "simulation only" Model, supported in
+ ARC ISS (Instruction Set Simulator).
+ The SMP extensions include:
+ -IDU (Interrupt Distribution Unit)
+ -XTL (To enable CPU start/stop/set-PC for another CPU)
+ It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
+
+config ARC_SERIAL_BAUD
+ int "UART Baud rate"
+ default "115200"
+ depends on SERIAL_ARC || SERIAL_ARC_CONSOLE
+ help
+ Baud rate for the ARC UART
+
+menuconfig ARC_HAS_BVCI_LAT_UNIT
+ bool "BVCI Bus Latency Unit"
+ depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
+ help
+	  IP to add artificial latency to BVCI Bus based FPGA builds.
+ The default latency (even worst case) for FPGA is non-realistic
+ (~10 SDRAM, ~5 SSRAM).
+
+config BVCI_LAT_UNITS
+ hex "Latency Unit(s) Bitmap"
+ default "0x0"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+ help
+ There are multiple Latency Units corresponding to the many
+ interfaces of the system bus arbiter (both CPU side as well as
+ the peripheral side).
+	  To add latency to ALL memory transactions, choose Unit 0, otherwise
+	  for finer-grained, interface-wise latency, specify a bitmap (1 bit
+	  per unit) of all units. e.g. 1,2,12 will be 0x1003
+
+ Unit 0 - System Arb and Mem Controller
+ Unit 1 - I$ and System Bus
+ Unit 2 - D$ and System Bus
+ ..
+ Unit 12 - IDE Disk controller and System Bus
+
+config BVCI_LAT_CYCLES
+ int "Latency Value in cycles"
+ range 0 63
+ default "30"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+
+endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
new file mode 100644
index 000000000000..a44e22ebc1b7
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include
+
+obj-y := platform.o irq.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
new file mode 100644
index 000000000000..41e335670f60
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, IRQ assignments to peripherals
+ */
+
+#ifndef __PLAT_IRQ_H
+#define __PLAT_IRQ_H
+
+#define UART0_IRQ 5
+#define UART1_IRQ 10
+#define UART2_IRQ 11
+
+#define VMAC_IRQ 6
+
+#define IDE_IRQ 13
+#define PCI_IRQ 14
+#define PS2_IRQ 15
+
+#ifdef CONFIG_SMP
+#define IDU_INTERRUPT_0 16
+#endif
+
+extern void __init plat_fpga_init_IRQ(void);
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
new file mode 100644
index 000000000000..1663f3388085
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/memmap.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, System Memory Map for Peripherals etc
+ */
+
+#ifndef __PLAT_MEMMAP_H
+#define __PLAT_MEMMAP_H
+
+#define UART0_BASE 0xC0FC1000
+#define UART1_BASE 0xC0FC1100
+
+#define VMAC_REG_BASEADDR 0xC0FC2000
+
+#define IDE_CONTROLLER_BASE 0xC0FC9000
+
+#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000
+
+#define PGU_BASEADDR 0xC0FC8000
+#define VLCK_ADDR 0xC0FCF028
+
+#define BVCI_LAT_UNIT_BASE 0xC0FED000
+
+#define PS2_BASE_ADDR 0xC0FCC000
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
new file mode 100644
index 000000000000..c09eb4cfc77c
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/smp.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Rajeshwar Ranga: Interrupt Distribution Unit API's
+ */
+
+#ifndef __PLAT_ARCFPGA_SMP_H
+#define __PLAT_ARCFPGA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <asm/arcregs.h>
+
+#define ARC_AUX_IDU_REG_CMD 0x2000
+#define ARC_AUX_IDU_REG_PARAM 0x2001
+
+#define ARC_AUX_XTL_REG_CMD 0x2002
+#define ARC_AUX_XTL_REG_PARAM 0x2003
+
+#define ARC_REG_MP_BCR 0x2021
+
+#define ARC_XTL_CMD_WRITE_PC 0x04
+#define ARC_XTL_CMD_CLEAR_HALT 0x02
+
+/*
+ * Build Configuration Register which identifies the sub-components
+ */
+struct bcr_mp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
+#else
+ unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
+#endif
+};
+
+/* IDU supports 256 common interrupts */
+#define NR_IDU_IRQS 256
+
+/*
+ * The Aux Regs layout is same bit-by-bit in both BE/LE modes.
+ * However when cast as a bitfield encoded "C" struct, gcc treats it as
+ * memory, generating different code for BE/LE, requiring structure adjustment
+ * (see include/asm/arcregs.h)
+ *
+ * However when manually "carving" the value for an Aux reg, no special handling
+ * of BE is needed because of the property described above
+ */
+#define IDU_SET_COMMAND(irq, cmd) \
+do { \
+ uint32_t __val; \
+ __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \
+ write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \
+} while (0)
+
+#define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
+#define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM)
+
+/* IDU Commands */
+#define IDU_DISABLE 0x00
+#define IDU_ENABLE 0x01
+#define IDU_IRQ_CLEAR 0x02
+#define IDU_IRQ_ASSERT 0x03
+#define IDU_IRQ_WMODE 0x04
+#define IDU_IRQ_STATUS 0x05
+#define IDU_IRQ_ACK 0x06
+#define IDU_IRQ_PEND 0x07
+#define IDU_IRQ_RMODE 0x08
+#define IDU_IRQ_WBITMASK 0x09
+#define IDU_IRQ_RBITMASK 0x0A
+
+#define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE)
+#define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE)
+
+#define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
+#define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
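+
+/* Worked example (illustrative): idu_irq_assert(5) expands to
+ * IDU_SET_COMMAND(5, IDU_IRQ_ASSERT), i.e. it writes
+ * ((5 & 0xFF) << 8) | (IDU_IRQ_ASSERT & 0xFF) = 0x0503 to ARC_AUX_IDU_REG_CMD
+ */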
+
+/* IDU Interrupt Mode - Destination Encoding */
+#define IDU_IRQ_MOD_DISABLE 0x00
+#define IDU_IRQ_MOD_ROUND_RECP 0x01
+#define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02
+#define IDU_IRQ_MOD_TCPU_ALLRECP 0x03
+
+/* IDU Interrupt Mode - Triggering Mode */
+#define IDU_IRQ_MODE_LEVEL_TRIG 0x00
+#define IDU_IRQ_MODE_PULSE_TRIG 0x01
+
+#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \
+ (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
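+
+/* Worked example (illustrative): IDU_IRQ_MODE_PARAM(IDU_IRQ_MOD_TCPU_ALLRECP,
+ * IDU_IRQ_MODE_PULSE_TRIG) = (0x01 << 15) | 0x03 = 0x8003
+ */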
+
+struct idu_irq_config {
+ uint8_t irq;
+ uint8_t dest_mode;
+ uint8_t trig_mode;
+};
+
+struct idu_irq_status {
+ uint8_t irq;
+ bool enabled;
+ bool status;
+ bool ack;
+ bool pend;
+ uint8_t next_rr;
+};
+
+extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
+extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
+
+extern void iss_model_init_smp(unsigned int cpu);
+extern void iss_model_init_early_smp(void);
+
+#endif /* CONFIG_SMP */
+
+#endif
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
new file mode 100644
index 000000000000..d2215fd889c2
--- /dev/null
+++ b/arch/arc/plat-arcfpga/irq.c
@@ -0,0 +1,25 @@
+/*
+ * ARC FPGA Platform IRQ hookups
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <plat/irq.h>
+
+void __init plat_fpga_init_IRQ(void)
+{
+ /*
+ * SMP Hack because UART IRQ hardwired to cpu0 (boot-cpu) but if the
+	 * request_irq() comes from any other CPU, the low level IRQ unmasking
+ * essential for getting Interrupts won't be enabled on cpu0, locking
+ * up the UART state machine.
+ */
+#ifdef CONFIG_SMP
+ arch_unmask_irq(UART0_IRQ);
+#endif
+}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
new file mode 100644
index 000000000000..4e20a1a5104d
--- /dev/null
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -0,0 +1,226 @@
+/*
+ * ARC FPGA Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+#include <plat/memmap.h>
+#include <plat/smp.h>
+#include <plat/irq.h>
+
+/*-----------------------BVCI Latency Unit -----------------------------*/
+
+#ifdef CONFIG_ARC_HAS_BVCI_LAT_UNIT
+
+int lat_cycles = CONFIG_BVCI_LAT_CYCLES;
+
+/* BVCI Bus Profiler: Latency Unit */
+static void __init setup_bvci_lat_unit(void)
+{
+#define MAX_BVCI_UNITS 12
+
+ unsigned int i;
+ unsigned int *base = (unsigned int *)BVCI_LAT_UNIT_BASE;
+ const unsigned long units_req = CONFIG_BVCI_LAT_UNITS;
+ const unsigned int REG_UNIT = 21;
+ const unsigned int REG_VAL = 22;
+
+ /*
+ * There are multiple Latency Units corresponding to the many
+ * interfaces of the system bus arbiter (both CPU side as well as
+ * the peripheral side).
+ *
+ * Unit 0 - System Arb and Mem Controller - adds latency to all
+	 *             memory transactions
+ * Unit 1 - I$ and System Bus
+ * Unit 2 - D$ and System Bus
+ * ..
+ * Unit 12 - IDE Disk controller and System Bus
+ *
+	 * The programmer's model requires writing to the lat_unit reg first
+ * and then the latency value (cycles) to lat_value reg
+ */
+
+ if (CONFIG_BVCI_LAT_UNITS == 0) {
+ writel(0, base + REG_UNIT);
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for all Memory Transactions %d cycles\n",
+ lat_cycles);
+ } else {
+ for_each_set_bit(i, &units_req, MAX_BVCI_UNITS) {
+ writel(i + 1, base + REG_UNIT); /* loop is 0 based */
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for Unit[%d] = %d cycles\n",
+ (i + 1), lat_cycles);
+ }
+ }
+}
+#else
+static void __init setup_bvci_lat_unit(void)
+{
+}
+#endif
+
+/*----------------------- Platform Devices -----------------------------*/
+
+static unsigned long arc_uart_info[] = {
+ 0, /* uart->is_emulated (runtime @running_on_hw) */
+ 0, /* uart->port.uartclk */
+ 0, /* uart->baud */
+ 0
+};
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+/*
+ * static platform data - but only for early serial
+ * TBD: derive this from a special DT node
+ */
+static struct resource arc_uart0_res[] = {
+ {
+ .start = UART0_BASE,
+ .end = UART0_BASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = UART0_IRQ,
+ .end = UART0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device arc_uart0_dev = {
+ .name = "arc-uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(arc_uart0_res),
+ .resource = arc_uart0_res,
+ .dev = {
+ .platform_data = &arc_uart_info,
+ },
+};
+
+static struct platform_device *fpga_early_devs[] __initdata = {
+ &arc_uart0_dev,
+};
+#endif
+
+static void arc_fpga_serial_init(void)
+{
+	/* To let the driver work around an ISS bug: the baudh Reg can't be set to 0 */
+ arc_uart_info[0] = !running_on_hw;
+
+ arc_uart_info[1] = arc_get_core_freq();
+
+ arc_uart_info[2] = CONFIG_ARC_SERIAL_BAUD;
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+ early_platform_add_devices(fpga_early_devs,
+ ARRAY_SIZE(fpga_early_devs));
+
+ /*
+ * ARC console driver registers itself as an early platform driver
+ * of class "earlyprintk".
+ * Install it here, followed by probe of devices.
+ * The installation here doesn't require earlyprintk in command line
+ * To do so however, replace the lines below with
+ * parse_early_param();
+ * early_platform_driver_probe("earlyprintk", 1, 1);
+ * ^^
+ */
+ early_platform_driver_register_all("earlyprintk");
+ early_platform_driver_probe("earlyprintk", 1, 0);
+
+ /*
+ * This is to make sure that arc uart would be preferred console
+ * despite one/more of following:
+ * -command line lacked "console=ttyARC0" or
+ * -CONFIG_VT_CONSOLE was enabled (for no reason whatsoever)
+	 * Note that this needs to be done after the above early console is registered,
+ * otherwise the early console never gets a chance to run.
+ */
+ add_preferred_console("ttyARC", 0, "115200");
+#endif
+}
+
+static void __init plat_fpga_early_init(void)
+{
+ pr_info("[plat-arcfpga]: registering early dev resources\n");
+
+ setup_bvci_lat_unit();
+
+ arc_fpga_serial_init();
+
+#ifdef CONFIG_SMP
+ iss_model_init_early_smp();
+#endif
+}
+
+static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
+#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE)
+ OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
+#endif
+ {}
+};
+
+static void __init plat_fpga_populate_dev(void)
+{
+ pr_info("[plat-arcfpga]: registering device resources\n");
+
+ /*
+ * Traverses flattened DeviceTree - registering platform devices
+ * complete with their resources
+ */
+ of_platform_populate(NULL, of_default_bus_match_table,
+ plat_auxdata_lookup, NULL);
+}
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * Machine description is simply a set of platform/board specific callbacks
+ * This is not directly related to DeviceTree based dynamic device creation,
+ * however as part of early device tree scan, we also select the right
+ * callback set, by matching the DT compatible name.
+ */
+
+static const char *aa4_compat[] __initdata = {
+ "snps,arc-angel4",
+ NULL,
+};
+
+MACHINE_START(ANGEL4, "angel4")
+ .dt_compat = aa4_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
+
+static const char *ml509_compat[] __initdata = {
+ "snps,arc-ml509",
+ NULL,
+};
+
+MACHINE_START(ML509, "ml509")
+ .dt_compat = ml509_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
new file mode 100644
index 000000000000..91b55349a5f8
--- /dev/null
+++ b/arch/arc/plat-arcfpga/smp.c
@@ -0,0 +1,171 @@
+/*
+ * ARC700 Simulation-only Extensions for SMP
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineet Gupta - 2012 : split off arch common and plat specific SMP
+ * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit API's
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <plat/irq.h>
+#include <plat/smp.h>
+
+static char smp_cpuinfo_buf[128];
+
+/*
+ *-------------------------------------------------------------------
+ * Platform specific callbacks expected by arch SMP code
+ *-------------------------------------------------------------------
+ */
+
+/*
+ * Master kick starting another CPU
+ */
+static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
+{
+ /* setup the start PC */
+ write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
+
+ /* Trigger WRITE_PC cmd for this cpu */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
+
+ /* Take the cpu out of Halt */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
+
+}
+
+/*
+ * Any SMP specific init any CPU does when it comes up.
+ * Here we setup the CPU to enable Inter-Processor-Interrupts
+ * Called for each CPU
+ * -Master : init_IRQ()
+ * -Other(s) : start_kernel_secondary()
+ */
+void iss_model_init_smp(unsigned int cpu)
+{
+ /* Check if CPU is configured for more than 16 interrupts */
+ if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
+ panic("[arcfpga] IRQ system can't support IDU IPI\n");
+
+ idu_disable();
+
+ /****************************************************************
+ * IDU provides a set of Common IRQs, each of which can be dynamically
+ * attached to (1|many|all) CPUs.
+ * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
+ *
+ * Here we use a simple 1:1 mapping:
+ * A CPU 'x' is wired to Common IRQ 'x'.
+	 * So an IDU ASSERT on IRQ 'x' will trigger an Interrupt on CPU 'x', which
+ * makes up for our simple IPI plumbing.
+ *
+ * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
+ * w/o having to do one-at-a-time
+ ******************************************************************/
+
+ /*
+ * Claim an IRQ which would trigger IPI on this CPU.
+ * In IDU parlance it involves setting up a cpu bitmask for the IRQ
+ * The bitmap here contains only 1 CPU (self).
+ */
+ idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
+
+ /* Set the IRQ destination to use the bitmask above */
+ idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
+ IDU_IRQ_MODE_PULSE_TRIG);
+
+ idu_enable();
+
+ /* Attach the arch-common IPI ISR to our IDU IRQ */
+ smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
+}
+
+static void iss_model_ipi_send(void *arg)
+{
+ struct cpumask *callmap = arg;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, callmap)
+ idu_irq_assert(cpu);
+}
+
+static void iss_model_ipi_clear(int cpu, int irq)
+{
+ idu_irq_clear(IDU_INTERRUPT_0 + cpu);
+}
+
+void iss_model_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+
+ struct bcr_mp mp;
+
+ READ_BCR(ARC_REG_MP_BCR, mp);
+
+ sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
+ mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
+ IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
+
+ plat_smp_ops.info = smp_cpuinfo_buf;
+
+ plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
+ plat_smp_ops.ipi_send = iss_model_ipi_send;
+ plat_smp_ops.ipi_clear = iss_model_ipi_clear;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Low level Platform IPI Providers
+ *-------------------------------------------------------------------
+ */
+
+/* Set the Mode for the Common IRQ */
+void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
+{
+ uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
+
+ IDU_SET_PARAM(par);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
+}
+
+/* Set the target cpu Bitmask for Common IRQ */
+void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
+{
+ IDU_SET_PARAM(mask);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
+}
+
+/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
+bool idu_irq_get_ack(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
+
+/*
+ * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
+ * -Pending means CPU has not yet noticed the IRQ (e.g. disabled)
+ * -After the Interrupt has been taken, the IPI explicitly needs to be
+ * cleared, to be acknowledged.
+ */
+bool idu_irq_get_pend(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4d5ea7648574..a2a47d9d6a22 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
+ select SYSCTL_ARCH_UNALIGN_ALLOW
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_RELA
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index c084767c88bc..59811df58c5b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
csum_partial_copy((src), (dst), (len), (sum))
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+#ifndef csum_fold
/*
* Fold a partial checksum
*/
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
+#endif
#ifndef csum_tcpudp_nofold
/*
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 9788568f7978..c184aa8ec8cd 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -7,7 +7,6 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
}
#endif
+#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
-EFAULT; \
})
+#ifndef __put_user_fn
+
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
size = __copy_to_user(ptr, x, size);
return size ? -EFAULT : size;
}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
extern int __put_user_bad(void) __attribute__((noreturn));
#define __get_user(x, ptr) \
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
-EFAULT; \
})
+#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
size = __copy_from_user(x, ptr, size);
return size ? -EFAULT : size;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
extern int __get_user_bad(void) __attribute__((noreturn));
#ifndef __copy_from_user_inatomic
diff --git a/init/Kconfig b/init/Kconfig
index 0a5e80fb9ba2..22616cd434bc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1230,6 +1230,14 @@ config SYSCTL_ARCH_UNALIGN_NO_WARN
Allows arch to define/use @no_unaligned_warning to possibly warn
about unaligned access emulation going on under the hood.
+config SYSCTL_ARCH_UNALIGN_ALLOW
+ bool
+ help
+ Enable support for /proc/sys/kernel/unaligned-trap
+	  Allows arches to define/use @unaligned_enabled to toggle the
+	  unaligned access emulation at runtime.
+	  See arch/parisc/kernel/unaligned.c for reference
+
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d1b4ee67d2df..afc1dc60f3f8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -157,6 +157,9 @@ extern int sysctl_tsb_ratio;
#ifdef __hppa__
extern int pwrsw_enabled;
+#endif
+
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
extern int unaligned_enabled;
#endif
@@ -555,6 +558,8 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#endif
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
{
.procname = "unaligned-trap",
.data = &unaligned_enabled,
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff20..129775eb6de6 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -102,6 +102,7 @@ out:
}
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
+#endif
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c2206c87fc9f..d5818c98d051 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -94,6 +94,12 @@
#define CPUINFO_PROC "cpu model"
#endif
+#ifdef __arc__
+#define rmb() asm volatile("" ::: "memory")
+#define cpu_relax() rmb()
+#define CPUINFO_PROC "Processor"
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>