arch/loongarch/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/mm_types.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>

/*
 * TLB invalidate/flush instructions.
 */
static inline void tlbclr(void)
{
	__asm__ __volatile__("tlbclr");
}

static inline void tlbflush(void)
{
	__asm__ __volatile__("tlbflush");
}

/*
 * TLB R/W operations.
 */
static inline void tlb_probe(void)
{
	__asm__ __volatile__("tlbsrch");
}

static inline void tlb_read(void)
{
	__asm__ __volatile__("tlbrd");
}

static inline void tlb_write_indexed(void)
{
	__asm__ __volatile__("tlbwr");
}

static inline void tlb_write_random(void)
{
	__asm__ __volatile__("tlbfill");
}
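
/*
 * A typical read-modify-write of one TLB entry using the helpers above
 * (an illustrative sketch, not kernel code; "vaddr" is a hypothetical local
 * and the CSR accessors come from <asm/loongarch.h>):
 *
 *	csr_write64(vaddr, LOONGARCH_CSR_TLBEHI);	// key to search for
 *	tlb_probe();		// tlbsrch: index of the match -> CSR.TLBIDX
 *	tlb_read();		// tlbrd: entry -> CSR.TLBEHI/TLBELO0/TLBELO1
 *	// ... modify CSR.TLBELO0/TLBELO1 ...
 *	tlb_write_indexed();	// tlbwr: write the entry back at CSR.TLBIDX
 *
 * tlb_write_random() (tlbfill) instead writes into a hardware-chosen slot.
 */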

enum invtlb_ops {
	/* Invalidate all TLB entries */
	INVTLB_ALL = 0x0,
	/* Invalidate all entries in the current TLB */
	INVTLB_CURRENT_ALL = 0x1,
	/* Invalidate all global==1 entries in the current TLB */
	INVTLB_CURRENT_GTRUE = 0x2,
	/* Invalidate all global==0 entries in the current TLB */
	INVTLB_CURRENT_GFALSE = 0x3,
	/* Invalidate global==0 entries with matching ASID in the current TLB */
	INVTLB_GFALSE_AND_ASID = 0x4,
	/* Invalidate the entry for addr, global==0 and matching ASID, in the current TLB */
	INVTLB_ADDR_GFALSE_AND_ASID = 0x5,
	/* Invalidate the entry for addr, global==1 or matching ASID, in the current TLB */
	INVTLB_ADDR_GTRUE_OR_ASID = 0x6,
	/* Invalidate entries with matching GID in the guest TLB */
	INVGTLB_GID = 0x9,
	/* Invalidate global==1 entries with matching GID in the guest TLB */
	INVGTLB_GID_GTRUE = 0xa,
	/* Invalidate global==0 entries with matching GID in the guest TLB */
	INVGTLB_GID_GFALSE = 0xb,
	/* Invalidate global==0 entries with matching GID and ASID in the guest TLB */
	INVGTLB_GID_GFALSE_ASID = 0xc,
	/* Invalidate global==0 entries with matching GID, ASID and addr in the guest TLB */
	INVGTLB_GID_GFALSE_ASID_ADDR = 0xd,
	/* Invalidate global==1 entries with matching GID, ASID and addr in the guest TLB */
	INVGTLB_GID_GTRUE_ASID_ADDR = 0xe,
	/* Invalidate all gva-->gpa guest TLB entries, any GID */
	INVGTLB_ALLGID_GVA_TO_GPA = 0x10,
	/* Invalidate all gpa-->hpa TLB entries, any GID */
	INVTLB_ALLGID_GPA_TO_HPA = 0x11,
	/* Invalidate all TLB entries of any GID, both gva-->gpa and gpa-->hpa */
	INVTLB_ALLGID = 0x12,
	/* Invalidate gva-->gpa guest TLB entries with matching GID */
	INVGTLB_GID_GVA_TO_GPA = 0x13,
	/* Invalidate gpa-->hpa TLB entries with matching GID */
	INVTLB_GID_GPA_TO_HPA = 0x14,
	/* Invalidate TLB entries with matching GID, both gva-->gpa and gpa-->hpa */
	INVTLB_GID_ALL = 0x15,
	/* Invalidate gpa-->hpa TLB entries with matching GID and addr */
	INVTLB_GID_ADDR = 0x16,
};

/*
 * invtlb op, info, addr
 *
 * Instruction encoding:
 *	(0x1 << 26) | (0x24 << 20) | (0x13 << 15) |
 *	(addr << 10) | (info << 5) | op
 *
 * The fixed opcode bits sum to 0x6498000; addr and info are register
 * numbers resolved via the parse_r assembler macro.
 */
static inline void invtlb(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r addr,%0\n\t"
		"parse_r info,%1\n\t"
		".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
		:
		: "r"(addr), "r"(info), "i"(op)
		:
		);
}

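/* As invtlb(), but the info operand is hard-wired to $zero. */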
static inline void invtlb_addr(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r addr,%0\n\t"
		".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
		:
		: "r"(addr), "i"(op)
		:
		);
}

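/* As invtlb(), but the addr operand is hard-wired to $zero. */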
static inline void invtlb_info(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r info,%0\n\t"
		".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
		:
		: "r"(info), "i"(op)
		:
		);
}

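/* As invtlb(), but both the addr and info operands are hard-wired to $zero. */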
static inline void invtlb_all(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
		:
		: "i"(op)
		:
		);
}
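
/*
 * Illustrative calls (a sketch only; "asid" and "vaddr" stand for local
 * variables, and the kernel's actual flush helpers live in
 * arch/loongarch/mm/tlb.c):
 *
 *	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);		  // drop the whole local TLB
 *	invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, vaddr); // drop one page of one ASID
 *	invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, vaddr); // drop one global (kernel) page
 */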

/*
 * LoongArch doesn't need any special per-pte or per-vma handling, except
 * that we need to flush the cache for the area being unmapped.
 */
#define tlb_start_vma(tlb, vma)					\
	do {							\
		if (!(tlb)->fullmm)				\
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	}  while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

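/*
 * The generic mmu_gather code calls tlb_flush(); forward-declare our
 * implementation and define the tlb_flush override macro before pulling
 * in <asm-generic/tlb.h> so that this version is used.
 */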
static void tlb_flush(struct mmu_gather *tlb);

#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
{
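	/*
	 * flush_tlb_range() takes a vma, but the mmu_gather only records the
	 * mm, so fabricate a minimal vma on the stack for it.
	 */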
	struct vm_area_struct vma;

	vma.vm_mm = tlb->mm;
	vma.vm_flags = 0;
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	flush_tlb_range(&vma, tlb->start, tlb->end);
}

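/* Low-level TLB exception handler entry points, implemented in assembly. */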
extern void handle_tlb_load(void);
extern void handle_tlb_store(void);
extern void handle_tlb_modify(void);
extern void handle_tlb_refill(void);
extern void handle_tlb_protect(void);

extern void dump_tlb_all(void);
extern void dump_tlb_regs(void);

#endif /* __ASM_TLB_H */