arch/mips/mm/tlb-r8k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

extern void build_tlb_refill_handler(void);

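/*
 * The R8000 (TFP) TLB holds 384 entries organised as three sets of 128
 * entries each; entry >> TFP_TLB_SET_SHIFT selects the set an entry
 * belongs to.
 */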
#define TFP_TLB_SIZE            384
#define TFP_TLB_SET_SHIFT       7

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")

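/*
 * Flush the entire TLB: every entry in all three sets is rewritten with
 * a unique CKSEG0 address as an impossible VPN and a zeroed EntryLo, so
 * no stale translation can match afterwards.
 */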
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo(0);

        for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
                write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
                write_c0_vaddr(entry << PAGE_SHIFT);
                write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
                mtc0_tlbw_hazard();
                tlb_write();
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
}

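/*
 * Flush all user mappings of @mm on this CPU.  Rather than probing
 * individual entries, the mm's MMU context is dropped so it picks up a
 * fresh ASID the next time it runs.
 */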
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0)
                drop_mmu_context(mm, cpu);
}

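/*
 * Flush the user mappings in [start, end) of @vma's mm.  Small ranges
 * are probed and invalidated page by page; a range covering more than
 * half the TLB is handled by dropping the whole context instead.
 */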
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();
        unsigned long flags;
        int oldpid, newpid, size;

        if (!cpu_context(cpu, mm))
                return;

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;

        local_irq_save(flags);

        if (size > TFP_TLB_SIZE / 2) {
                drop_mmu_context(mm, cpu);
                goto out_restore;
        }

        oldpid = read_c0_entryhi();
        newpid = cpu_asid(cpu, mm);

        write_c0_entrylo(0);

        start &= PAGE_MASK;
        end += (PAGE_SIZE - 1);
        end &= PAGE_MASK;
        while (start < end) {
                signed long idx;

                write_c0_vaddr(start);
                write_c0_entryhi(start);
                start += PAGE_SIZE;
                tlb_probe();
                idx = read_c0_tlbset();
                if (idx < 0)
                        continue;

                write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                tlb_write();
        }
        write_c0_entryhi(oldpid);

out_restore:
        local_irq_restore(flags);
}

/* Usable for KV1 addresses only! */
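/*
 * Flush kernel mappings in [start, end).  As with the user variant, a
 * range covering more than half the TLB falls back to a full
 * local_flush_tlb_all().
 */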
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;

        if (size > TFP_TLB_SIZE / 2) {
                local_flush_tlb_all();
                return;
        }

        local_irq_save(flags);

        write_c0_entrylo(0);

        start &= PAGE_MASK;
        end += (PAGE_SIZE - 1);
        end &= PAGE_MASK;
        while (start < end) {
                signed long idx;

                write_c0_vaddr(start);
                write_c0_entryhi(start);
                start += PAGE_SIZE;
                tlb_probe();
                idx = read_c0_tlbset();
                if (idx < 0)
                        continue;

                write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
                tlb_write();
        }

        local_irq_restore(flags);
}

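/*
 * Flush the single user page @page from @vma's address space, provided
 * the mm still has a live ASID on this CPU.
 */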
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();
        unsigned long flags;
        int oldpid, newpid;
        signed long idx;

        if (!cpu_context(cpu, vma->vm_mm))
                return;

        newpid = cpu_asid(cpu, vma->vm_mm);
        page &= PAGE_MASK;
        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        write_c0_vaddr(page);
        write_c0_entryhi(newpid);
        tlb_probe();
        idx = read_c0_tlbset();
        if (idx < 0)
                goto finish;

        write_c0_entrylo(0);
        write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
        tlb_write();

finish:
        write_c0_entryhi(oldpid);
        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pmd_t *pmdp;
        pte_t *ptep;
        int pid;

        /*
         * Handle the debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        pid = read_c0_entryhi() & ASID_MASK;

        local_irq_save(flags);
        address &= PAGE_MASK;
        write_c0_vaddr(address);
        write_c0_entryhi(pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        pmdp = pmd_offset(pgdp, address);
        ptep = pte_offset_map(pmdp, address);
        tlb_probe();

        write_c0_entrylo(pte_val(*ptep++) >> 6);
        tlb_write();

        write_c0_entryhi(pid);
        local_irq_restore(flags);
}

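/*
 * The TFP TLB geometry is fixed, so probing just records the known size
 * rather than decoding the config register.
 */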
static void probe_tlb(unsigned long config)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        c->tlbsize = 3 * 128;           /* 3 sets of 128 entries each */
}

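/*
 * Boot-time TLB setup: program the kernel and user page sizes into the
 * status register, clear the wired count, flush the whole TLB and
 * install the TLB refill handler.
 */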
void tlb_init(void)
{
        unsigned int config = read_c0_config();
        unsigned long status;

        probe_tlb(config);

        status = read_c0_status();
        status &= ~(ST0_UPS | ST0_KPS);
#ifdef CONFIG_PAGE_SIZE_4KB
        status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36);
#elif defined(CONFIG_PAGE_SIZE_8KB)
        status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36);
#elif defined(CONFIG_PAGE_SIZE_16KB)
        status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36);
#elif defined(CONFIG_PAGE_SIZE_64KB)
        status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36);
#endif
        write_c0_status(status);

        write_c0_wired(0);

        local_flush_tlb_all();

        build_tlb_refill_handler();
}