/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
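/*
 * Illustrative sketch, not built (and not part of the original header):
 * how the dummy __P000..__S111 slots above typically get their real
 * values once CPU features such as cpu_has_rixi are known.  The helper
 * name and the subset of entries shown are assumptions made for
 * illustration; the authoritative code lives in the MIPS mm setup.
 */
#if 0
static void example_setup_protection_map(void)
{
	/* Private (copy-on-write) mappings; the index bits are xwr. */
	protection_map[0]  = PAGE_NONE;		/* no access */
	protection_map[1]  = PAGE_READONLY;	/* read only */
	protection_map[3]  = PAGE_COPY;		/* read+write: writes fault, COW */
	/* Shared mappings occupy indices 8..15. */
	protection_map[8]  = PAGE_NONE;		/* no access */
	protection_map[11] = PAGE_SHARED;	/* read+write: writes go through */
}
#endif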
extern unsigned long _page_cachable_default;
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
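/*
 * Illustrative sketch, not built: ZERO_PAGE(vaddr) picks the zero page
 * whose cache colour matches the user address it will be mapped at,
 * which is why zero_page_mask exists and __HAVE_COLOR_ZERO_PAGE is
 * advertised.  The helper below is an assumption for illustration only.
 */
#if 0
static struct page *example_pick_zero_page(unsigned long user_vaddr)
{
	/*
	 * With, say, four 4 KiB colours, zero_page_mask is 0x3000 and
	 * addresses 0x...0000 and 0x...3000 return different pages.
	 */
	return ZERO_PAGE(user_vaddr);
}
#endif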
extern void paging_init(void);
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pmd_page_vaddr(pmd)	pmd_val(pmd)
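/*
 * Illustrative sketch, not built: the pmd helpers above treat a pmd
 * entry as the kernel virtual address of a page-table page, so a
 * software walk goes pmd -> pte base -> pte slot.  The helper name is
 * an assumption; the generic pte_offset_kernel() does the same job.
 */
#if 0
static pte_t *example_pte_lookup(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte_base = (pte_t *)pmd_page_vaddr(*pmd);

	return pte_base + ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
#endif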
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low  |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
}
#else
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
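/*
 * Illustrative sketch, not built: a MIPS TLB entry maps a pair of
 * adjacent virtual pages ("buddies"), and the global bit of the entry
 * is effectively the AND of the two EntryLo global bits.  set_pte()
 * therefore marks an empty buddy global as well, so installing a
 * kernel (global) pair never leaves a half-global entry.  The helper
 * name below is an assumption for illustration.
 */
#if 0
static void example_install_global_pair(pte_t *ptep, pte_t even, pte_t odd)
{
	set_pte(ptep, even);		/* an empty buddy is made global here */
	set_pte(ptep_buddy(ptep), odd);	/* both halves now agree on _PAGE_GLOBAL */
}
#endif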
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
}
#endif
/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
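/*
 * Worked example (illustrative): with 4-byte entries, sizeof(pte_t) == 4,
 * __builtin_ffs(4) == 3, so PTE_T_LOG2 == 2 and a byte offset within a
 * page table shifts right by PTE_T_LOG2 to become a pte index.  The TLB
 * refill handlers rely on these being compile-time constants.
 */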
/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}
#endif
#ifdef _PAGE_HUGE
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* _PAGE_HUGE */
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
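/*
 * Illustrative sketch, not built: the accessors above are only defined
 * for present ptes, so callers test pte_present() first.  The helper
 * name is an assumption for illustration.
 */
#if 0
static int example_pte_is_dirty(pte_t *ptep)
{
	pte_t pte = *ptep;

	if (!pte_present(pte))
		return 0;

	return pte_dirty(pte) != 0;
}
#endif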
/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
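/*
 * Illustrative sketch, not built: a driver mapping device memory into
 * user space would typically run the vma protection through
 * pgprot_noncached() before remapping.  The mmap handler below follows
 * the usual driver pattern but is an assumption for illustration.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif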
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= ~0x3f;
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & 0x3f;
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
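/*
 * Illustrative sketch, not built: mk_pte() builds a pte from a page plus
 * a protection, while pte_modify() swaps in new protection bits and
 * keeps whatever _PAGE_CHG_MASK covers (the pfn and sticky state bits).
 * The helper name is an assumption for illustration.
 */
#if 0
static void example_change_protection(pte_t *ptep, pgprot_t newprot)
{
	pte_t pte = *ptep;

	pte = pte_modify(pte, newprot);	/* keep pfn, change protection */
	set_pte(ptep, pte);
}
#endif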
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
#define kern_addr_valid(addr)	(1)
#ifdef CONFIG_64BIT_PHYS_ADDR
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#else
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
#include <asm-generic/pgtable.h>
/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif
/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif /* _ASM_PGTABLE_H */