#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

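/*
 * Without CONFIG_PARAVIRT there is no hypervisor hook to call, so provide
 * an empty paravirt_activate_mm() stub to keep activate_mm() building on
 * native kernels.
 */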
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

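/*
 * enter_lazy_tlb() is called when the CPU is about to run a kernel thread:
 * the previous mm's page tables stay loaded, and the CPU is marked
 * TLBSTATE_LAZY so that a later flush IPI can make it drop the mm via
 * leave_mm() instead of servicing further flushes for it.
 */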
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

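/*
 * switch_mm() installs @next's address space on this CPU: it updates the
 * per-CPU TLB state, marks the CPU in next's mm_cpumask, reloads CR3
 * (flushing non-global TLB entries), stops flush IPIs for @prev, and
 * reloads the LDT when the two mms use different LDTs.
 */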
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible leave_mm(prev) has been called.  If so,
		 * then prev->context.ldt could be out of sync with the
		 * LDT descriptor or the LDT register.  This can only happen
		 * if prev->context.ldt is non-null, since we never free
		 * an LDT.  But LDTs can't be shared across mms, so
		 * prev->context.ldt won't be equal to next->context.ldt.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm() disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

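/*
 * activate_mm() is used on exec to install the freshly created mm; the
 * paravirt hook lets hypervisors such as Xen pin the new page tables
 * before switch_mm() loads them.
 */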
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

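/*
 * deactivate_mm() resets the FS/GS selectors when the old mm is dropped
 * on exec, so no stale references to its LDT or TLS entries survive into
 * the new image.
 */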
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

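/*
 * The arch_*() hooks below are invoked by the core mm code: arch_dup_mmap()
 * at fork, arch_exit_mmap() at address-space teardown, arch_bprm_mm_init()
 * while a new mm is set up for exec, and arch_unmap() when a range is
 * unmapped.
 */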
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */