X-Git-Url: http://git.agner.ch/gitweb/?p=linux-drm-fsl-dcu.git;a=blobdiff_plain;f=arch%2Fi386%2Fmm%2Fhighmem.c;h=bb2de1089add4e90270e4f35afbbe442007cbefe;hp=ba44000b9069f5376d64304fafdf43ddcd4987f7;hb=d68798374bcf5cd4a19105b86d96121651b3c8cb;hpb=e823aff2d6eb43083abcc75a32ddfb167c324089

diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index ba44000b9069..bb2de1089add 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -32,47 +32,41 @@ void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	if (!pte_none(*(kmap_pte-idx)))
-		BUG();
-#endif
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-		dec_preempt_count();
-		preempt_check_resched();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-
 	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it. Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
 	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	__flush_tlb_one(vaddr);
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+	else {
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
 #endif
+	}
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 /* This is the same as kmap_atomic() but can map memory that doesn't
@@ -83,12 +77,11 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
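
For context (not part of the diff above): callers of kmap_atomic()/kunmap_atomic() still must not sleep or fault between map and unmap, since pagefault_disable() keeps in_atomic() true for do_page_fault(), and the fixmap pte is now cleared and TLB-flushed in one step by kpte_clear_flush() at unmap time. A minimal caller sketch against the 2.6.20-era interface follows; the helper name copy_buf_to_page() is hypothetical, while kmap_atomic(), kunmap_atomic() and KM_USER0 are the interfaces of that era.

	#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic() */
	#include <linux/mm.h>		/* struct page, PAGE_SIZE */
	#include <linux/string.h>	/* memcpy() */

	/*
	 * Illustrative sketch only: copy one page of kernel data into a
	 * (possibly highmem) destination page through an atomic kmap.
	 * Page faults (and preemption) are disabled between the map and
	 * unmap calls, so this section must not sleep.
	 */
	static void copy_buf_to_page(struct page *dst, const void *src)
	{
		void *vto;

		vto = kmap_atomic(dst, KM_USER0);	/* pagefault_disable() happens here */
		memcpy(vto, src, PAGE_SIZE);
		kunmap_atomic(vto, KM_USER0);		/* pte cleared + flushed, pagefault_enable() */
	}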