diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7789857d147034b8cbfd1e44d599f81db085215b..80bb1e6c2c2906d0764ae5b696e72053c8faff9c 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -144,8 +144,9 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        while (addr < end) {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
+               pte = NULL;
                if (pud_none(*pud)) {
-                       addr = pud_addr_end(addr, end);
+                       addr = kvm_pud_addr_end(addr, end);
                        continue;
                }
 
@@ -155,13 +156,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                         * move on.
                         */
                        clear_pud_entry(kvm, pud, addr);
-                       addr = pud_addr_end(addr, end);
+                       addr = kvm_pud_addr_end(addr, end);
                        continue;
                }
 
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
-                       addr = pmd_addr_end(addr, end);
+                       addr = kvm_pmd_addr_end(addr, end);
                        continue;
                }
 
@@ -174,12 +175,12 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                /*
                 * If the pmd entry is to be cleared, walk back up the ladder
                 */
-               if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
+               if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
                        clear_pmd_entry(kvm, pmd, addr);
-                       next = pmd_addr_end(addr, end);
+                       next = kvm_pmd_addr_end(addr, end);
                        if (page_empty(pmd) && !page_empty(pud)) {
                                clear_pud_entry(kvm, pud, addr);
-                               next = pud_addr_end(addr, end);
+                               next = kvm_pud_addr_end(addr, end);
                        }
                }
 
@@ -187,6 +188,99 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        }
 }
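
Two things change in unmap_range() above. First, the generic p*d_addr_end() helpers take an unsigned long, which on 32-bit ARM truncates the 40-bit stage-2 IPA; the kvm_p*d_addr_end() variants compute the same boundary 64-bit clean. A sketch of the pattern, assuming the kvm_mmu.h definitions follow the usual boundary idiom:

    /* Sketch of the 64-bit clean boundary helper (asm/kvm_mmu.h). */
    #define kvm_pgd_addr_end(addr, end)                                   \
    ({      u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;          \
            (__boundary - 1 < (end) - 1) ? __boundary : (end);            \
    })

Second, pte is now reset to NULL on each iteration and checked before page_empty(pte): on the kvm_pmd_huge() path no pte is ever computed, so the compiler cannot prove the pointer is initialized by the time page_empty() is reached. The guard matters because page_empty(), as defined earlier in this file, immediately turns the pointer into a struct page:

    static bool page_empty(void *ptr)
    {
            struct page *ptr_page = virt_to_page(ptr);
            return page_count(ptr_page) == 1;
    }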
 
+static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pte_t *pte;
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+}
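
stage2_flush_ptes() walks the leaf level: addr is an IPA, so addr >> PAGE_SHIFT is the gfn, gfn_to_hva() yields the host virtual address of the page backing it, and kvm_flush_dcache_to_poc() cleans that range to the point of coherency so that data is visible to DMA even while the guest runs uncached. On 32-bit ARM the flush macro is presumably a thin wrapper over the existing cache op, roughly:

    /* asm/kvm_mmu.h, roughly: clean+invalidate a VA range to PoC. */
    #define kvm_flush_dcache_to_poc(a, l)   __cpuc_flush_dcache_area((a), (l))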
+
+static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pmd_t *pmd;
+       phys_addr_t next;
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = kvm_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (kvm_pmd_huge(*pmd)) {
+                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                               kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
+                       } else {
+                               stage2_flush_ptes(kvm, pmd, addr, next);
+                       }
+               }
+       } while (pmd++, addr = next, addr != end);
+}
+
+static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
+                             phys_addr_t addr, phys_addr_t end)
+{
+       pud_t *pud;
+       phys_addr_t next;
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = kvm_pud_addr_end(addr, end);
+               if (!pud_none(*pud)) {
+                       if (pud_huge(*pud)) {
+                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
+                               kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
+                       } else {
+                               stage2_flush_pmds(kvm, pud, addr, next);
+                       }
+               }
+       } while (pud++, addr = next, addr != end);
+}
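
stage2_flush_pmds() and stage2_flush_puds() are the same walk one and two levels up: each entry is either absent (skip to the next boundary), a table (recurse), or a block mapping, in which case the whole PMD_SIZE/PUD_SIZE range is flushed in one call. Note that kvm_pmd_huge() has to cover both hugetlbfs and THP block mappings; earlier in this file it is defined along the lines of:

    /* A huge pmd is either a hugetlbfs or a transparent huge page mapping. */
    #define kvm_pmd_huge(_x)        (pmd_huge(_x) || pmd_trans_huge(_x))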
+
+static void stage2_flush_memslot(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot)
+{
+       phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+       phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
+       phys_addr_t next;
+       pgd_t *pgd;
+
+       pgd = kvm->arch.pgd + pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               stage2_flush_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
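
stage2_flush_memslot() just converts the slot's gfn range into an IPA range and hands each pgd-sized chunk to stage2_flush_puds(). With 4K pages, a hypothetical slot illustrates the arithmetic:

    /* Hypothetical memslot: base_gfn = 0x80000, npages = 0x40000, 4K pages. */
    phys_addr_t addr = 0x80000ULL << 12;            /* IPA 0x80000000 */
    phys_addr_t end  = addr + (0x40000ULL << 12);   /* IPA 0xc0000000 */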
+
+/**
+ * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the stage 2 page tables and invalidate any cache lines
+ * backing memory already mapped to the VM.
+ */
+void stage2_flush_vm(struct kvm *kvm)
+{
+       struct kvm_memslots *slots;
+       struct kvm_memory_slot *memslot;
+       int idx;
+
+       idx = srcu_read_lock(&kvm->srcu);
+       spin_lock(&kvm->mmu_lock);
+
+       slots = kvm_memslots(kvm);
+       kvm_for_each_memslot(memslot, slots)
+               stage2_flush_memslot(kvm, memslot);
+
+       spin_unlock(&kvm->mmu_lock);
+       srcu_read_unlock(&kvm->srcu, idx);
+}
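
The SRCU read lock keeps the memslot array stable while mmu_lock prevents the stage-2 tables from changing under the walk. Since stage2_flush_vm() is meant to be called from the trap handling that notices the guest turning its caches on, the patch presumably also exports it through the kvm_mmu.h headers, along the lines of:

    /* asm/kvm_mmu.h (and the arm64 counterpart), presumably: */
    void stage2_flush_vm(struct kvm *kvm);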
+
 /**
  * free_boot_hyp_pgd - free HYP boot page tables
  *
@@ -715,7 +809,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, PAGE_S2);
@@ -723,7 +817,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
        }