mm: send one IPI per CPU to TLB flush all entries after unmapping pages
[linux-drm-fsl-dcu.git] / mm / vmscan.c
index 8286938c70ded6b82d4268174c92669a90eeb674..2d978b28a410b25df1acde351630dee387efbbe5 100644 (file)
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc)
        if (!memcg)
                return true;
 #ifdef CONFIG_CGROUP_WRITEBACK
-       if (cgroup_on_dfl(mem_cgroup_css(memcg)->cgroup))
+       if (memcg->css.cgroup)
                return true;
 #endif
        return false;
@@ -985,7 +985,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
-                * 3) Legacy memcg encounters a page that is not already marked
+                * 3) Legacy memcg encounters a page that is already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
                 *    pages are in writeback and there is nothing else to
@@ -1015,12 +1015,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                 */
                                SetPageReclaim(page);
                                nr_writeback++;
-
                                goto keep_locked;
 
                        /* Case 3 above */
                        } else {
+                               unlock_page(page);
                                wait_on_page_writeback(page);
+                               /* then go back and try same page again */
+                               list_add_tail(&page->lru, page_list);
+                               continue;
                        }
                }
 
@@ -1057,7 +1060,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
-                       switch (try_to_unmap(page, ttu_flags)) {
+                       switch (try_to_unmap(page,
+                                       ttu_flags|TTU_BATCH_FLUSH)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
@@ -1097,7 +1101,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        if (!sc->may_writepage)
                                goto keep_locked;
 
-                       /* Page is dirty, try to write it out here */
+                       /*
+                        * Page is dirty. Flush the TLB if a writable entry
+                        * potentially exists to avoid CPU writes after IO
+                        * starts and then write it out here.
+                        */
+                       try_to_unmap_flush_dirty();
                        switch (pageout(page, mapping, sc)) {
                        case PAGE_KEEP:
                                goto keep_locked;
@@ -1190,7 +1199,7 @@ cull_mlocked:
                if (PageSwapCache(page))
                        try_to_free_swap(page);
                unlock_page(page);
-               putback_lru_page(page);
+               list_add(&page->lru, &ret_pages);
                continue;
 
 activate_locked:
@@ -1208,6 +1217,7 @@ keep:
        }
 
        mem_cgroup_uncharge_list(&free_pages);
+       try_to_unmap_flush();
        free_hot_cold_page_list(&free_pages, true);
 
        list_splice(&ret_pages, page_list);
@@ -1352,7 +1362,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        unsigned long nr_taken = 0;
        unsigned long scan;
 
-       for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
+       for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
+                                       !list_empty(src); scan++) {
                struct page *page;
                int nr_pages;
 
@@ -2151,6 +2162,23 @@ out:
        }
 }
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+static void init_tlb_ubc(void)
+{
+       /*
+        * This deliberately does not clear the cpumask as it's expensive
+        * and unnecessary. If there happens to be data in there then the
+        * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
+        * then will be cleared.
+        */
+       current->tlb_ubc.flush_required = false;
+}
+#else
+static inline void init_tlb_ubc(void)
+{
+}
+#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -2185,6 +2213,8 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
        scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
                         sc->priority == DEF_PRIORITY);
 
+       init_tlb_ubc();
+
        blk_start_plug(&plug);
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {