mm/page_alloc: protect pcp->batch accesses with ACCESS_ONCE
author Cody P Schafer <cody@linux.vnet.ibm.com>
Wed, 3 Jul 2013 22:01:32 +0000 (15:01 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 3 Jul 2013 23:07:27 +0000 (16:07 -0700)
pcp->batch could change at any point; avoid relying on it being a stable
value.
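
As an illustration (not kernel code; the struct and field names below are
hypothetical), the issue is that two plain reads of a shared field can
observe different values if another CPU updates it in between, whereas a
single ACCESS_ONCE() snapshot is used consistently:

	/* Racy: the compiler is free to reload s->batch for each use,
	 * so the comparison and the assignment may see different values. */
	if (s->count >= s->batch)
		to_drain = s->batch;

	/* Safer: snapshot the field once and reuse the local copy. */
	unsigned long batch = ACCESS_ONCE(s->batch);
	if (s->count >= batch)
		to_drain = batch;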

Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index eaaef2a094249d1e7f68ff249bdb840365e5ae91..97b8f861e63d49c6ede9eafc46a9b11410e3a281 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1182,10 +1182,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
        unsigned long flags;
        int to_drain;
+       unsigned long batch;
 
        local_irq_save(flags);
-       if (pcp->count >= pcp->batch)
-               to_drain = pcp->batch;
+       batch = ACCESS_ONCE(pcp->batch);
+       if (pcp->count >= batch)
+               to_drain = batch;
        else
                to_drain = pcp->count;
        if (to_drain > 0) {
@@ -1353,8 +1355,9 @@ void free_hot_cold_page(struct page *page, int cold)
                list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
-               free_pcppages_bulk(zone, pcp->batch, pcp);
-               pcp->count -= pcp->batch;
+               unsigned long batch = ACCESS_ONCE(pcp->batch);
+               free_pcppages_bulk(zone, batch, pcp);
+               pcp->count -= batch;
        }
 
 out:
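
For reference, ACCESS_ONCE() in kernels of this era is defined in
include/linux/compiler.h roughly as below; it forces the compiler to emit
exactly one volatile-qualified access to the lvalue instead of re-reading
it at each use:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))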