Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 14 Feb 2015 18:54:28 +0000 (10:54 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 14 Feb 2015 18:54:28 +0000 (10:54 -0800)
Pull ACCESS_ONCE() rule tightening from Christian Borntraeger:
 "Tighten rules for ACCESS_ONCE

  This series tightens the rules for ACCESS_ONCE to only work on scalar
  types.  It also contains the necessary fixups as indicated by build
  bots of linux-next.  Now everything is in place to prevent new
  non-scalar users of ACCESS_ONCE and we can continue to convert code to
  READ_ONCE/WRITE_ONCE"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  kernel: Fix sparse warning for ACCESS_ONCE
  next: sh: Fix compile error
  kernel: tighten rules for ACCESS ONCE
  mm/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Leftover conversion ACCESS_ONCE->READ_ONCE
  x86/xen/p2m: Replace ACCESS_ONCE with READ_ONCE
  ppc/hugetlbfs: Replace ACCESS_ONCE with READ_ONCE
  ppc/kvm: Replace ACCESS_ONCE with READ_ONCE

arch/powerpc/kvm/book3s_hv_rm_xics.c
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/mm/hugetlbpage.c
arch/sh/mm/gup.c
arch/x86/include/asm/spinlock.h
arch/x86/xen/p2m.c
include/linux/compiler.h
mm/gup.c

index 7b066f6b02aded88e701b536b2ddcd5cb24d7a96..7c22997de906410fc71802855607b92f2d700954 100644 (file)
@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
         * in virtual mode.
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                /* Down_CPPR */
                new_state.cppr = new_cppr;
@@ -211,7 +211,7 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
         * pending priority
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
@@ -277,7 +277,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
         * whenever the MFRR is made less favored.
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                /* Set_MFRR */
                new_state.mfrr = mfrr;
@@ -352,7 +352,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
        icp_rm_clr_vcpu_irq(icp->vcpu);
 
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                reject = 0;
                new_state.cppr = cppr;
index 807351f76f84e899d560d2766e5163583d2ca910..a4a8d9f0dcb735d6d16057aeb81fc9e4ac1bb5e1 100644 (file)
@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                 icp->server_num);
 
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                *reject = 0;
 
@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
         * in virtual mode.
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                /* Down_CPPR */
                new_state.cppr = new_cppr;
@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
         * pending priority
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
@@ -634,7 +634,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
         * whenever the MFRR is made less favored.
         */
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                /* Set_MFRR */
                new_state.mfrr = mfrr;
@@ -679,7 +679,7 @@ static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
                if (!icp)
                        return H_PARAMETER;
        }
-       state = ACCESS_ONCE(icp->state);
+       state = READ_ONCE(icp->state);
        kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
        kvmppc_set_gpr(vcpu, 5, state.mfrr);
        return H_SUCCESS;
@@ -721,7 +721,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
                                      BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 
        do {
-               old_state = new_state = ACCESS_ONCE(icp->state);
+               old_state = new_state = READ_ONCE(icp->state);
 
                reject = 0;
                new_state.cppr = cppr;
@@ -885,7 +885,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
                if (!icp)
                        continue;
 
-               state.raw = ACCESS_ONCE(icp->state.raw);
+               state.raw = READ_ONCE(icp->state.raw);
                seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
                           icp->server_num, state.xisr,
                           state.pending_pri, state.cppr, state.mfrr,
@@ -1082,7 +1082,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
         * the ICS states before the ICP states.
         */
        do {
-               old_state = ACCESS_ONCE(icp->state);
+               old_state = READ_ONCE(icp->state);
 
                if (new_state.mfrr <= old_state.mfrr) {
                        resend = false;
index cf0464f4284f22861391a7bc5c10125323df1768..7e408bfc79482e2c9caff1257f0c8002335aa92c 100644 (file)
@@ -986,7 +986,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
-               pud  = ACCESS_ONCE(*pudp);
+               pud  = READ_ONCE(*pudp);
 
                if (pud_none(pud))
                        return NULL;
@@ -998,7 +998,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
-                       pmd  = ACCESS_ONCE(*pmdp);
+                       pmd  = READ_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none, because
                         * it mark the pmd none and do a hpte invalidate.
index e15f52a17b6c1c3650ccb1e2549e3e2bbc7679e1..e7af6a65baab915552ae405a16d3bd405b84f763 100644 (file)
@@ -17,7 +17,7 @@
 static inline pte_t gup_get_pte(pte_t *ptep)
 {
 #ifndef CONFIG_X2TLB
-       return ACCESS_ONCE(*ptep);
+       return READ_ONCE(*ptep);
 #else
        /*
         * With get_user_pages_fast, we walk down the pagetables without
index 625660f8a2fcf0cb4b4b1a9216908c98fffe00a1..7050d864f5207c4fb384672b083a18a6584bdbbe 100644 (file)
@@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-       __ticket_t head = ACCESS_ONCE(lock->tickets.head);
+       __ticket_t head = READ_ONCE(lock->tickets.head);
 
        for (;;) {
-               struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+               struct __raw_tickets tmp = READ_ONCE(lock->tickets);
                /*
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be false positive because of overflow.
index f18fd1d411f6f13775121ccf62d8ecf3266c9228..740ae3026a148ec54800710427a455dc890351d3 100644 (file)
@@ -550,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
                mid_mfn = NULL;
        }
 
-       p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+       p2m_pfn = pte_pfn(READ_ONCE(*ptep));
        if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
            p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
                /* p2m leaf page is missing */
index 17f624cdf53c5609066389bd3da3375fcdfe2827..d1ec10a940ffffb01a94bcedf9d113e1940619c7 100644 (file)
@@ -451,12 +451,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * to make the compiler aware of ordering is to put the two invocations of
  * ACCESS_ONCE() in different C statements.
  *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.  Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE/ASSIGN_ONCE instead.
  */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#define __ACCESS_ONCE(x) ({ \
+        __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+       (volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
 #ifdef CONFIG_KPROBES
index 51bf0b06ca7bd76a53f6370d60baf93d9e7996f8..a6e24e246f8688af7664966e66d99dbd038b8066 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1092,7 +1092,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 
        pmdp = pmd_offset(&pud, addr);
        do {
-               pmd_t pmd = ACCESS_ONCE(*pmdp);
+               pmd_t pmd = READ_ONCE(*pmdp);
 
                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))