[kernel] r22523 - in dists/sid/linux/debian: . patches patches/bugfix/all/access_once
Ben Hutchings
benh at moszumanska.debian.org
Thu Apr 23 15:41:16 UTC 2015
Author: benh
Date: Thu Apr 23 15:41:16 2015
New Revision: 22523
Log:
Add READ_ONCE/WRITE_ONCE and use them where necessary
The CVE-2015-3339 fix uses READ_ONCE, and while we could change that
to ACCESS_ONCE there will be other fixes that really do need
READ_ONCE.
So backport these patches:
* kernel: Provide READ_ONCE and ASSIGN_ONCE
* Replace use of ACCESS_ONCE on non-scalar types with READ_ONCE or barriers
as appropriate [multiple patches]
* kernel: tighten rules for ACCESS ONCE
* kernel: Change ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val)
For powerpc, also patch misuses of ACCESS_ONCE in the get_user_pages
implementation which have been completely replaced upstream.
Added:
dists/sid/linux/debian/patches/bugfix/all/access_once/
dists/sid/linux/debian/patches/bugfix/all/access_once/0001-kernel-Provide-READ_ONCE-and-ASSIGN_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0002-mm-replace-ACCESS_ONCE-with-READ_ONCE-or-barriers.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0003-x86-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0004-x86-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0005-mips-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0006-arm64-spinlock-Replace-ACCESS_ONCE-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0007-arm-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0008-powerpc-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0009-ppc-kvm-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0010-ppc-hugetlbfs-Replace-ACCESS_ONCE-with-READ_ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0011-kernel-tighten-rules-for-ACCESS-ONCE.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0012-next-sh-Fix-compile-error.patch
dists/sid/linux/debian/patches/bugfix/all/access_once/0013-kernel-Change-ASSIGN_ONCE-val-x-to-WRITE_ONCE-x-val.patch
Modified:
dists/sid/linux/debian/changelog
dists/sid/linux/debian/patches/series
Modified: dists/sid/linux/debian/changelog
==============================================================================
--- dists/sid/linux/debian/changelog Wed Apr 22 18:17:09 2015 (r22522)
+++ dists/sid/linux/debian/changelog Thu Apr 23 15:41:16 2015 (r22523)
@@ -4,6 +4,11 @@
* [x86] crypto: aesni - fix memory usage in GCM decryption (Closes: #782561)
(CVE-2015-3331)
* tcp: Fix crash in TCP Fast Open (Closes: #782515) (CVE-2015-3332)
+ * kernel: Provide READ_ONCE and ASSIGN_ONCE
+ * Replace use of ACCESS_ONCE on non-scalar types with READ_ONCE or barriers
+ as appropriate
+ * kernel: tighten rules for ACCESS ONCE
+ * kernel: Change ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val)
* fs: take i_mutex during prepare_binprm for set[ug]id executables
(CVE-2015-3339)
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0001-kernel-Provide-READ_ONCE-and-ASSIGN_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0001-kernel-Provide-READ_ONCE-and-ASSIGN_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,105 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 25 Nov 2014 10:01:16 +0100
+Subject: kernel: Provide READ_ONCE and ASSIGN_ONCE
+Origin: https://git.kernel.org/linus/230fa253df6352af12ad0a16128760b5cb3f92df
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Let's provide READ_ONCE/ASSIGN_ONCE that will do all accesses via
+scalar types as suggested by Linus Torvalds. Accesses larger than
+the machine's word size cannot be guaranteed to be atomic. These
+macros will use memcpy and emit a build warning.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+---
+ include/linux/compiler.h | 74 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 74 insertions(+)
+
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index d5ad7b1..a1c81f8 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+ # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+ #endif
+
++#include <uapi/linux/types.h>
++
++static __always_inline void data_access_exceeds_word_size(void)
++#ifdef __compiletime_warning
++__compiletime_warning("data access exceeds word size and won't be atomic")
++#endif
++;
++
++static __always_inline void data_access_exceeds_word_size(void)
++{
++}
++
++static __always_inline void __read_once_size(volatile void *p, void *res, int size)
++{
++ switch (size) {
++ case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
++ case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
++ case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
++#ifdef CONFIG_64BIT
++ case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
++#endif
++ default:
++ barrier();
++ __builtin_memcpy((void *)res, (const void *)p, size);
++ data_access_exceeds_word_size();
++ barrier();
++ }
++}
++
++static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
++{
++ switch (size) {
++ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
++ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
++ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
++#ifdef CONFIG_64BIT
++ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
++#endif
++ default:
++ barrier();
++ __builtin_memcpy((void *)p, (const void *)res, size);
++ data_access_exceeds_word_size();
++ barrier();
++ }
++}
++
++/*
++ * Prevent the compiler from merging or refetching reads or writes. The
++ * compiler is also forbidden from reordering successive instances of
++ * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
++ * compiler is aware of some particular ordering. One way to make the
++ * compiler aware of ordering is to put the two invocations of READ_ONCE,
++ * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
++ *
++ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
++ * data types like structs or unions. If the size of the accessed data
++ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
++ * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
++ * compile-time warning.
++ *
++ * Their two major use cases are: (1) Mediating communication between
++ * process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ */
++
++#define READ_ONCE(x) \
++ ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
++
++#define ASSIGN_ONCE(val, x) \
++ ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
++
+ #endif /* __KERNEL__ */
+
+ #endif /* __ASSEMBLY__ */
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0002-mm-replace-ACCESS_ONCE-with-READ_ONCE-or-barriers.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0002-mm-replace-ACCESS_ONCE-with-READ_ONCE-or-barriers.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,38 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Sun, 7 Dec 2014 21:41:33 +0100
+Subject: mm: replace ACCESS_ONCE with READ_ONCE or barriers
+Origin: https://git.kernel.org/linus/e37c698270633327245beb0fbd8699db8a4b65b4
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Let's change the code to access the page table elements with
+READ_ONCE that does implicit scalar accesses for the gup code.
+
+mm_find_pmd is tricky, because m68k and sparc(32bit) define pmd_t
+as array of longs. This code requires just that the pmd_present
+and pmd_trans_huge check are done on the same value, so a barrier
+is sufficient.
+
+A similar case is in handle_pte_fault. On ppc44x the word size is
+32 bit, but a pte is 64 bit. A barrier is ok as well.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Cc: linux-mm at kvack.org
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+[bwh: Backported to 3.16: drop inapplicable changes]
+---
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -623,7 +623,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm,
+ * without holding anon_vma lock for write. So when looking for a
+ * genuine pmde (in which to find pte), test present and !THP together.
+ */
+- pmde = ACCESS_ONCE(*pmd);
++ pmde = *pmd;
++ barrier();
+ if (!pmd_present(pmde) || pmd_trans_huge(pmde))
+ pmd = NULL;
+ out:
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0003-x86-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0003-x86-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,55 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Mon, 24 Nov 2014 10:53:46 +0100
+Subject: x86/spinlock: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/4f9d1382e6f80dcfa891b2c02d5a35c53be485f1
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the spinlock code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ arch/x86/include/asm/spinlock.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lo
+ unsigned count = SPIN_THRESHOLD;
+
+ do {
+- if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
++ if (READ_ONCE(lock->tickets.head) == inc.tail)
+ goto out;
+ cpu_relax();
+ } while (--count);
+@@ -105,7 +105,7 @@ static __always_inline int arch_spin_try
+ {
+ arch_spinlock_t old, new;
+
+- old.tickets = ACCESS_ONCE(lock->tickets);
++ old.tickets = READ_ONCE(lock->tickets);
+ if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
+ return 0;
+
+@@ -162,14 +162,14 @@ static __always_inline void arch_spin_un
+
+ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+ {
+- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
++ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+
+ return tmp.tail != tmp.head;
+ }
+
+ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+ {
+- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
++ struct __raw_tickets tmp = READ_ONCE(lock->tickets);
+
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+ }
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0004-x86-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0004-x86-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,29 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Fri, 21 Nov 2014 16:29:40 +0100
+Subject: x86/gup: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/14cf3d977b80f5e355f8ac7547cf1b9ff9fb3e09
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the gup code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ arch/x86/mm/gup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -15,7 +15,7 @@
+ static inline pte_t gup_get_pte(pte_t *ptep)
+ {
+ #ifndef CONFIG_X86_PAE
+- return ACCESS_ONCE(*ptep);
++ return READ_ONCE(*ptep);
+ #else
+ /*
+ * With get_user_pages_fast, we walk down the pagetables without taking
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0005-mips-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0005-mips-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,29 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Fri, 21 Nov 2014 16:21:56 +0100
+Subject: mips/gup: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/4218091cb45f601b889cd032e39fe6878a426e70
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the gup code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ arch/mips/mm/gup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/mm/gup.c
++++ b/arch/mips/mm/gup.c
+@@ -30,7 +30,7 @@ retry:
+
+ return pte;
+ #else
+- return ACCESS_ONCE(*ptep);
++ return READ_ONCE(*ptep);
+ #endif
+ }
+
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0006-arm64-spinlock-Replace-ACCESS_ONCE-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0006-arm64-spinlock-Replace-ACCESS_ONCE-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,35 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Mon, 24 Nov 2014 10:53:11 +0100
+Subject: arm64/spinlock: Replace ACCESS_ONCE READ_ONCE
+Origin: https://git.kernel.org/linus/af2e7aaed1ccf30e61af3e096ac2c7df2f2d6c2a
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the spinlock code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ arch/arm64/include/asm/spinlock.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlock
+
+ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+ {
+- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
++ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+ }
+
+ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+ {
+- arch_spinlock_t lockval = ACCESS_ONCE(*lock);
++ arch_spinlock_t lockval = READ_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
+ }
+ #define arch_spin_is_contended arch_spin_is_contended
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0007-arm-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0007-arm-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,35 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 25 Nov 2014 11:44:26 +0100
+Subject: arm/spinlock: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/488beef1440e845751365202faace2465840ea98
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the spinlock code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ arch/arm/include/asm/spinlock.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/spinlock.h
++++ b/arch/arm/include/asm/spinlock.h
+@@ -120,12 +120,12 @@ static inline int arch_spin_value_unlock
+
+ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+ {
+- return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
++ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+ }
+
+ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+ {
+- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
++ struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+ return (tickets.next - tickets.owner) > 1;
+ }
+ #define arch_spin_is_contended arch_spin_is_contended
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0008-powerpc-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0008-powerpc-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,56 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 23 Apr 2015 02:00:34 +0100
+Subject: powerpc/gup: Replace ACCESS_ONCE with READ_ONCE
+Forwarded: not-yet
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the gup code to replace ACCESS_ONCE with READ_ONCE.
+
+This is not needed upstream as the code has been dropped in
+favour of the generic implementation which does not have this
+problem.
+
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+--- a/arch/powerpc/mm/gup.c
++++ b/arch/powerpc/mm/gup.c
+@@ -34,7 +34,7 @@ static noinline int gup_pte_range(pmd_t
+
+ ptep = pte_offset_kernel(&pmd, addr);
+ do {
+- pte_t pte = ACCESS_ONCE(*ptep);
++ pte_t pte = READ_ONCE(*ptep);
+ struct page *page;
+ /*
+ * Similar to the PMD case, NUMA hinting must take slow path
+@@ -68,7 +68,7 @@ static int gup_pmd_range(pud_t pud, unsi
+
+ pmdp = pmd_offset(&pud, addr);
+ do {
+- pmd_t pmd = ACCESS_ONCE(*pmdp);
++ pmd_t pmd = READ_ONCE(*pmdp);
+
+ next = pmd_addr_end(addr, end);
+ /*
+@@ -110,7 +110,7 @@ static int gup_pud_range(pgd_t pgd, unsi
+
+ pudp = pud_offset(&pgd, addr);
+ do {
+- pud_t pud = ACCESS_ONCE(*pudp);
++ pud_t pud = READ_ONCE(*pudp);
+
+ next = pud_addr_end(addr, end);
+ if (pud_none(pud))
+@@ -174,7 +174,7 @@ int __get_user_pages_fast(unsigned long
+
+ pgdp = pgd_offset(mm, addr);
+ do {
+- pgd_t pgd = ACCESS_ONCE(*pgdp);
++ pgd_t pgd = READ_ONCE(*pgdp);
+
+ pr_devel(" %016lx: normal pgd %p\n", addr,
+ (void *)pgd_val(pgd));
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0009-ppc-kvm-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0009-ppc-kvm-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,131 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 6 Jan 2015 22:41:46 +0100
+Subject: ppc/kvm: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/5ee07612e9e20817bb99256ab6cf1400fd5aa270
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the ppc/kvm code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rm_xics.c | 8 ++++----
+ arch/powerpc/kvm/book3s_xics.c | 16 ++++++++--------
+ 2 files changed, 12 insertions(+), 12 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+@@ -152,7 +152,7 @@ static void icp_rm_down_cppr(struct kvmp
+ * in virtual mode.
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+@@ -209,7 +209,7 @@ unsigned long kvmppc_rm_h_xirr(struct kv
+ * pending priority
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+@@ -260,7 +260,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcp
+ * ICP state: Check_IPI
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+@@ -332,7 +332,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vc
+ icp_rm_clr_vcpu_irq(icp->vcpu);
+
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+--- a/arch/powerpc/kvm/book3s_xics.c
++++ b/arch/powerpc/kvm/book3s_xics.c
+@@ -327,7 +327,7 @@ static bool icp_try_to_deliver(struct kv
+ icp->server_num);
+
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ *reject = 0;
+
+@@ -512,7 +512,7 @@ static void icp_down_cppr(struct kvmppc_
+ * in virtual mode.
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ /* Down_CPPR */
+ new_state.cppr = new_cppr;
+@@ -567,7 +567,7 @@ static noinline unsigned long kvmppc_h_x
+ * pending priority
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
+ if (!old_state.xisr)
+@@ -619,7 +619,7 @@ static noinline int kvmppc_h_ipi(struct
+ * ICP state: Check_IPI
+ */
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ /* Set_MFRR */
+ new_state.mfrr = mfrr;
+@@ -663,7 +663,7 @@ static int kvmppc_h_ipoll(struct kvm_vcp
+ if (!icp)
+ return H_PARAMETER;
+ }
+- state = ACCESS_ONCE(icp->state);
++ state = READ_ONCE(icp->state);
+ kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
+ kvmppc_set_gpr(vcpu, 5, state.mfrr);
+ return H_SUCCESS;
+@@ -705,7 +705,7 @@ static noinline void kvmppc_h_cppr(struc
+ BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+
+ do {
+- old_state = new_state = ACCESS_ONCE(icp->state);
++ old_state = new_state = READ_ONCE(icp->state);
+
+ reject = 0;
+ new_state.cppr = cppr;
+@@ -869,7 +869,7 @@ static int xics_debug_show(struct seq_fi
+ if (!icp)
+ continue;
+
+- state.raw = ACCESS_ONCE(icp->state.raw);
++ state.raw = READ_ONCE(icp->state.raw);
+ seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
+ icp->server_num, state.xisr,
+ state.pending_pri, state.cppr, state.mfrr,
+@@ -1066,7 +1066,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu
+ * the ICS states before the ICP states.
+ */
+ do {
+- old_state = ACCESS_ONCE(icp->state);
++ old_state = READ_ONCE(icp->state);
+
+ if (new_state.mfrr <= old_state.mfrr) {
+ resend = false;
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0010-ppc-hugetlbfs-Replace-ACCESS_ONCE-with-READ_ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0010-ppc-hugetlbfs-Replace-ACCESS_ONCE-with-READ_ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,37 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 6 Jan 2015 22:47:41 +0100
+Subject: ppc/hugetlbfs: Replace ACCESS_ONCE with READ_ONCE
+Origin: https://git.kernel.org/linus/da1a288d8562739aa8ba0273d4fb6b73b856c0d3
+
+ACCESS_ONCE does not work reliably on non-scalar types. For
+example gcc 4.6 and 4.7 might remove the volatile tag for such
+accesses during the SRA (scalar replacement of aggregates) step
+(https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145)
+
+Change the ppc/hugetlbfs code to replace ACCESS_ONCE with READ_ONCE.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+---
+ arch/powerpc/mm/hugetlbpage.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -971,7 +971,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *
+ */
+ pdshift = PUD_SHIFT;
+ pudp = pud_offset(&pgd, ea);
+- pud = ACCESS_ONCE(*pudp);
++ pud = READ_ONCE(*pudp);
+
+ if (pud_none(pud))
+ return NULL;
+@@ -983,7 +983,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *
+ else {
+ pdshift = PMD_SHIFT;
+ pmdp = pmd_offset(&pud, ea);
+- pmd = ACCESS_ONCE(*pmdp);
++ pmd = READ_ONCE(*pmdp);
+ /*
+ * A hugepage collapse is captured by pmd_none, because
+ * it mark the pmd none and do a hpte invalidate.
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0011-kernel-tighten-rules-for-ACCESS-ONCE.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0011-kernel-tighten-rules-for-ACCESS-ONCE.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,47 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 25 Nov 2014 10:16:39 +0100
+Subject: kernel: tighten rules for ACCESS ONCE
+Origin: https://git.kernel.org/linus/927609d622a3773995f84bc03b4564f873cf0e22
+
+Now that all non-scalar users of ACCESS_ONCE have been converted
+to READ_ONCE or ASSIGN_ONCE, let's tighten ACCESS_ONCE to only
+work on scalar types.
+This variant was proposed by Alexei Starovoitov.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Reviewed-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ include/linux/compiler.h | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -447,12 +447,23 @@ static __always_inline void __assign_onc
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+- * This macro does absolutely -nothing- to prevent the CPU from reordering,
+- * merging, or refetching absolutely anything at any time. Its main intended
+- * use is to mediate communication between process-level code and irq/NMI
+- * handlers, all running on the same CPU.
++ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
++ * on a union member will work as long as the size of the member matches the
++ * size of the union and the size is smaller than word size.
++ *
++ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
++ * between process-level code and irq/NMI handlers, all running on the same CPU,
++ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
++ * mutilate accesses that either do not require ordering or that interact
++ * with an explicit memory barrier or atomic instruction that provides the
++ * required ordering.
++ *
++ * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ */
+-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
++#define __ACCESS_ONCE(x) ({ \
++ __maybe_unused typeof(x) __var = 0; \
++ (volatile typeof(x) *)&(x); })
++#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
+
+ /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+ #ifdef CONFIG_KPROBES
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0012-next-sh-Fix-compile-error.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0012-next-sh-Fix-compile-error.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,35 @@
+From: Guenter Roeck <linux at roeck-us.net>
+Date: Wed, 7 Jan 2015 12:32:28 -0800
+Subject: next: sh: Fix compile error
+Origin: https://git.kernel.org/linus/378af02b1aecabb3756e19c0cbb8cdd9c3b9637f
+
+Commit 927609d622a3 ("kernel: tighten rules for ACCESS ONCE") results in a
+compile failure for sh builds with CONFIG_X2TLB enabled.
+
+arch/sh/mm/gup.c: In function 'gup_get_pte':
+arch/sh/mm/gup.c:20:2: error: invalid initializer
+make[1]: *** [arch/sh/mm/gup.o] Error 1
+
+Replace ACCESS_ONCE with READ_ONCE to fix the problem.
+
+Fixes: 927609d622a3 ("kernel: tighten rules for ACCESS ONCE")
+Cc: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+Cc: Christian Borntraeger <borntraeger at de.ibm.com>
+Signed-off-by: Guenter Roeck <linux at roeck-us.net>
+Reviewed-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+---
+ arch/sh/mm/gup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sh/mm/gup.c
++++ b/arch/sh/mm/gup.c
+@@ -17,7 +17,7 @@
+ static inline pte_t gup_get_pte(pte_t *ptep)
+ {
+ #ifndef CONFIG_X2TLB
+- return ACCESS_ONCE(*ptep);
++ return READ_ONCE(*ptep);
+ #else
+ /*
+ * With get_user_pages_fast, we walk down the pagetables without
Added: dists/sid/linux/debian/patches/bugfix/all/access_once/0013-kernel-Change-ASSIGN_ONCE-val-x-to-WRITE_ONCE-x-val.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/all/access_once/0013-kernel-Change-ASSIGN_ONCE-val-x-to-WRITE_ONCE-x-val.patch Thu Apr 23 15:41:16 2015 (r22523)
@@ -0,0 +1,58 @@
+From: Christian Borntraeger <borntraeger at de.ibm.com>
+Date: Tue, 13 Jan 2015 10:46:42 +0100
+Subject: kernel: Change ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val)
+Origin: https://git.kernel.org/linus/43239cbe79fc369f5d2160bd7f69e28b5c50a58c
+
+Feedback has shown that WRITE_ONCE(x, val) is easier to use than
+ASSIGN_ONCE(val,x).
+There are no in-tree users yet, so lets change it for 3.19.
+
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Acked-by: Peter Zijlstra <peterz at infradead.org>
+Acked-by: Davidlohr Bueso <dave at stgolabs.net>
+Acked-by: Paul E. McKenney <paulmck at linux.vnet.ibm.com>
+---
+ include/linux/compiler.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -215,7 +215,7 @@ static __always_inline void __read_once_
+ }
+ }
+
+-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
++static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+ {
+ switch (size) {
+ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+@@ -235,15 +235,15 @@ static __always_inline void __assign_onc
+ /*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
++ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering. One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
++ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
++ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+@@ -257,8 +257,8 @@ static __always_inline void __assign_onc
+ #define READ_ONCE(x) \
+ ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+
+-#define ASSIGN_ONCE(val, x) \
+- ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
++#define WRITE_ONCE(x, val) \
++ ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+
+ #endif /* __KERNEL__ */
+
Modified: dists/sid/linux/debian/patches/series
==============================================================================
--- dists/sid/linux/debian/patches/series Wed Apr 22 18:17:09 2015 (r22522)
+++ dists/sid/linux/debian/patches/series Thu Apr 23 15:41:16 2015 (r22523)
@@ -582,4 +582,21 @@
bugfix/x86/crypto-aesni-fix-memory-usage-in-GCM-decryption.patch
bugfix/all/tcp-fix-crash-in-tcp-fast-open.patch
+
+# Introduce READ_ONCE and WRITE_ONCE; replace misuse of ACCESS_ONCE.
+# Needed for backports of other fixes.
+bugfix/all/access_once/0001-kernel-Provide-READ_ONCE-and-ASSIGN_ONCE.patch
+bugfix/all/access_once/0002-mm-replace-ACCESS_ONCE-with-READ_ONCE-or-barriers.patch
+bugfix/all/access_once/0003-x86-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0004-x86-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0005-mips-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0006-arm64-spinlock-Replace-ACCESS_ONCE-READ_ONCE.patch
+bugfix/all/access_once/0007-arm-spinlock-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0008-powerpc-gup-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0009-ppc-kvm-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0010-ppc-hugetlbfs-Replace-ACCESS_ONCE-with-READ_ONCE.patch
+bugfix/all/access_once/0011-kernel-tighten-rules-for-ACCESS-ONCE.patch
+bugfix/all/access_once/0012-next-sh-Fix-compile-error.patch
+bugfix/all/access_once/0013-kernel-Change-ASSIGN_ONCE-val-x-to-WRITE_ONCE-x-val.patch
+
bugfix/all/fs-take-i_mutex-during-prepare_binprm-for-set-ug-id-.patch
More information about the Kernel-svn-changes
mailing list