[linux] 02/03: percpu: Fix synchronization with chunk destruction (CVE-2016-4794)

debian-kernel at lists.debian.org
Fri Jun 24 22:19:52 UTC 2016


This is an automated email from the git hooks/post-receive script.

benh pushed a commit to branch sid
in repository linux.

commit f5f99415a20f4ed243d406fb3533fbe5cc01f65f
Author: Ben Hutchings <ben at decadent.org.uk>
Date:   Sat Jun 25 00:11:57 2016 +0200

    percpu: Fix synchronization with chunk destruction (CVE-2016-4794)
---
 debian/changelog                                   |   4 +
 ...synchronization-between-chunk-map_extend_.patch | 153 +++++++++++++++++++++
 ...synchronization-between-synchronous-map-e.patch | 104 ++++++++++++++
 debian/patches/series                              |   2 +
 4 files changed, 263 insertions(+)

diff --git a/debian/changelog b/debian/changelog
index 5a8271d..4866ab8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -5,6 +5,10 @@ linux (4.6.2-2) UNRELEASED; urgency=medium
     - mm/memcontrol: mem_cgroup_migrate() - replace another local_irq_disable()
       w. local_lock_irq()
   * KEYS: potential uninitialized variable (CVE-2016-4470)
+  * percpu: fix synchronization between chunk->map_extend_work and chunk
+    destruction (CVE-2016-4794)
+  * percpu: fix synchronization between synchronous map extension and
+    chunk destruction (CVE-2016-4794)
 
  -- Ben Hutchings <ben at decadent.org.uk>  Thu, 16 Jun 2016 12:37:27 +0100
 
diff --git a/debian/patches/bugfix/all/percpu-fix-synchronization-between-chunk-map_extend_.patch b/debian/patches/bugfix/all/percpu-fix-synchronization-between-chunk-map_extend_.patch
new file mode 100644
index 0000000..3410168
--- /dev/null
+++ b/debian/patches/bugfix/all/percpu-fix-synchronization-between-chunk-map_extend_.patch
@@ -0,0 +1,153 @@
+From: Tejun Heo <tj at kernel.org>
+Date: Wed, 25 May 2016 11:48:25 -0400
+Subject: percpu: fix synchronization between chunk->map_extend_work and chunk
+ destruction
+Origin: https://git.kernel.org/linus/4f996e234dad488e5d9ba0858bc1bae12eff82c3
+
+Atomic allocations can trigger async map extensions, which are
+serviced by chunk->map_extend_work.  pcpu_balance_work, which is
+responsible for destroying idle chunks, wasn't synchronizing properly
+against chunk->map_extend_work and could end up freeing the chunk
+while the work item was still in flight.
+
+This patch fixes the bug by rolling async map extension operations
+into pcpu_balance_work.
+
+Signed-off-by: Tejun Heo <tj at kernel.org>
+Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov at gmail.com>
+Reported-by: Vlastimil Babka <vbabka at suse.cz>
+Reported-by: Sasha Levin <sasha.levin at oracle.com>
+Cc: stable at vger.kernel.org # v3.18+
+Fixes: 9c824b6a172c ("percpu: make sure chunk->map array has available space")
+---
+ mm/percpu.c | 57 ++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 36 insertions(+), 21 deletions(-)
+
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 0c59684f1ff2..b1d2a3844792 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -112,7 +112,7 @@ struct pcpu_chunk {
+ 	int			map_used;	/* # of map entries used before the sentry */
+ 	int			map_alloc;	/* # of map entries allocated */
+ 	int			*map;		/* allocation map */
+-	struct work_struct	map_extend_work;/* async ->map[] extension */
++	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
+ 
+ 	void			*data;		/* chunk data */
+ 	int			first_free;	/* no free below this */
+@@ -166,6 +166,9 @@ static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
+ 
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+ 
++/* chunks which need their map areas extended, protected by pcpu_lock */
++static LIST_HEAD(pcpu_map_extend_chunks);
++
+ /*
+  * The number of empty populated pages, protected by pcpu_lock.  The
+  * reserved chunk doesn't contribute to the count.
+@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
+ {
+ 	int margin, new_alloc;
+ 
++	lockdep_assert_held(&pcpu_lock);
++
+ 	if (is_atomic) {
+ 		margin = 3;
+ 
+ 		if (chunk->map_alloc <
+-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
+-		    pcpu_async_enabled)
+-			schedule_work(&chunk->map_extend_work);
++		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
++			if (list_empty(&chunk->map_extend_list)) {
++				list_add_tail(&chunk->map_extend_list,
++					      &pcpu_map_extend_chunks);
++				pcpu_schedule_balance_work();
++			}
++		}
+ 	} else {
+ 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+ 	}
+@@ -467,20 +476,6 @@ out_unlock:
+ 	return 0;
+ }
+ 
+-static void pcpu_map_extend_workfn(struct work_struct *work)
+-{
+-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+-						map_extend_work);
+-	int new_alloc;
+-
+-	spin_lock_irq(&pcpu_lock);
+-	new_alloc = pcpu_need_to_extend(chunk, false);
+-	spin_unlock_irq(&pcpu_lock);
+-
+-	if (new_alloc)
+-		pcpu_extend_area_map(chunk, new_alloc);
+-}
+-
+ /**
+  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
+  * @chunk: chunk the candidate area belongs to
+@@ -740,7 +735,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+ 	chunk->map_used = 1;
+ 
+ 	INIT_LIST_HEAD(&chunk->list);
+-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&chunk->map_extend_list);
+ 	chunk->free_size = pcpu_unit_size;
+ 	chunk->contig_hint = pcpu_unit_size;
+ 
+@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
+ 			continue;
+ 
++		list_del_init(&chunk->map_extend_list);
+ 		list_move(&chunk->list, &to_free);
+ 	}
+ 
+@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		pcpu_destroy_chunk(chunk);
+ 	}
+ 
++	/* service chunks which requested async area map extension */
++	do {
++		int new_alloc = 0;
++
++		spin_lock_irq(&pcpu_lock);
++
++		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
++					struct pcpu_chunk, map_extend_list);
++		if (chunk) {
++			list_del_init(&chunk->map_extend_list);
++			new_alloc = pcpu_need_to_extend(chunk, false);
++		}
++
++		spin_unlock_irq(&pcpu_lock);
++
++		if (new_alloc)
++			pcpu_extend_area_map(chunk, new_alloc);
++	} while (chunk);
++
+ 	/*
+ 	 * Ensure there are certain number of free populated pages for
+ 	 * atomic allocs.  Fill up from the most packed so that atomic
+@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	 */
+ 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 	INIT_LIST_HEAD(&schunk->list);
+-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&schunk->map_extend_list);
+ 	schunk->base_addr = base_addr;
+ 	schunk->map = smap;
+ 	schunk->map_alloc = ARRAY_SIZE(smap);
+@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	if (dyn_size) {
+ 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 		INIT_LIST_HEAD(&dchunk->list);
+-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
++		INIT_LIST_HEAD(&dchunk->map_extend_list);
+ 		dchunk->base_addr = base_addr;
+ 		dchunk->map = dmap;
+ 		dchunk->map_alloc = ARRAY_SIZE(dmap);
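
[The shape of the first fix can be modeled outside the kernel.  The
sketch below is a minimal userspace analogue, not the kernel code:
names such as request_extension() and destroy_chunk() are hypothetical,
and a pthread mutex stands in for pcpu_lock.  It shows why putting
would-be-extended chunks on one shared list, drained by the same worker
that also destroys chunks, closes the use-after-free window that a
per-chunk work item left open.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
	struct chunk *next;	/* link on the shared extend list */
	bool queued;		/* models list_empty(&chunk->map_extend_list) */
	int map_alloc;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* models pcpu_lock */
static struct chunk *extend_head;	/* models pcpu_map_extend_chunks */

/* Allocator side: queue the chunk instead of scheduling per-chunk work. */
static void request_extension(struct chunk *c)
{
	pthread_mutex_lock(&lock);
	if (!c->queued) {	/* avoid double-queueing */
		c->next = extend_head;
		extend_head = c;
		c->queued = true;
	}
	pthread_mutex_unlock(&lock);
	/* the real code now kicks the single balance worker */
}

/* Balance worker: services extension requests one chunk at a time. */
static void balance_work(void)
{
	for (;;) {
		struct chunk *c;

		pthread_mutex_lock(&lock);
		c = extend_head;
		if (c) {
			extend_head = c->next;
			c->queued = false;
		}
		pthread_mutex_unlock(&lock);

		if (!c)
			break;
		c->map_alloc *= 2;	/* stand-in for pcpu_extend_area_map() */
	}
}

/* Destruction: unlink the chunk while holding the lock, so a freed
 * chunk can never be picked up by a later extension pass.  In the
 * kernel this runs in the same work function as the extension pass
 * above, which is what makes the two mutually exclusive. */
static void destroy_chunk(struct chunk *c)
{
	pthread_mutex_lock(&lock);
	if (c->queued) {
		struct chunk **p = &extend_head;

		while (*p != c)
			p = &(*p)->next;
		*p = c->next;
		c->queued = false;
	}
	pthread_mutex_unlock(&lock);
	free(c);
}

int main(void)
{
	struct chunk *c = calloc(1, sizeof(*c));

	c->map_alloc = 16;
	request_extension(c);
	balance_work();		/* map_alloc becomes 32 */
	printf("map_alloc = %d\n", c->map_alloc);
	destroy_chunk(c);
	return 0;
}
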
diff --git a/debian/patches/bugfix/all/percpu-fix-synchronization-between-synchronous-map-e.patch b/debian/patches/bugfix/all/percpu-fix-synchronization-between-synchronous-map-e.patch
new file mode 100644
index 0000000..e8a70c5
--- /dev/null
+++ b/debian/patches/bugfix/all/percpu-fix-synchronization-between-synchronous-map-e.patch
@@ -0,0 +1,104 @@
+From: Tejun Heo <tj at kernel.org>
+Date: Wed, 25 May 2016 11:48:25 -0400
+Subject: percpu: fix synchronization between synchronous map extension and
+ chunk destruction
+Origin: https://git.kernel.org/linus/6710e594f71ccaad8101bc64321152af7cd9ea28
+
+For non-atomic allocations, pcpu_alloc() can try to extend the area
+map synchronously after dropping pcpu_lock; however, the extension
+wasn't synchronized against chunk destruction and the chunk might get
+freed while extension is in progress.
+
+This patch fixes the bug by putting most of the non-atomic allocation
+path under pcpu_alloc_mutex to synchronize against pcpu_balance_work,
+which is responsible for async chunk management including destruction.
+
+Signed-off-by: Tejun Heo <tj at kernel.org>
+Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov at gmail.com>
+Reported-by: Vlastimil Babka <vbabka at suse.cz>
+Reported-by: Sasha Levin <sasha.levin at oracle.com>
+Cc: stable at vger.kernel.org # v3.18+
+Fixes: 1a4d76076cda ("percpu: implement asynchronous chunk population")
+---
+ mm/percpu.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/mm/percpu.c b/mm/percpu.c
+index b1d2a3844792..9903830aaebb 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -162,7 +162,7 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
+ static int pcpu_reserved_chunk_limit;
+ 
+ static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
+-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
++static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
+ 
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+ 
+@@ -444,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+ 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+ 	unsigned long flags;
+ 
++	lockdep_assert_held(&pcpu_alloc_mutex);
++
+ 	new = pcpu_mem_zalloc(new_size);
+ 	if (!new)
+ 		return -ENOMEM;
+@@ -890,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ 		return NULL;
+ 	}
+ 
++	if (!is_atomic)
++		mutex_lock(&pcpu_alloc_mutex);
++
+ 	spin_lock_irqsave(&pcpu_lock, flags);
+ 
+ 	/* serve reserved allocations from the reserved chunk if available */
+@@ -962,12 +967,9 @@ restart:
+ 	if (is_atomic)
+ 		goto fail;
+ 
+-	mutex_lock(&pcpu_alloc_mutex);
+-
+ 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+ 		chunk = pcpu_create_chunk();
+ 		if (!chunk) {
+-			mutex_unlock(&pcpu_alloc_mutex);
+ 			err = "failed to allocate new chunk";
+ 			goto fail;
+ 		}
+@@ -978,7 +980,6 @@ restart:
+ 		spin_lock_irqsave(&pcpu_lock, flags);
+ 	}
+ 
+-	mutex_unlock(&pcpu_alloc_mutex);
+ 	goto restart;
+ 
+ area_found:
+@@ -988,8 +989,6 @@ area_found:
+ 	if (!is_atomic) {
+ 		int page_start, page_end, rs, re;
+ 
+-		mutex_lock(&pcpu_alloc_mutex);
+-
+ 		page_start = PFN_DOWN(off);
+ 		page_end = PFN_UP(off + size);
+ 
+@@ -1000,7 +999,6 @@ area_found:
+ 
+ 			spin_lock_irqsave(&pcpu_lock, flags);
+ 			if (ret) {
+-				mutex_unlock(&pcpu_alloc_mutex);
+ 				pcpu_free_area(chunk, off, &occ_pages);
+ 				err = "failed to populate";
+ 				goto fail_unlock;
+@@ -1040,6 +1038,8 @@ fail:
+ 		/* see the flag handling in pcpu_blance_workfn() */
+ 		pcpu_atomic_alloc_failed = true;
+ 		pcpu_schedule_balance_work();
++	} else {
++		mutex_unlock(&pcpu_alloc_mutex);
+ 	}
+ 	return NULL;
+ }
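
[The second fix is a lock-ordering change rather than a data-structure
change, and can likewise be modeled in userspace.  In this minimal
sketch (hypothetical names; pthread mutexes stand in for
pcpu_alloc_mutex and pcpu_lock), the mutex is taken up front for the
whole non-atomic allocation, so a synchronous map extension that runs
with the spinlock dropped can no longer race a chunk being freed by the
balance worker, which needs the same mutex.]

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER; /* pcpu_alloc_mutex */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* pcpu_lock */

/* Caller must hold alloc_mutex (models lockdep_assert_held()). */
static void extend_area_map(void)
{
	/* reallocate the area map with `lock` dropped; safe because the
	 * destroyer also needs alloc_mutex and so cannot run now */
}

static void *alloc_sketch(size_t size, bool atomic)
{
	(void)size;

	if (!atomic)
		pthread_mutex_lock(&alloc_mutex);	/* taken up front now */

	pthread_mutex_lock(&lock);
	/* ... look for space; decide whether the map must grow ... */
	pthread_mutex_unlock(&lock);

	if (!atomic) {
		extend_area_map();
		pthread_mutex_unlock(&alloc_mutex);
	}
	return NULL;	/* allocation itself elided */
}

/* Balance worker: chunk destruction is excluded against the whole
 * synchronous allocation path, not just individual steps of it. */
static void balance_work(void)
{
	pthread_mutex_lock(&alloc_mutex);
	/* ... free idle chunks; no synchronous extension can be running ... */
	pthread_mutex_unlock(&alloc_mutex);
}

int main(void)
{
	alloc_sketch(64, false);	/* non-atomic path */
	balance_work();
	return 0;
}
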
diff --git a/debian/patches/series b/debian/patches/series
index c3affbc..3d57f4f 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -113,6 +113,8 @@ bugfix/all/sched-panic-on-corrupted-stack-end.patch
 bugfix/all/tipc-fix-an-infoleak-in-tipc_nl_compat_link_dump.patch
 bugfix/all/rds-fix-an-infoleak-in-rds_inc_info_copy.patch
 bugfix/all/keys-potential-uninitialized-variable.patch
+bugfix/all/percpu-fix-synchronization-between-chunk-map_extend_.patch
+bugfix/all/percpu-fix-synchronization-between-synchronous-map-e.patch
 
 # ABI maintenance
 debian/mips-siginfo-fix-abi-change-in-4.6.2.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git


