[kernel] r14064 - in dists/sid/linux-2.6/debian: . patches/bugfix/all patches/series

Maximilian Attems maks at alioth.debian.org
Fri Jul 31 13:19:43 UTC 2009


Author: maks
Date: Fri Jul 31 13:19:41 2009
New Revision: 14064

Log:
revert big ABI breakers

If we bump the ABI we can still apply them,
but an ABI bump does not look likely for 2.6.30 right now.

Added:
   dists/sid/linux-2.6/debian/patches/bugfix/all/block-fix-sg-sg_dxfer_to_from_dev-regression.patch
   dists/sid/linux-2.6/debian/patches/bugfix/all/sched_rt-fix-overload-bug-on-rt-group-scheduling.patch
Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/series/5

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Fri Jul 31 13:19:34 2009	(r14063)
+++ dists/sid/linux-2.6/debian/changelog	Fri Jul 31 13:19:41 2009	(r14064)
@@ -7,6 +7,9 @@
     - ecryptfs: check tag 11 literal data buffer size (CVE-2009-2406)
     - ecryptfs: check tag 3 package encrypted size (CVE-2009-2407)
   * Ignore nf_conntrack ABI change.
+  * Revert to keep ABI:
+    - block: fix sg SG_DXFER_TO_FROM_DEV regression.
+    - sched_rt: Fix overload bug on rt group scheduling.
 
  -- dann frazier <dannf at debian.org>  Thu, 30 Jul 2009 11:10:47 -0600
 

Added: dists/sid/linux-2.6/debian/patches/bugfix/all/block-fix-sg-sg_dxfer_to_from_dev-regression.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/all/block-fix-sg-sg_dxfer_to_from_dev-regression.patch	Fri Jul 31 13:19:41 2009	(r14064)
@@ -0,0 +1,143 @@
+From ecb554a846f8e9d2a58f6d6c118168a63ac065aa Mon Sep 17 00:00:00 2001
+From: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+Date: Thu, 9 Jul 2009 14:46:53 +0200
+Subject: block: fix sg SG_DXFER_TO_FROM_DEV regression
+
+From: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+
+commit ecb554a846f8e9d2a58f6d6c118168a63ac065aa upstream.
+
+I overlooked SG_DXFER_TO_FROM_DEV support when I converted sg to use
+the block layer mapping API (2.6.28).
+
+Douglas Gilbert explained SG_DXFER_TO_FROM_DEV:
+
+http://www.spinics.net/lists/linux-scsi/msg37135.html
+
+=
+The semantics of SG_DXFER_TO_FROM_DEV were:
+   - copy user space buffer to kernel (LLD) buffer
+   - do SCSI command which is assumed to be of the DATA_IN
+     (data from device) variety. This would overwrite
+     some or all of the kernel buffer
+   - copy kernel (LLD) buffer back to the user space.
+
+The idea was to detect short reads by filling the original
+user space buffer with some marker bytes ("0xec" it would
+seem in this report). The "resid" value is a better way
+of detecting short reads but that was only added this century
+and requires co-operation from the LLD.
+=
+
+This patch changes the block layer mapping API to support these
+semantics. It simply adds another field to struct rq_map_data and
+enables __bio_copy_iov() to copy data from user space even with READ
+requests.
+
+It would be better to add a flags field and kill the null_mapped and
+new from_user fields in struct rq_map_data, but that approach makes it
+difficult to send this patch to stable trees because the st and osst
+drivers use struct rq_map_data (they were converted to use the block
+layer in 2.6.29 and 2.6.30). I should clean up the block layer
+mapping API later.
+
+zhou sf reported this regression and tested this patch:
+
+http://www.spinics.net/lists/linux-scsi/msg37128.html
+http://www.spinics.net/lists/linux-scsi/msg37168.html
+
+Reported-by: zhou sf <sxzzsf at gmail.com>
+Tested-by: zhou sf <sxzzsf at gmail.com>
+Signed-off-by: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+Signed-off-by: Jens Axboe <jens.axboe at oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+---
+ drivers/scsi/sg.c      |    4 ++++
+ fs/bio.c               |   22 ++++++++++++----------
+ include/linux/blkdev.h |    1 +
+ 3 files changed, 17 insertions(+), 10 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1656,6 +1656,10 @@ static int sg_start_req(Sg_request *srp,
+ 		md->nr_entries = req_schp->k_use_sg;
+ 		md->offset = 0;
+ 		md->null_mapped = hp->dxferp ? 0 : 1;
++		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
++			md->from_user = 1;
++		else
++			md->from_user = 0;
+ 	}
+ 
+ 	if (iov_count) {
+--- a/fs/bio.c
++++ b/fs/bio.c
+@@ -706,14 +706,13 @@ static struct bio_map_data *bio_alloc_ma
+ }
+ 
+ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
+-			  struct sg_iovec *iov, int iov_count, int uncopy,
+-			  int do_free_page)
++			  struct sg_iovec *iov, int iov_count,
++			  int to_user, int from_user, int do_free_page)
+ {
+ 	int ret = 0, i;
+ 	struct bio_vec *bvec;
+ 	int iov_idx = 0;
+ 	unsigned int iov_off = 0;
+-	int read = bio_data_dir(bio) == READ;
+ 
+ 	__bio_for_each_segment(bvec, bio, i, 0) {
+ 		char *bv_addr = page_address(bvec->bv_page);
+@@ -728,13 +727,14 @@ static int __bio_copy_iov(struct bio *bi
+ 			iov_addr = iov[iov_idx].iov_base + iov_off;
+ 
+ 			if (!ret) {
+-				if (!read && !uncopy)
+-					ret = copy_from_user(bv_addr, iov_addr,
+-							     bytes);
+-				if (read && uncopy)
++				if (to_user)
+ 					ret = copy_to_user(iov_addr, bv_addr,
+ 							   bytes);
+ 
++				if (from_user)
++					ret = copy_from_user(bv_addr, iov_addr,
++							     bytes);
++
+ 				if (ret)
+ 					ret = -EFAULT;
+ 			}
+@@ -771,7 +771,8 @@ int bio_uncopy_user(struct bio *bio)
+ 
+ 	if (!bio_flagged(bio, BIO_NULL_MAPPED))
+ 		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+-				     bmd->nr_sgvecs, 1, bmd->is_our_pages);
++				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
++				     0, bmd->is_our_pages);
+ 	bio_free_map_data(bmd);
+ 	bio_put(bio);
+ 	return ret;
+@@ -876,8 +877,9 @@ struct bio *bio_copy_user_iov(struct req
+ 	/*
+ 	 * success
+ 	 */
+-	if (!write_to_vm && (!map_data || !map_data->null_mapped)) {
+-		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
++	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
++	    (map_data && map_data->from_user)) {
++		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
+ 		if (ret)
+ 			goto cleanup;
+ 	}
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -723,6 +723,7 @@ struct rq_map_data {
+ 	int nr_entries;
+ 	unsigned long offset;
+ 	int null_mapped;
++	int from_user;
+ };
+ 
+ struct req_iterator {
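
For context, the SG_DXFER_TO_FROM_DEV semantics quoted in the commit message
above can be illustrated with a small userspace sketch. This is not part of
the commit; the device path (/dev/sg0), the INQUIRY command and the buffer
size are arbitrary example values, and the marker-byte trick is exactly the
short-read detection Douglas Gilbert describes.

/*
 * Hypothetical illustration only: fill the buffer with 0xec markers, issue a
 * DATA_IN command with SG_DXFER_TO_FROM_DEV, then treat any trailing markers
 * still present as an indication of a short read.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, ask for 96 bytes */
	unsigned char buf[96], sense[32];
	sg_io_hdr_t io;
	int fd, got, i;

	fd = open("/dev/sg0", O_RDWR);				/* example device node */
	if (fd < 0)
		return 1;

	memset(buf, 0xec, sizeof(buf));				/* marker bytes */
	memset(&io, 0, sizeof(io));
	io.interface_id    = 'S';
	io.dxfer_direction = SG_DXFER_TO_FROM_DEV;		/* copy in, run command, copy back */
	io.cmd_len         = sizeof(cdb);
	io.cmdp            = cdb;
	io.dxfer_len       = sizeof(buf);
	io.dxferp          = buf;
	io.mx_sb_len       = sizeof(sense);
	io.sbp             = sense;
	io.timeout         = 5000;				/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		return 1;

	/* Bytes that still carry the marker were never overwritten by the
	 * device, so the actual transfer was shorter than requested. */
	got = sizeof(buf);
	for (i = sizeof(buf) - 1; i >= 0 && buf[i] == 0xec; i--)
		got--;
	printf("requested %zu bytes, device returned at most %d (resid %d)\n",
	       sizeof(buf), got, io.resid);
	close(fd);
	return 0;
}

On kernels since the 2.6.28 conversion, the initial user-to-kernel copy was
skipped for this transfer direction, so the markers never reached the kernel
buffer and the scheme broke; with md->from_user set, the old behaviour is
restored.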

Added: dists/sid/linux-2.6/debian/patches/bugfix/all/sched_rt-fix-overload-bug-on-rt-group-scheduling.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/bugfix/all/sched_rt-fix-overload-bug-on-rt-group-scheduling.patch	Fri Jul 31 13:19:41 2009	(r14064)
@@ -0,0 +1,96 @@
+From a1ba4d8ba9f06a397e97cbd67a93ee306860b40a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz at infradead.org>
+Date: Wed, 1 Apr 2009 18:40:15 +0200
+Subject: sched_rt: Fix overload bug on rt group scheduling
+
+From: Peter Zijlstra <peterz at infradead.org>
+
+commit a1ba4d8ba9f06a397e97cbd67a93ee306860b40a upstream.
+
+Fixes an easily triggerable BUG() when setting process affinities.
+
+Make sure to count the number of migratable tasks in the same place:
+the root rt_rq. Otherwise the number doesn't make sense and we'll hit
+the BUG in set_cpus_allowed_rt().
+
+Also, make sure we only count tasks, not groups (this is probably
+already taken care of by the fact that rt_se->nr_cpus_allowed will be 0
+for groups, but be more explicit).
+
+Tested-by: Thomas Gleixner <tglx at linutronix.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
+Acked-by: Gregory Haskins <ghaskins at novell.com>
+LKML-Reference: <1247067476.9777.57.camel at twins>
+Signed-off-by: Ingo Molnar <mingo at elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+---
+ kernel/sched.c    |    1 +
+ kernel/sched_rt.c |   18 +++++++++++++++++-
+ 2 files changed, 18 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -497,6 +497,7 @@ struct rt_rq {
+ #endif
+ #ifdef CONFIG_SMP
+ 	unsigned long rt_nr_migratory;
++	unsigned long rt_nr_total;
+ 	int overloaded;
+ 	struct plist_head pushable_tasks;
+ #endif
+--- a/kernel/sched_rt.c
++++ b/kernel/sched_rt.c
+@@ -10,6 +10,8 @@ static inline struct task_struct *rt_tas
+ 
+ #ifdef CONFIG_RT_GROUP_SCHED
+ 
++#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
++
+ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+ {
+ 	return rt_rq->rq;
+@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(
+ 
+ #else /* CONFIG_RT_GROUP_SCHED */
+ 
++#define rt_entity_is_task(rt_se) (1)
++
+ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+ {
+ 	return container_of(rt_rq, struct rq, rt);
+@@ -73,7 +77,7 @@ static inline void rt_clear_overload(str
+ 
+ static void update_rt_migration(struct rt_rq *rt_rq)
+ {
+-	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
++	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
+ 		if (!rt_rq->overloaded) {
+ 			rt_set_overload(rq_of_rt_rq(rt_rq));
+ 			rt_rq->overloaded = 1;
+@@ -86,6 +90,12 @@ static void update_rt_migration(struct r
+ 
+ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
++	if (!rt_entity_is_task(rt_se))
++		return;
++
++	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
++
++	rt_rq->rt_nr_total++;
+ 	if (rt_se->nr_cpus_allowed > 1)
+ 		rt_rq->rt_nr_migratory++;
+ 
+@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sche
+ 
+ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+ {
++	if (!rt_entity_is_task(rt_se))
++		return;
++
++	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
++
++	rt_rq->rt_nr_total--;
+ 	if (rt_se->nr_cpus_allowed > 1)
+ 		rt_rq->rt_nr_migratory--;
+ 
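
The affinity change the commit message refers to is, from user space, just an
ordinary sched_setaffinity() call on a real-time task. The sketch below is
illustrative only and not a guaranteed reproducer: whether the unpatched
accounting actually trips the BUG() in set_cpus_allowed_rt() depends on
CONFIG_RT_GROUP_SCHED and the RT group setup, and it needs the privileges to
use SCHED_FIFO.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	cpu_set_t set;

	/* Become a real-time (SCHED_FIFO) task. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0) {
		perror("sched_setscheduler");
		return 1;
	}

	/* Pin the task to CPU 0: nr_cpus_allowed drops to 1, which is the
	 * transition the rt_nr_migratory/rt_nr_total accounting fixed above
	 * has to track at the root rt_rq. */
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set) < 0) {
		perror("sched_setaffinity");
		return 1;
	}

	puts("affinity changed for RT task");
	return 0;
}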

Modified: dists/sid/linux-2.6/debian/patches/series/5
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/5	Fri Jul 31 13:19:34 2009	(r14063)
+++ dists/sid/linux-2.6/debian/patches/series/5	Fri Jul 31 13:19:41 2009	(r14064)
@@ -1,3 +1,5 @@
 - bugfix/parisc/ensure-broadcast-tlb-purge-runs-single-threaded.patch
 - bugfix/parisc/fix-ldcw-inline-assembler.patch
 + bugfix/all/stable/2.6.30.4.patch
+- bugfix/all/block-fix-sg-sg_dxfer_to_from_dev-regression.patch
+- bugfix/all/sched_rt-fix-overload-bug-on-rt-group-scheduling.patch


