[kernel] r16345 - in dists/sid/linux-2.6/debian: . patches/features/all/openvz patches/series

Maximilian Attems maks at alioth.debian.org
Thu Sep 23 19:53:07 UTC 2010


Author: maks
Date: Thu Sep 23 19:53:01 2010
New Revision: 16345

Log:
update openvz patch

They merged 2.6.32.22 in.
One less conflict in tun.c, thanks to a stable patch.
Not much other activity.

Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch
   dists/sid/linux-2.6/debian/patches/series/24-extra

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Thu Sep 23 18:47:45 2010	(r16344)
+++ dists/sid/linux-2.6/debian/changelog	Thu Sep 23 19:53:01 2010	(r16345)
@@ -6,7 +6,7 @@
   * snd-hda-codec-via: Fix syntax error when CONFIG_SND_HDA_POWER_SAVE is
     disabled (Closes: #597043)
   * Add stable 2.6.32.22:
-    - [openvz,vserver] Revert sched changes since they conflict with
+    - [vserver] Revert sched changes since they conflict with
       these featuresets
   * Recommend use of 'make deb-pkg' to build custom kernel packages
   * [x86] Revert "i915: Blacklist i830, i845, i855 for KMS". The current X
@@ -20,6 +20,9 @@
   [ Ian Campbell ]
   * [x86/xen] Disable netfront's smartpoll mode by default. (Closes: #596635)
 
+  [ maximilian attems ]
+  * [openvz] Update upstream patch to 2.6.32-dyomin.
+
  -- Ben Hutchings <ben at decadent.org.uk>  Sat, 18 Sep 2010 17:29:34 +0100
 
 linux-2.6 (2.6.32-23) unstable; urgency=low

Modified: dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Thu Sep 23 18:47:45 2010	(r16344)
+++ dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Thu Sep 23 19:53:01 2010	(r16345)
@@ -1,3 +1,37 @@
+commit 763921f076cdfd79359ffdc279edf8ee45d31691
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Tue Sep 21 18:24:37 2010 +0400
+
+    OpenVZ kernel 2.6.32-dyomin released
+    
+    Named after Lev Stepanovich Dyomin - a soviet cosmonaut
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 47ac42d7e3717b4f795ae01fd8a3267ccd744816
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Tue Sep 21 18:37:02 2010 +0400
+
+    ubc: Remove cpuset_(un)lock calls
+    
+    These were removed with .22 stable update
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit a2c26f9437204f91c587d272d0e8c4548e2023b3
+Merge: d38b56f eaa1ace
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Tue Sep 21 18:22:39 2010 +0400
+
+    Merged linux-2.6.32.22
+    
+    Conflicts:
+    
+    	Makefile
+    	drivers/net/tun.c
+    	kernel/cpu.c
+    	kernel/sched.c
+
 commit d38b56fd0dcacadcaeaa2e6b66260028cde13931
 Author: Pavel Emelyanov <xemul at openvz.org>
 Date:   Fri Sep 3 17:34:05 2010 +0400
@@ -6453,14 +6487,14 @@
 +library.  If this is what you want to do, use the GNU Library General
 +Public License instead of this License.
 diff --git a/Makefile b/Makefile
-index 3e7196f..0dae08c 100644
+index 1786938..c11ec6e 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -2,6 +2,7 @@ VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 32
  EXTRAVERSION =
-+VZVERSION = dobrovolskiy
++VZVERSION = dyomin
  NAME = Man-Eating Seals of Antiquity
  
  # *DOCUMENTATION*
@@ -6503,10 +6537,10 @@
 +
 +source "kernel/bc/Kconfig"
 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 5294d84..a920d42 100644
+index 4edd8eb..94568d9 100644
 --- a/arch/x86/ia32/ia32entry.S
 +++ b/arch/x86/ia32/ia32entry.S
-@@ -617,7 +617,7 @@ ia32_sys_call_table:
+@@ -623,7 +623,7 @@ ia32_sys_call_table:
  	.quad stub32_iopl		/* 110 */
  	.quad sys_vhangup
  	.quad quiet_ni_syscall	/* old "idle" system call */
@@ -6515,7 +6549,7 @@
  	.quad compat_sys_wait4
  	.quad sys_swapoff		/* 115 */
  	.quad compat_sys_sysinfo
-@@ -670,7 +670,7 @@ ia32_sys_call_table:
+@@ -676,7 +676,7 @@ ia32_sys_call_table:
  	.quad sys_mremap
  	.quad sys_setresuid16
  	.quad sys_getresuid16	/* 165 */
@@ -6524,7 +6558,7 @@
  	.quad quiet_ni_syscall	/* query_module */
  	.quad sys_poll
  	.quad compat_sys_nfsservctl
-@@ -841,4 +841,25 @@ ia32_sys_call_table:
+@@ -847,4 +847,25 @@ ia32_sys_call_table:
  	.quad compat_sys_pwritev
  	.quad compat_sys_rt_tgsigqueueinfo	/* 335 */
  	.quad sys_perf_event_open
@@ -6669,7 +6703,7 @@
  #ifdef CONFIG_X86_32
  
 diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
-index c042729..6e7f232 100644
+index 1ca132f..0eb0ce3 100644
 --- a/arch/x86/include/asm/tsc.h
 +++ b/arch/x86/include/asm/tsc.h
 @@ -24,7 +24,7 @@ static inline cycles_t get_cycles(void)
@@ -12270,7 +12304,7 @@
  	if (!sk)
  		goto out;
 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
-index 4fdfa2a..a052759 100644
+index 0f77aca..a052759 100644
 --- a/drivers/net/tun.c
 +++ b/drivers/net/tun.c
 @@ -61,6 +61,7 @@
@@ -12448,7 +12482,22 @@
  		tun_net_init(dev);
  
  		if (strchr(dev->name, '%')) {
-@@ -1316,6 +1341,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+@@ -1006,10 +1030,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ 		if (err < 0)
+ 			goto err_free_sk;
+ 
+-		if (!net_eq(dev_net(tun->dev), &init_net) ||
+-		    device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
+-		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
+-		    device_create_file(&tun->dev->dev, &dev_attr_group))
++		if ((dev_net(tun->dev) == &init_net) &&
++			(device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
++			device_create_file(&tun->dev->dev, &dev_attr_owner) ||
++			device_create_file(&tun->dev->dev, &dev_attr_group)))
+ 			printk(KERN_ERR "Failed to create tun sysfs files\n");
+ 
+ 		sk->sk_destruct = tun_sock_destruct;
+@@ -1317,6 +1341,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
  	tfile->tun = NULL;
  	tfile->net = get_net(current->nsproxy->net_ns);
  	file->private_data = tfile;
@@ -12456,7 +12505,7 @@
  	return 0;
  }
  
-@@ -1457,6 +1483,226 @@ static const struct ethtool_ops tun_ethtool_ops = {
+@@ -1458,6 +1483,226 @@ static const struct ethtool_ops tun_ethtool_ops = {
  	.set_rx_csum	= tun_set_rx_csum
  };
  
@@ -12683,7 +12732,7 @@
  
  static int __init tun_init(void)
  {
-@@ -1476,6 +1722,8 @@ static int __init tun_init(void)
+@@ -1477,6 +1722,8 @@ static int __init tun_init(void)
  		printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
  		goto err_misc;
  	}
@@ -12692,7 +12741,7 @@
  	return  0;
  err_misc:
  	rtnl_link_unregister(&tun_link_ops);
-@@ -1485,6 +1733,7 @@ err_linkops:
+@@ -1486,6 +1733,7 @@ err_linkops:
  
  static void tun_cleanup(void)
  {
@@ -14773,7 +14822,7 @@
  		send_sig(SIGKILL, current, 0);
  		goto out;
 diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
-index c4e8353..8180165 100644
+index 42b60b0..5b4a80b 100644
 --- a/fs/binfmt_misc.c
 +++ b/fs/binfmt_misc.c
 @@ -28,6 +28,7 @@
@@ -18347,7 +18396,7 @@
  	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
  
 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
-index 127ed5c..95a31c8 100644
+index 19cbbf7..d7ec963 100644
 --- a/fs/nfs/client.c
 +++ b/fs/nfs/client.c
 @@ -125,6 +125,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
@@ -27802,7 +27851,7 @@
  	error = 0;
  	if (sd->s_parent == new_parent_sd)
 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
-index f5ea468..b135ba6 100644
+index 7118a38..00bae21 100644
 --- a/fs/sysfs/file.c
 +++ b/fs/sysfs/file.c
 @@ -536,6 +536,8 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
@@ -30310,7 +30359,7 @@
 +
 +/* */
 diff --git a/include/linux/compat.h b/include/linux/compat.h
-index af931ee..499d84a 100644
+index cab23f2..9b9fa9c 100644
 --- a/include/linux/compat.h
 +++ b/include/linux/compat.h
 @@ -258,6 +258,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
@@ -34425,7 +34474,7 @@
  static inline void page_dup_rmap(struct page *page)
  {
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index cc24beb..e876c91 100644
+index 957a25f..184afdc 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -94,6 +94,8 @@ struct sched_param {
@@ -34485,7 +34534,7 @@
  
  
  extern void calc_global_load(void);
-@@ -553,6 +580,9 @@ struct thread_group_cputimer {
+@@ -552,6 +579,9 @@ struct thread_group_cputimer {
  	spinlock_t lock;
  };
  
@@ -34495,7 +34544,7 @@
  /*
   * NOTE! "signal_struct" does not have it's own
   * locking, because a shared signal_struct always
-@@ -1286,6 +1316,7 @@ struct task_struct {
+@@ -1289,6 +1319,7 @@ struct task_struct {
  	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
  				 * execve */
  	unsigned in_iowait:1;
@@ -34503,7 +34552,7 @@
  
  
  	/* Revert to default priority/policy when forking */
-@@ -1501,6 +1532,14 @@ struct task_struct {
+@@ -1504,6 +1535,14 @@ struct task_struct {
  	struct rcu_head rcu;
  
  	/*
@@ -34518,7 +34567,7 @@
  	 * cache last used pipe for splice
  	 */
  	struct pipe_inode_info *splice_pipe;
-@@ -1544,6 +1583,19 @@ struct task_struct {
+@@ -1547,6 +1586,19 @@ struct task_struct {
  	unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
  	unsigned long stack_start;
@@ -34538,7 +34587,7 @@
  };
  
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1730,6 +1782,43 @@ extern cputime_t task_stime(struct task_struct *p);
+@@ -1733,6 +1785,43 @@ extern cputime_t task_stime(struct task_struct *p);
  extern cputime_t task_gtime(struct task_struct *p);
  extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
  
@@ -34582,7 +34631,7 @@
  /*
   * Per process flags
   */
-@@ -1739,6 +1828,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
+@@ -1742,6 +1831,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
  #define PF_EXITING	0x00000004	/* getting shut down */
  #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
  #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -34590,7 +34639,7 @@
  #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
  #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
  #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
-@@ -1875,6 +1965,21 @@ extern unsigned long long
+@@ -1878,6 +1968,21 @@ extern unsigned long long
  task_sched_runtime(struct task_struct *task);
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
  
@@ -34612,7 +34661,7 @@
  /* sched_exec is called by processes performing an exec */
  #ifdef CONFIG_SMP
  extern void sched_exec(void);
-@@ -2154,6 +2259,13 @@ extern int disallow_signal(int);
+@@ -2158,6 +2263,13 @@ extern int disallow_signal(int);
  
  extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
  extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
@@ -34626,7 +34675,7 @@
  struct task_struct *fork_idle(int);
  
  extern void set_task_comm(struct task_struct *tsk, char *from);
-@@ -2171,11 +2283,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
+@@ -2175,11 +2287,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
  }
  #endif
  
@@ -34641,7 +34690,7 @@
  
  extern bool current_is_single_threaded(void);
  
-@@ -2183,10 +2295,10 @@ extern bool current_is_single_threaded(void);
+@@ -2187,10 +2299,10 @@ extern bool current_is_single_threaded(void);
   * Careful: do_each_thread/while_each_thread is a double loop so
   *          'break' will not work as expected - use goto instead.
   */
@@ -34655,7 +34704,7 @@
  	while ((t = next_thread(t)) != g)
  
  /* de_thread depends on thread_group_leader not being a pid based check */
-@@ -2211,8 +2323,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+@@ -2215,8 +2327,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
  
  static inline struct task_struct *next_thread(const struct task_struct *p)
  {
@@ -34671,7 +34720,7 @@
  }
  
  static inline int thread_group_empty(struct task_struct *p)
-@@ -2257,6 +2375,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
+@@ -2261,6 +2379,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
  	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
  }
  
@@ -43328,10 +43377,10 @@
 +}
 diff --git a/kernel/bc/oom_kill.c b/kernel/bc/oom_kill.c
 new file mode 100644
-index 0000000..c79e826
+index 0000000..106e41f
 --- /dev/null
 +++ b/kernel/bc/oom_kill.c
-@@ -0,0 +1,200 @@
+@@ -0,0 +1,195 @@
 +#include <linux/wait.h>
 +#include <linux/sched.h>
 +#include <linux/mm.h>
@@ -43371,7 +43420,6 @@
 +	rcu_read_unlock();
 +}
 +
-+/* Called with cpuset_lock held */
 +int ub_oom_lock(void)
 +{
 +	int timeout;
@@ -43397,11 +43445,9 @@
 +		__set_current_state(TASK_UNINTERRUPTIBLE);
 +		add_wait_queue(&oom_wq, &oom_w);
 +		spin_unlock(&oom_lock);
-+		cpuset_unlock();
 +
 +		timeout = schedule_timeout(timeout);
 +
-+		cpuset_lock();
 +		spin_lock(&oom_lock);
 +		remove_wait_queue(&oom_wq, &oom_w);
 +	}
@@ -43510,7 +43556,6 @@
 +	struct user_beancounter *ub;
 +	struct task_struct *p;
 +
-+	cpuset_lock();
 +	spin_lock(&oom_lock);
 +	ub_clear_oom();
 +	ub = get_beancounter(scope);
@@ -43529,7 +43574,6 @@
 +unlock:
 +	read_unlock(&tasklist_lock);
 +	spin_unlock(&oom_lock);
-+	cpuset_unlock();
 +}
 +EXPORT_SYMBOL(ub_out_of_memory);
 diff --git a/kernel/bc/proc.c b/kernel/bc/proc.c
@@ -46273,7 +46317,7 @@
 +	return 0;
 +}
 diff --git a/kernel/compat.c b/kernel/compat.c
-index 180d188..a0c2fa9 100644
+index 8bc5578..4a5f07f 100644
 --- a/kernel/compat.c
 +++ b/kernel/compat.c
 @@ -22,6 +22,7 @@
@@ -46284,7 +46328,7 @@
  #include <linux/posix-timers.h>
  #include <linux/times.h>
  #include <linux/ptrace.h>
-@@ -100,7 +101,7 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
+@@ -101,7 +102,7 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user
  			__put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
  }
  
@@ -46293,7 +46337,7 @@
  {
  	struct compat_timespec __user *rmtp;
  	struct timespec rmt;
-@@ -122,6 +123,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
+@@ -123,6 +124,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
  
  	return ret;
  }
@@ -68097,7 +68141,7 @@
 +	return 0;
 +}
 diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 291ac58..63381db 100644
+index 7e8b6ac..cc7cdaf 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
 @@ -150,7 +150,7 @@ static inline void check_for_tasks(int cpu)
@@ -68106,7 +68150,7 @@
  	write_lock_irq(&tasklist_lock);
 -	for_each_process(p) {
 +	for_each_process_all(p) {
- 		if (task_cpu(p) == cpu &&
+ 		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
  		    (!cputime_eq(p->utime, cputime_zero) ||
  		     !cputime_eq(p->stime, cputime_zero)))
 diff --git a/kernel/exit.c b/kernel/exit.c
@@ -68989,7 +69033,7 @@
 +
 +#endif /* CONFIG_PROC_FS */
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 9f3b066..1932409 100644
+index 4bde56f..b091ef4 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -64,6 +64,8 @@
@@ -69219,7 +69263,7 @@
  		}
  	}
  
-@@ -1268,7 +1313,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1253,7 +1298,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	 * thread can't slip out of an OOM kill (or normal SIGKILL).
   	 */
  	recalc_sigpending();
@@ -69228,7 +69272,7 @@
  		spin_unlock(&current->sighand->siglock);
  		write_unlock_irq(&tasklist_lock);
  		retval = -ERESTARTNOINTR;
-@@ -1296,14 +1341,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1281,14 +1326,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
  			attach_pid(p, PIDTYPE_SID, task_session(current));
  			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -69253,7 +69297,7 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_fork_connector(p);
  	cgroup_post_fork(p);
-@@ -1346,6 +1401,9 @@ bad_fork_cleanup_count:
+@@ -1331,6 +1386,9 @@ bad_fork_cleanup_count:
  	atomic_dec(&p->cred->user->processes);
  	exit_creds(p);
  bad_fork_free:
@@ -69263,7 +69307,7 @@
  	free_task(p);
  fork_out:
  	return ERR_PTR(retval);
-@@ -1363,7 +1421,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1348,7 +1406,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
  	struct pt_regs regs;
  
  	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
@@ -69272,7 +69316,7 @@
  	if (!IS_ERR(task))
  		init_idle(task, cpu);
  
-@@ -1376,12 +1434,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1361,12 +1419,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
   * It copies the process, and if successful kick-starts
   * it and waits for it to finish using the VM if required.
   */
@@ -69288,7 +69332,7 @@
  {
  	struct task_struct *p;
  	int trace = 0;
-@@ -1419,6 +1478,10 @@ long do_fork(unsigned long clone_flags,
+@@ -1404,6 +1463,10 @@ long do_fork(unsigned long clone_flags,
  		}
  	}
  
@@ -69299,7 +69343,7 @@
  	/*
  	 * When called from kernel_thread, don't do user tracing stuff.
  	 */
-@@ -1426,7 +1489,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1411,7 +1474,7 @@ long do_fork(unsigned long clone_flags,
  		trace = tracehook_prepare_clone(clone_flags);
  
  	p = copy_process(clone_flags, stack_start, regs, stack_size,
@@ -69308,7 +69352,7 @@
  	/*
  	 * Do this prior waking up the new thread - the thread pointer
  	 * might get invalid after that point, if the thread exits quickly.
-@@ -1457,6 +1520,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1442,6 +1505,8 @@ long do_fork(unsigned long clone_flags,
  		 */
  		p->flags &= ~PF_STARTING;
  
@@ -69317,7 +69361,7 @@
  		if (unlikely(clone_flags & CLONE_STOPPED)) {
  			/*
  			 * We'll start up with an immediate SIGSTOP.
-@@ -1480,6 +1545,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1465,6 +1530,8 @@ long do_fork(unsigned long clone_flags,
  	} else {
  		nr = PTR_ERR(p);
  	}
@@ -69326,7 +69370,7 @@
  	return nr;
  }
  
-@@ -1495,25 +1562,38 @@ static void sighand_ctor(void *data)
+@@ -1480,25 +1547,38 @@ static void sighand_ctor(void *data)
  	init_waitqueue_head(&sighand->signalfd_wqh);
  }
  
@@ -70849,7 +70893,7 @@
  	child = find_task_by_vpid(pid);
  	if (child)
 diff --git a/kernel/sched.c b/kernel/sched.c
-index 9990074..cdbd1b9 100644
+index 152214d..c9f9161 100644
 --- a/kernel/sched.c
 +++ b/kernel/sched.c
 @@ -71,6 +71,8 @@
@@ -70870,7 +70914,7 @@
  #else
  	tg = &init_task_group;
  #endif
-@@ -563,6 +567,9 @@ struct rq {
+@@ -562,6 +566,9 @@ struct rq {
  	 */
  	unsigned long nr_uninterruptible;
  
@@ -70880,7 +70924,7 @@
  	struct task_struct *curr, *idle;
  	unsigned long next_balance;
  	struct mm_struct *prev_mm;
-@@ -647,6 +654,12 @@ static inline int cpu_of(struct rq *rq)
+@@ -646,6 +653,12 @@ static inline int cpu_of(struct rq *rq)
  #endif
  }
  
@@ -70893,7 +70937,7 @@
  /*
   * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
   * See detach_destroy_domains: synchronize_sched for details.
-@@ -998,6 +1011,220 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
+@@ -1008,6 +1021,220 @@ static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
  	spin_unlock_irqrestore(&rq->lock, *flags);
  }
  
@@ -71114,7 +71158,7 @@
  /*
   * this_rq_lock - lock this runqueue and disable interrupts.
   */
-@@ -1950,11 +2177,21 @@ static int effective_prio(struct task_struct *p)
+@@ -1975,11 +2202,21 @@ static int effective_prio(struct task_struct *p)
   */
  static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  {
@@ -71131,13 +71175,13 @@
 +		ve_nr_unint_dec(VE_TASK_INFO(p)->owner_env, task_cpu(p));
 +	}
  
- 	enqueue_task(rq, p, wakeup);
+ 	enqueue_task(rq, p, wakeup, false);
  	inc_nr_running(rq);
 +	ve_nr_running_inc(VE_TASK_INFO(p)->owner_env, task_cpu(p), cycles);
  }
  
  /*
-@@ -1962,11 +2199,31 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+@@ -1987,11 +2224,31 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
   */
  static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
  {
@@ -71170,7 +71214,7 @@
  }
  
  /**
-@@ -2283,6 +2540,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+@@ -2276,6 +2533,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  
  	return ncsw;
  }
@@ -71178,20 +71222,15 @@
  
  /***
   * kick_process - kick a running thread to enter/exit the kernel
-@@ -2379,8 +2637,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
- 	 *
+@@ -2436,6 +2694,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
  	 * First fix up the nr_uninterruptible count:
  	 */
--	if (task_contributes_to_load(p))
-+	if (task_contributes_to_load(p)) {
- 		rq->nr_uninterruptible--;
+ 	if (task_contributes_to_load(p)) {
 +		ve_nr_unint_dec(VE_TASK_INFO(p)->owner_env, cpu);
-+	}
-+
- 	p->state = TASK_WAKING;
- 	task_rq_unlock(rq, &flags);
- 
-@@ -2614,6 +2875,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
+ 		if (likely(cpu_online(orig_cpu)))
+ 			rq->nr_uninterruptible--;
+ 		else
+@@ -2679,6 +2938,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
  	/* Want to start with kernel preemption disabled. */
  	task_thread_info(p)->preempt_count = 1;
  #endif
@@ -71202,16 +71241,7 @@
  	plist_node_init(&p->pushable_tasks, MAX_PRIO);
  
  	put_cpu();
-@@ -2644,6 +2909,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
- 		 */
- 		p->sched_class->task_new(rq, p);
- 		inc_nr_running(rq);
-+		ve_nr_running_inc(VE_TASK_INFO(p)->owner_env, task_cpu(p),
-+				  get_cycles());
- 	}
- 	trace_sched_wakeup_new(rq, p, 1);
- 	check_preempt_curr(rq, p, WF_FORK);
-@@ -2846,6 +3113,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
+@@ -2921,6 +3184,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
  	if (current->set_child_tid)
  		put_user(task_pid_vnr(current), current->set_child_tid);
  }
@@ -71219,7 +71249,7 @@
  
  /*
   * context_switch - switch to the new MM and the new
-@@ -2917,6 +3185,7 @@ unsigned long nr_running(void)
+@@ -2992,6 +3256,7 @@ unsigned long nr_running(void)
  
  	return sum;
  }
@@ -71227,7 +71257,7 @@
  
  unsigned long nr_uninterruptible(void)
  {
-@@ -2934,6 +3203,7 @@ unsigned long nr_uninterruptible(void)
+@@ -3009,6 +3274,7 @@ unsigned long nr_uninterruptible(void)
  
  	return sum;
  }
@@ -71235,7 +71265,7 @@
  
  unsigned long long nr_context_switches(void)
  {
-@@ -2969,6 +3239,72 @@ unsigned long this_cpu_load(void)
+@@ -3044,6 +3310,72 @@ unsigned long this_cpu_load(void)
  }
  
  
@@ -71308,7 +71338,7 @@
  /* Variables and functions for calc_load */
  static atomic_long_t calc_load_tasks;
  static unsigned long calc_load_update;
-@@ -2990,6 +3326,16 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+@@ -3065,6 +3397,16 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  	loads[2] = (avenrun[2] + offset) << shift;
  }
  
@@ -71325,7 +71355,7 @@
  static unsigned long
  calc_load(unsigned long load, unsigned long exp, unsigned long active)
  {
-@@ -2998,6 +3344,35 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+@@ -3073,6 +3415,35 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
  	return load >> FSHIFT;
  }
  
@@ -71361,7 +71391,7 @@
  /*
   * calc_load - update the avenrun load estimates 10 ticks after the
   * CPUs have updated calc_load_tasks.
-@@ -3017,6 +3392,8 @@ void calc_global_load(void)
+@@ -3092,6 +3463,8 @@ void calc_global_load(void)
  	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
  	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
  
@@ -71370,7 +71400,7 @@
  	calc_load_update += LOAD_FREQ;
  }
  
-@@ -3081,6 +3458,16 @@ static void update_cpu_load(struct rq *this_rq)
+@@ -3147,6 +3520,16 @@ static void update_cpu_load(struct rq *this_rq)
  	}
  }
  
@@ -71387,7 +71417,7 @@
  #ifdef CONFIG_SMP
  
  /*
-@@ -3181,8 +3568,15 @@ void sched_exec(void)
+@@ -3238,8 +3621,15 @@ unlock:
  static void pull_task(struct rq *src_rq, struct task_struct *p,
  		      struct rq *this_rq, int this_cpu)
  {
@@ -71403,7 +71433,7 @@
  	activate_task(this_rq, p, 0);
  	check_preempt_curr(this_rq, p, 0);
  }
-@@ -5059,10 +5453,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
+@@ -5116,10 +5506,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
  
  	/* Add user time to cpustat. */
  	tmp = cputime_to_cputime64(cputime);
@@ -71419,7 +71449,7 @@
  
  	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
  	/* Account for user time used */
-@@ -5119,6 +5516,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
+@@ -5176,6 +5569,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
  
  	/* Add system time to cpustat. */
  	tmp = cputime_to_cputime64(cputime);
@@ -71427,7 +71457,7 @@
  	if (hardirq_count() - hardirq_offset)
  		cpustat->irq = cputime64_add(cpustat->irq, tmp);
  	else if (softirq_count())
-@@ -5542,6 +5940,8 @@ need_resched_nonpreemptible:
+@@ -5599,6 +5993,8 @@ need_resched_nonpreemptible:
  	next = pick_next_task(rq);
  
  	if (likely(prev != next)) {
@@ -71436,7 +71466,7 @@
  		sched_info_switch(prev, next);
  		perf_event_task_sched_out(prev, next, cpu);
  
-@@ -5549,6 +5949,22 @@ need_resched_nonpreemptible:
+@@ -5606,6 +6002,22 @@ need_resched_nonpreemptible:
  		rq->curr = next;
  		++*switch_count;
  
@@ -71459,7 +71489,7 @@
  		context_switch(rq, prev, next); /* unlocks the rq */
  		/*
  		 * the context switch might have flipped the stack from under
-@@ -5556,8 +5972,10 @@ need_resched_nonpreemptible:
+@@ -5613,8 +6025,10 @@ need_resched_nonpreemptible:
  		 */
  		cpu = smp_processor_id();
  		rq = cpu_rq(cpu);
@@ -71471,7 +71501,7 @@
  
  	post_schedule(rq);
  
-@@ -6341,7 +6759,7 @@ recheck:
+@@ -6400,7 +6814,7 @@ recheck:
  	/*
  	 * Allow unprivileged RT tasks to decrease priority:
  	 */
@@ -71480,7 +71510,7 @@
  		if (rt_policy(policy)) {
  			unsigned long rlim_rtprio;
  
-@@ -6852,11 +7270,16 @@ EXPORT_SYMBOL(yield);
+@@ -6911,11 +7325,16 @@ EXPORT_SYMBOL(yield);
  void __sched io_schedule(void)
  {
  	struct rq *rq = raw_rq();
@@ -71497,7 +71527,7 @@
  	current->in_iowait = 0;
  	atomic_dec(&rq->nr_iowait);
  	delayacct_blkio_end();
-@@ -6867,11 +7290,16 @@ long __sched io_schedule_timeout(long timeout)
+@@ -6926,11 +7345,16 @@ long __sched io_schedule_timeout(long timeout)
  {
  	struct rq *rq = raw_rq();
  	long ret;
@@ -71514,7 +71544,7 @@
  	current->in_iowait = 0;
  	atomic_dec(&rq->nr_iowait);
  	delayacct_blkio_end();
-@@ -6978,17 +7406,7 @@ void sched_show_task(struct task_struct *p)
+@@ -7041,17 +7465,7 @@ void sched_show_task(struct task_struct *p)
  	state = p->state ? __ffs(p->state) + 1 : 0;
  	printk(KERN_INFO "%-13.13s %c", p->comm,
  		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
@@ -71533,7 +71563,7 @@
  #ifdef CONFIG_DEBUG_STACK_USAGE
  	free = stack_not_used(p);
  #endif
-@@ -7005,13 +7423,13 @@ void show_state_filter(unsigned long state_filter)
+@@ -7068,13 +7482,13 @@ void show_state_filter(unsigned long state_filter)
  
  #if BITS_PER_LONG == 32
  	printk(KERN_INFO
@@ -71550,7 +71580,7 @@
  		/*
  		 * reset the NMI-timeout, listing all files on a slow
  		 * console might take alot of time:
-@@ -7019,7 +7437,7 @@ void show_state_filter(unsigned long state_filter)
+@@ -7082,7 +7496,7 @@ void show_state_filter(unsigned long state_filter)
  		touch_nmi_watchdog();
  		if (!state_filter || (p->state & state_filter))
  			sched_show_task(p);
@@ -71559,7 +71589,7 @@
  
  	touch_all_softlockup_watchdogs();
  
-@@ -7388,13 +7806,13 @@ static void migrate_live_tasks(int src_cpu)
+@@ -7434,13 +7848,13 @@ static void migrate_live_tasks(int src_cpu)
  
  	read_lock(&tasklist_lock);
  
@@ -71575,7 +71605,7 @@
  
  	read_unlock(&tasklist_lock);
  }
-@@ -9542,6 +9960,7 @@ void __init sched_init(void)
+@@ -9599,6 +10013,7 @@ void __init sched_init(void)
  	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
  					    __alignof__(unsigned long));
  #endif
@@ -71583,7 +71613,7 @@
  	for_each_possible_cpu(i) {
  		struct rq *rq;
  
-@@ -9555,7 +9974,7 @@ void __init sched_init(void)
+@@ -9612,7 +10027,7 @@ void __init sched_init(void)
  #ifdef CONFIG_FAIR_GROUP_SCHED
  		init_task_group.shares = init_task_group_load;
  		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -71592,7 +71622,7 @@
  		/*
  		 * How much cpu bandwidth does init_task_group get?
  		 *
-@@ -9601,7 +10020,7 @@ void __init sched_init(void)
+@@ -9658,7 +10073,7 @@ void __init sched_init(void)
  		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
  #ifdef CONFIG_RT_GROUP_SCHED
  		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
@@ -71601,7 +71631,7 @@
  		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
  #elif defined CONFIG_USER_SCHED
  		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
-@@ -9667,6 +10086,7 @@ void __init sched_init(void)
+@@ -9724,6 +10139,7 @@ void __init sched_init(void)
  	 * During early bootup we pretend to be a normal task:
  	 */
  	current->sched_class = &fair_sched_class;
@@ -71609,7 +71639,7 @@
  
  	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
  	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
-@@ -9745,7 +10165,7 @@ void normalize_rt_tasks(void)
+@@ -9802,7 +10218,7 @@ void normalize_rt_tasks(void)
  	struct rq *rq;
  
  	read_lock_irqsave(&tasklist_lock, flags);
@@ -71618,7 +71648,7 @@
  		/*
  		 * Only normalize user tasks:
  		 */
-@@ -9776,7 +10196,7 @@ void normalize_rt_tasks(void)
+@@ -9833,7 +10249,7 @@ void normalize_rt_tasks(void)
  
  		__task_rq_unlock(rq);
  		spin_unlock(&p->pi_lock);
@@ -71627,7 +71657,7 @@
  
  	read_unlock_irqrestore(&tasklist_lock, flags);
  }
-@@ -10222,10 +10642,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
+@@ -10279,10 +10695,10 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
  {
  	struct task_struct *g, *p;
  
@@ -71641,7 +71671,7 @@
  	return 0;
  }
 diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
-index 6988cf0..95930c1 100644
+index 6f836a8..755b1f9 100644
 --- a/kernel/sched_debug.c
 +++ b/kernel/sched_debug.c
 @@ -135,12 +135,12 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
@@ -72681,10 +72711,10 @@
  	 * If the sum of all the available memory (i.e. ram + swap)
  	 * is less than can be stored in a 32 bit unsigned long then
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 0cccb6c..03d83f5 100644
+index 22cf21e..a01e1db 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
-@@ -3091,7 +3091,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -3100,7 +3100,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
  	}
  
  	read_lock_irqsave(&tasklist_lock, flags);
@@ -72693,7 +72723,7 @@
  		if (start == end) {
  			ret = -EAGAIN;
  			goto unlock;
-@@ -3105,7 +3105,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+@@ -3114,7 +3114,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
  			smp_wmb();
  			t->ret_stack = ret_stack_list[start++];
  		}
@@ -89394,7 +89424,7 @@
 +}
 +#endif
 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
-index 27a2378..93cb0c5 100644
+index ea1e6de..e83dd73 100644
 --- a/net/sunrpc/rpc_pipe.c
 +++ b/net/sunrpc/rpc_pipe.c
 @@ -1028,6 +1028,7 @@ static struct file_system_type rpc_pipe_fs_type = {

Modified: dists/sid/linux-2.6/debian/patches/series/24-extra
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/24-extra	Thu Sep 23 18:47:45 2010	(r16344)
+++ dists/sid/linux-2.6/debian/patches/series/24-extra	Thu Sep 23 19:53:01 2010	(r16345)
@@ -1,4 +1,3 @@
-+ debian/revert-sched-2.6.32.22-changes.patch featureset=openvz
 + features/all/openvz/openvz.patch featureset=openvz
 + features/all/openvz/revert-cgroup-lite-add-cgroup-id-for-blk-cgroups.patch featureset=openvz
 + features/all/openvz/partially-revert-CPT-Replace-legacy-net-statistics.patch featureset=openvz



More information about the Kernel-svn-changes mailing list