[kernel] r15801 - in dists/sid/linux-2.6/debian: . patches/features/all/openvz

Maximilian Attems maks at alioth.debian.org
Fri May 28 15:59:03 UTC 2010


Author: maks
Date: Fri May 28 15:59:00 2010
New Revision: 15801

Log:
update openvz patch

Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Fri May 28 01:42:31 2010	(r15800)
+++ dists/sid/linux-2.6/debian/changelog	Fri May 28 15:59:00 2010	(r15801)
@@ -32,7 +32,7 @@
   [ maximilian attems]
   * Add drm changes from stable 2.6.33.5:
     - i915: Disable FBC on 915GM and 945GM (Closes: #582427)
-  * Update openvz patch to 509eb1f29c43.
+  * Update openvz patch to e7399c239fad.
 
   [ Martin Michlmayr ]
   * QNAP TS-419P: Export GPIO indicating jumper setting of JP1.

Modified: dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Fri May 28 01:42:31 2010	(r15800)
+++ dists/sid/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Fri May 28 15:59:00 2010	(r15801)
@@ -1,3 +1,68 @@
+commit e7399c239fadcc813adcf4f947b00ec199d6a11b
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Thu May 27 20:07:25 2010 +0400
+
+    OpenVZ kernel 2.6.32-belyayev released
+    
+    Named after Pavel Ivanovich Belyayev - a Russian cosmonaut
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit f11aece8614b06328cfbb7283622d0c1f392a783
+Author: Kir Kolyshkin <kir at openvz.org>
+Date:   Thu May 27 20:05:53 2010 +0400
+
+    Fix/enlarge description of CONFIG_SYSFS_DEPRECATED_DYN
+    
+    Commit f40134386 adds this option, unfortunately the description
+    has a high ratio of typos per word, plus it is incomplete.
+    
+    Fix both issues.
+    
+    Signed-off-by: Kir Kolyshkin <kir at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 537027d8abbb3f78c9c80b7574a12c920c7af4f6
+Author: Cyrill Gorcunov <gorcunov at openvz.org>
+Date:   Tue Apr 20 23:14:04 2010 +0400
+
+    mm: SLUB -- implement show_slab_info
+    
+    Note that we had to introduce the cache_chain_lock spinlock;
+    otherwise we could touch an entity being removed.
+    
+    Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 0eb474428f7f5fcae74048682be692033060f21e
+Author: Cyrill Gorcunov <gorcunov at openvz.org>
+Date:   Tue Apr 20 21:14:24 2010 +0400
+
+    mm: SLAB -- use static cache_chain/_lock initializers
+    
+    There is no need to initialize the cache_chain list and spinlock dynamically,
+    which saves some CPU cycles during kernel startup.
+    
+    Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
+commit 9bab271973963a03bb1e0d5a1ecaee1e3810907c
+Merge: 509eb1f 7b7a917
+Author: Pavel Emelyanov <xemul at openvz.org>
+Date:   Thu May 27 17:57:10 2010 +0400
+
+    Merged linux-2.6.32.14
+    
+    Conflicts:
+    
+    	Makefile
+    	fs/notify/inotify/inotify_fsnotify.c
+    	fs/notify/inotify/inotify_user.c
+    	fs/proc/array.c
+    	include/linux/sched.h
+    
+    Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+
 commit 509eb1f29c4301126a0ccda8e001dfd0af0d56d2
 Author: Pavel Emelyanov <xemul at openvz.org>
 Date:   Mon May 24 14:27:05 2010 +0400
@@ -192,20 +257,6 @@
     Signed-off-by: Konstantin Khlebnikov <khlebnikov at openvz.org>
     Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
 
-commit 1cd8211f07663ebeac04b19ac849de7ed5eef969
-Author: Greg Kroah-Hartman <gregkh at suse.de>
-Date:   Wed May 12 15:11:42 2010 -0700
-
-    Revert "module: fix __module_ref_addr()"
-    
-    This reverts commit d150a2b96558a7349cbf3a72a279c37bc67d50fb.
-    
-    Thanks to Jiri Benc for finding the problem that this patch is
-    not correct for the 2.6.32-stable series.
-    
-    Cc: Jiri Kosina <jkosina at suse.cz>
-    Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
-
 commit dd480cee5d48b5fd88f4f074743b542fab6d9e70
 Author: Shaohua Li <shaohua.li at intel.com>
 Date:   Tue Apr 27 16:52:01 2010 +0400
@@ -5843,20 +5894,6 @@
     Neither compiles, nor works.
     
     Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
-
-Fixups for 2.6.32.14 by Ben Hutchings <ben at decadent.org.uk>:
-
-Remove changes included both in this and in 2.6.32.14:
-002fdeea3c2cff663452bb92035074bb8bbb84ac = b3b38d842fa367d862b83e7670af4e0fd6a80fc0
-1cd8211f07663ebeac04b19ac849de7ed5eef969
-
-Adjust context in some other places.
-
-Add a call to fsnotify_put_mark() on the error path added to
-inotify_new_watch(), needed following
-c606e701ef469582a991aabd3a6f7816a9a9d351
-"inotify: race use after free/double free in inotify inode marks".
-
 diff --git a/COPYING.Parallels b/COPYING.Parallels
 new file mode 100644
 index 0000000..9856a2b
@@ -6214,14 +6251,14 @@
 +library.  If this is what you want to do, use the GNU Library General
 +Public License instead of this License.
 diff --git a/Makefile b/Makefile
-index 801d0e1..4eac9f7 100644
+index 47866f8..7503318 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -2,6 +2,7 @@ VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 32
  EXTRAVERSION =
-+VZVERSION = balandin
++VZVERSION = belyayev
  NAME = Man-Eating Seals of Antiquity
  
  # *DOCUMENTATION*
@@ -14831,7 +14868,7 @@
  }
  
 diff --git a/fs/compat.c b/fs/compat.c
-index 6c19040..5141257 100644
+index d576b55..284386f 100644
 --- a/fs/compat.c
 +++ b/fs/compat.c
 @@ -26,6 +26,7 @@
@@ -15838,7 +15875,7 @@
  /*
   * The following function implements the controller interface for
 diff --git a/fs/exec.c b/fs/exec.c
-index a2a3944..bac9cfa 100644
+index 56da15f..6ea8efa 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -26,6 +26,7 @@
@@ -15982,7 +16019,7 @@
  	retval = unshare_files(&displaced);
  	if (retval)
  		goto out_ret;
-@@ -1568,7 +1602,7 @@ static int zap_process(struct task_struct *start)
+@@ -1566,7 +1600,7 @@ static int zap_process(struct task_struct *start)
  			signal_wake_up(t, 1);
  			nr++;
  		}
@@ -15991,7 +16028,7 @@
  
  	return nr;
  }
-@@ -1623,7 +1657,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1621,7 +1655,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
  	 *	next_thread().
  	 */
  	rcu_read_lock();
@@ -16000,7 +16037,7 @@
  		if (g == tsk->group_leader)
  			continue;
  		if (g->flags & PF_KTHREAD)
-@@ -1638,7 +1672,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1636,7 +1670,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
  				}
  				break;
  			}
@@ -16009,7 +16046,7 @@
  	}
  	rcu_read_unlock();
  done:
-@@ -1806,7 +1840,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -1804,7 +1838,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  	/*
  	 * If another thread got here first, or we are not dumpable, bail out.
  	 */
@@ -18432,10 +18469,10 @@
  
  extern void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
 diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
-index 1afb0a1..9b31a34 100644
+index e27960c..9b31a34 100644
 --- a/fs/notify/inotify/inotify_fsnotify.c
 +++ b/fs/notify/inotify/inotify_fsnotify.c
-@@ -28,6 +28,7 @@
+@@ -29,6 +29,7 @@
  #include <linux/slab.h> /* kmem_* */
  #include <linux/types.h>
  #include <linux/sched.h>
@@ -18443,7 +18480,7 @@
  
  #include "inotify.h"
  
-@@ -159,10 +162,25 @@ void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
+@@ -161,10 +162,25 @@ void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
  	kmem_cache_free(event_priv_cachep, event_priv);
  }
  
@@ -18470,7 +18507,7 @@
 +	.detach_mnt = inotify_detach_mnt,
  };
 diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
-index ca44337..745983d 100644
+index 22ef16a..d9909cd 100644
 --- a/fs/notify/inotify/inotify_user.c
 +++ b/fs/notify/inotify/inotify_user.c
 @@ -40,6 +40,7 @@
@@ -18540,7 +18577,7 @@
  
  	ret = -ENOSPC;
  	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
-@@ -556,13 +563,17 @@ retry:
+@@ -556,13 +563,16 @@ retry:
  	if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
  		goto out_err;
  
@@ -18548,7 +18585,6 @@
 +		start_wd = group->inotify_data.last_wd + 1;
 +	else
 +		start_wd = wd;
-+
  	/* we are putting the mark on the idr, take a reference */
  	fsnotify_get_mark(&tmp_ientry->fsn_entry);
  
@@ -18559,16 +18595,14 @@
 +				start_wd, &tmp_ientry->wd);
  	spin_unlock(&group->inotify_data.idr_lock);
  	if (ret) {
- 		/* idr was out of memory allocate and try again */
-@@ -568,8 +579,17 @@ retry:
+ 		/* we didn't get on the idr, drop the idr reference */
+@@ -574,8 +584,15 @@ retry:
  		goto out_err;
  	}
  
 +	if (wd != -1 && tmp_ientry->wd != wd) {
-+		/* we didn't get on the idr, drop the idr reference */
-+		fsnotify_put_mark(&tmp_ientry->fsn_entry);
-+
 +		ret = -EBUSY;
++		fsnotify_put_mark(&tmp_ientry->fsn_entry);
 +		inotify_remove_from_idr(group, tmp_ientry);
 +		goto out_err;
 +	}
@@ -18579,7 +18613,7 @@
  	if (ret) {
  		/* we failed to get on the inode, get off the idr */
  		inotify_remove_from_idr(group, tmp_ientry);
-@@ -585,6 +602,12 @@ retry:
+@@ -588,6 +605,12 @@ retry:
  	/* increment the number of watches the user has */
  	atomic_inc(&group->inotify_data.user->inotify_watches);
  
@@ -18592,7 +18626,7 @@
  	/* return the watch descriptor for this new entry */
  	ret = tmp_ientry->wd;
  
-@@ -601,17 +624,24 @@ out_err:
+@@ -604,17 +627,24 @@ out_err:
  
  	return ret;
  }
@@ -18620,7 +18654,7 @@
  	/*
  	 * inotify_new_watch could race with another thread which did an
  	 * inotify_new_watch between the update_existing and the add watch
-@@ -711,12 +741,12 @@ SYSCALL_DEFINE0(inotify_init)
+@@ -714,12 +744,12 @@ SYSCALL_DEFINE0(inotify_init)
  {
  	return sys_inotify_init1(0);
  }
@@ -18634,7 +18668,7 @@
  	struct path path;
  	struct file *filp;
  	int ret, fput_needed;
-@@ -741,12 +771,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
+@@ -744,12 +774,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
  	if (ret)
  		goto fput_and_out;
  
@@ -18924,10 +18958,10 @@
  /*
   * sys_pipe() is the normal C calling standard for creating
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 822c2d5..d29461e 100644
+index 725a650..7de3905 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
-@@ -84,6 +84,8 @@
+@@ -83,6 +83,8 @@
  #include <linux/ptrace.h>
  #include <linux/tracehook.h>
  
@@ -18936,7 +18970,7 @@
  #include <asm/pgtable.h>
  #include <asm/processor.h>
  #include "internal.h"
-@@ -155,6 +157,18 @@ static inline const char *get_task_state(struct task_struct *tsk)
+@@ -154,6 +156,18 @@ static inline const char *get_task_state(struct task_struct *tsk)
  	return *p;
  }
  
@@ -18955,7 +18989,7 @@
  static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
  				struct pid *pid, struct task_struct *p)
  {
-@@ -162,7 +176,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+@@ -161,7 +175,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
  	int g;
  	struct fdtable *fdt = NULL;
  	const struct cred *cred;
@@ -18964,7 +18998,7 @@
  
  	rcu_read_lock();
  	ppid = pid_alive(p) ?
-@@ -173,6 +187,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+@@ -172,6 +186,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
  		if (tracer)
  			tpid = task_pid_nr_ns(tracer, ns);
  	}
@@ -18972,7 +19006,7 @@
  	cred = get_cred((struct cred *) __task_cred(p));
  	seq_printf(m,
  		"State:\t%s\n"
-@@ -206,6 +221,11 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
+@@ -205,6 +220,11 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
  	put_cred(cred);
  
  	seq_printf(m, "\n");
@@ -18984,7 +19018,7 @@
  }
  
  static void render_sigset_t(struct seq_file *m, const char *header,
-@@ -245,10 +265,10 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
+@@ -244,10 +264,10 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
  	}
  }
  
@@ -18997,7 +19031,7 @@
  	int num_threads = 0;
  	unsigned long qsize = 0;
  	unsigned long qlim = 0;
-@@ -258,11 +278,13 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
+@@ -257,11 +277,13 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
  	sigemptyset(&blocked);
  	sigemptyset(&ignored);
  	sigemptyset(&caught);
@@ -19011,7 +19045,7 @@
  		collect_sigign_sigcatch(p, &ignored, &caught);
  		num_threads = atomic_read(&p->signal->count);
  		qsize = atomic_read(&__task_cred(p)->user->sigpending);
-@@ -279,6 +301,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
+@@ -278,6 +300,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p)
  	render_sigset_t(m, "SigBlk:\t", &blocked);
  	render_sigset_t(m, "SigIgn:\t", &ignored);
  	render_sigset_t(m, "SigCgt:\t", &caught);
@@ -19019,7 +19053,7 @@
  }
  
  static void render_cap_t(struct seq_file *m, const char *header,
-@@ -313,6 +336,20 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
+@@ -312,6 +335,20 @@ static inline void task_cap(struct seq_file *m, struct task_struct *p)
  	render_cap_t(m, "CapBnd:\t", &cap_bset);
  }
  
@@ -19040,7 +19074,7 @@
  static inline void task_context_switch_counts(struct seq_file *m,
  						struct task_struct *p)
  {
-@@ -414,6 +451,9 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+@@ -325,6 +362,9 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
  			struct pid *pid, struct task_struct *task)
  {
  	struct mm_struct *mm = get_task_mm(task);
@@ -19050,7 +19084,7 @@
  
  	task_name(m, task);
  	task_state(m, ns, pid, task);
-@@ -430,6 +470,14 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+@@ -340,6 +380,14 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
  	task_show_regs(m, task);
  #endif
  	task_context_switch_counts(m, task);
@@ -19065,7 +19099,7 @@
  	return 0;
  }
  
-@@ -453,6 +501,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -363,6 +411,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  	unsigned long rsslim = 0;
  	char tcomm[sizeof(task->comm)];
  	unsigned long flags;
@@ -19076,7 +19110,7 @@
  
  	state = *get_task_state(task);
  	vsize = eip = esp = 0;
-@@ -534,6 +586,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -444,6 +496,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  	priority = task_prio(task);
  	nice = task_nice(task);
  
@@ -19084,7 +19118,7 @@
  	/* Temporary variable needed for gcc-2.96 */
  	/* convert timespec -> nsec*/
  	start_time =
-@@ -541,10 +594,25 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -451,10 +504,25 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  				+ task->real_start_time.tv_nsec;
  	/* convert nsec -> ticks */
  	start_time = nsec_to_clock_t(start_time);
@@ -19111,7 +19145,7 @@
  		pid_nr_ns(pid, ns),
  		tcomm,
  		state,
-@@ -591,7 +659,16 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+@@ -501,7 +569,16 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
  		task->policy,
  		(unsigned long long)delayacct_blkio_ticks(task),
  		cputime_to_clock_t(gtime),
@@ -34203,7 +34237,7 @@
  static inline void page_dup_rmap(struct page *page)
  {
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 70abfd3..d6155c1 100644
+index b253434..1412d9a 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -94,6 +94,8 @@ struct sched_param {
@@ -34296,10 +34330,10 @@
  	 * cache last used pipe for splice
  	 */
  	struct pipe_inode_info *splice_pipe;
-@@ -1542,6 +1581,19 @@ struct task_struct {
+@@ -1541,6 +1580,19 @@ struct task_struct {
+ 	/* bitmask of trace recursion */
  	unsigned long trace_recursion;
  #endif /* CONFIG_TRACING */
- 	unsigned long stack_start;
 +#ifdef CONFIG_BEANCOUNTERS
 +	struct task_beancounter task_bc;
 +#endif
@@ -34316,7 +34350,7 @@
  };
  
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1727,6 +1779,43 @@ extern cputime_t task_utime(struct task_struct *p);
+@@ -1726,6 +1778,43 @@ extern cputime_t task_utime(struct task_struct *p);
  extern cputime_t task_stime(struct task_struct *p);
  extern cputime_t task_gtime(struct task_struct *p);
  
@@ -34360,7 +34394,7 @@
  /*
   * Per process flags
   */
-@@ -1736,6 +1825,7 @@ extern cputime_t task_gtime(struct task_struct *p);
+@@ -1735,6 +1824,7 @@ extern cputime_t task_gtime(struct task_struct *p);
  #define PF_EXITING	0x00000004	/* getting shut down */
  #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
  #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -34368,7 +34402,7 @@
  #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
  #define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
  #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
-@@ -1872,6 +1962,21 @@ extern unsigned long long
+@@ -1871,6 +1961,21 @@ extern unsigned long long
  task_sched_runtime(struct task_struct *task);
  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
  
@@ -34390,7 +34424,7 @@
  /* sched_exec is called by processes performing an exec */
  #ifdef CONFIG_SMP
  extern void sched_exec(void);
-@@ -2151,6 +2256,13 @@ extern int disallow_signal(int);
+@@ -2150,6 +2255,13 @@ extern int disallow_signal(int);
  
  extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
  extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
@@ -34404,7 +34438,7 @@
  struct task_struct *fork_idle(int);
  
  extern void set_task_comm(struct task_struct *tsk, char *from);
-@@ -2168,11 +2280,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
+@@ -2167,11 +2279,11 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
  }
  #endif
  
@@ -34419,7 +34453,7 @@
  
  extern bool current_is_single_threaded(void);
  
-@@ -2180,10 +2292,10 @@ extern bool current_is_single_threaded(void);
+@@ -2179,10 +2291,10 @@ extern bool current_is_single_threaded(void);
   * Careful: do_each_thread/while_each_thread is a double loop so
   *          'break' will not work as expected - use goto instead.
   */
@@ -34433,7 +34467,7 @@
  	while ((t = next_thread(t)) != g)
  
  /* de_thread depends on thread_group_leader not being a pid based check */
-@@ -2208,8 +2320,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+@@ -2207,8 +2319,14 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
  
  static inline struct task_struct *next_thread(const struct task_struct *p)
  {
@@ -34449,7 +34483,7 @@
  }
  
  static inline int thread_group_empty(struct task_struct *p)
-@@ -2254,6 +2372,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
+@@ -2253,6 +2371,98 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
  	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
  }
  
@@ -38129,7 +38163,7 @@
  /* This is what the send packet queuing engine uses to pass
   * TCP per-packet control information to the transmission
 diff --git a/init/Kconfig b/init/Kconfig
-index eb4b337..0f0a0a4 100644
+index eb4b337..b480300 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -279,7 +279,7 @@ config TASK_XACCT
@@ -38218,18 +38252,19 @@
  	help
  	  This option enables controller independent resource accounting
  	  infrastructure that works with cgroups.
-@@ -638,6 +646,15 @@ config SYSFS_DEPRECATED_V2
+@@ -638,6 +646,16 @@ config SYSFS_DEPRECATED_V2
  	  if the original kernel, that came with your distribution, has
  	  this option set to N.
  
 +config SYSFS_DEPRECATED_DYN
-+	bool "make deprecatd sysfs leayou dynamically"
++	bool "make deprecated sysfs layout dynamically"
 +	depends on SYSFS
 +	default y
 +	select SYSFS_DEPRECATED_V2
 +	help
-+	  This option works like the DEPRECATED_V2 but alows selecting the
-+	  sysfs layout dynamically
++	  This option works like the DEPRECATED_V2 but allows selecting the
++	  sysfs layout dynamically, i.e. on boot. To select the old
++	  (deprecated) layout, supply old_sysfs kernel boot parameter.
 +
  config RELAY
  	bool "Kernel->user space relay support (formerly relayfs)"
@@ -68750,7 +68785,7 @@
 +
 +#endif /* CONFIG_PROC_FS */
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 166b8c4..1d620a6 100644
+index 28b4874..be960e6 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -64,6 +64,8 @@
@@ -68953,7 +68988,7 @@
  #ifdef CONFIG_PROVE_LOCKING
  	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
  	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
-@@ -1147,7 +1190,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1145,7 +1188,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  		goto bad_fork_cleanup_sighand;
  	if ((retval = copy_mm(clone_flags, p)))
  		goto bad_fork_cleanup_signal;
@@ -68962,7 +68997,7 @@
  		goto bad_fork_cleanup_mm;
  	if ((retval = copy_io(clone_flags, p)))
  		goto bad_fork_cleanup_namespaces;
-@@ -1157,7 +1200,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1155,7 +1198,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  
  	if (pid != &init_struct_pid) {
  		retval = -ENOMEM;
@@ -68971,7 +69006,7 @@
  		if (!pid)
  			goto bad_fork_cleanup_io;
  
-@@ -1165,6 +1208,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1163,6 +1206,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
  			if (retval < 0)
  				goto bad_fork_free_pid;
@@ -68980,7 +69015,7 @@
  		}
  	}
  
-@@ -1264,7 +1309,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1262,7 +1307,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  	 * thread can't slip out of an OOM kill (or normal SIGKILL).
   	 */
  	recalc_sigpending();
@@ -68989,7 +69024,7 @@
  		spin_unlock(&current->sighand->siglock);
  		write_unlock_irq(&tasklist_lock);
  		retval = -ERESTARTNOINTR;
-@@ -1292,14 +1337,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+@@ -1290,14 +1335,24 @@ static struct task_struct *copy_process(unsigned long clone_flags,
  			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
  			attach_pid(p, PIDTYPE_SID, task_session(current));
  			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -69014,7 +69049,7 @@
  	write_unlock_irq(&tasklist_lock);
  	proc_fork_connector(p);
  	cgroup_post_fork(p);
-@@ -1342,6 +1397,9 @@ bad_fork_cleanup_count:
+@@ -1340,6 +1395,9 @@ bad_fork_cleanup_count:
  	atomic_dec(&p->cred->user->processes);
  	exit_creds(p);
  bad_fork_free:
@@ -69024,7 +69059,7 @@
  	free_task(p);
  fork_out:
  	return ERR_PTR(retval);
-@@ -1359,7 +1417,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1357,7 +1415,7 @@ struct task_struct * __cpuinit fork_idle(int cpu)
  	struct pt_regs regs;
  
  	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
@@ -69033,7 +69068,7 @@
  	if (!IS_ERR(task))
  		init_idle(task, cpu);
  
-@@ -1372,12 +1430,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
+@@ -1370,12 +1428,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
   * It copies the process, and if successful kick-starts
   * it and waits for it to finish using the VM if required.
   */
@@ -69049,7 +69084,7 @@
  {
  	struct task_struct *p;
  	int trace = 0;
-@@ -1415,6 +1474,10 @@ long do_fork(unsigned long clone_flags,
+@@ -1413,6 +1472,10 @@ long do_fork(unsigned long clone_flags,
  		}
  	}
  
@@ -69060,7 +69095,7 @@
  	/*
  	 * When called from kernel_thread, don't do user tracing stuff.
  	 */
-@@ -1422,7 +1485,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1420,7 +1483,7 @@ long do_fork(unsigned long clone_flags,
  		trace = tracehook_prepare_clone(clone_flags);
  
  	p = copy_process(clone_flags, stack_start, regs, stack_size,
@@ -69069,7 +69104,7 @@
  	/*
  	 * Do this prior waking up the new thread - the thread pointer
  	 * might get invalid after that point, if the thread exits quickly.
-@@ -1453,6 +1516,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1451,6 +1514,8 @@ long do_fork(unsigned long clone_flags,
  		 */
  		p->flags &= ~PF_STARTING;
  
@@ -69078,7 +69113,7 @@
  		if (unlikely(clone_flags & CLONE_STOPPED)) {
  			/*
  			 * We'll start up with an immediate SIGSTOP.
-@@ -1476,6 +1541,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1474,6 +1539,8 @@ long do_fork(unsigned long clone_flags,
  	} else {
  		nr = PTR_ERR(p);
  	}
@@ -69087,7 +69122,7 @@
  	return nr;
  }
  
-@@ -1491,25 +1558,38 @@ static void sighand_ctor(void *data)
+@@ -1489,25 +1556,38 @@ static void sighand_ctor(void *data)
  	init_waitqueue_head(&sighand->signalfd_wqh);
  }
  
@@ -78281,7 +78316,7 @@
  	vma->vm_ops = &shmem_vm_ops;
  	return 0;
 diff --git a/mm/slab.c b/mm/slab.c
-index 5d1a782..2502955 100644
+index 5d1a782..f23819e 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -115,30 +115,14 @@
@@ -78410,11 +78445,13 @@
  static struct arraycache_init initarray_cache __initdata =
      { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
  static struct arraycache_init initarray_generic =
-@@ -664,6 +662,7 @@ static inline void init_lock_keys(void)
+@@ -663,7 +661,8 @@ static inline void init_lock_keys(void)
+  * Guard access to the cache-chain.
   */
  static DEFINE_MUTEX(cache_chain_mutex);
- static struct list_head cache_chain;
-+static spinlock_t cache_chain_lock;
+-static struct list_head cache_chain;
++static LIST_HEAD(cache_chain);
++static DEFINE_SPINLOCK(cache_chain_lock);
  
  /*
   * chicken and egg problem: delay the per-cpu array allocation
@@ -78570,15 +78607,15 @@
  
  /*
   * For setting up all the kmem_list3s for cache whose buffer_size is same as
-@@ -1409,6 +1507,7 @@ void __init kmem_cache_init(void)
+@@ -1408,7 +1506,6 @@ void __init kmem_cache_init(void)
+ 	node = numa_node_id();
  
  	/* 1) create the cache_cache */
- 	INIT_LIST_HEAD(&cache_chain);
-+	spin_lock_init(&cache_chain_lock);
+-	INIT_LIST_HEAD(&cache_chain);
  	list_add(&cache_cache.next, &cache_chain);
  	cache_cache.colour_off = cache_line_size();
  	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-@@ -1420,7 +1519,7 @@ void __init kmem_cache_init(void)
+@@ -1420,7 +1517,7 @@ void __init kmem_cache_init(void)
  	 */
  	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
  				 nr_node_ids * sizeof(struct kmem_list3 *);
@@ -78587,7 +78624,7 @@
  	cache_cache.obj_size = cache_cache.buffer_size;
  #endif
  	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
-@@ -1467,6 +1566,7 @@ void __init kmem_cache_init(void)
+@@ -1467,6 +1564,7 @@ void __init kmem_cache_init(void)
  
  	slab_early_init = 0;
  
@@ -78595,7 +78632,7 @@
  	while (sizes->cs_size != ULONG_MAX) {
  		/*
  		 * For performance, all the general caches are L1 aligned.
-@@ -1479,21 +1579,30 @@ void __init kmem_cache_init(void)
+@@ -1479,21 +1577,30 @@ void __init kmem_cache_init(void)
  			sizes->cs_cachep = kmem_cache_create(names->name,
  					sizes->cs_size,
  					ARCH_KMALLOC_MINALIGN,
@@ -78629,7 +78666,7 @@
  	/* 4) Replace the bootstrap head arrays */
  	{
  		struct array_cache *ptr;
-@@ -1674,7 +1783,7 @@ static void kmem_rcu_free(struct rcu_head *head)
+@@ -1674,7 +1781,7 @@ static void kmem_rcu_free(struct rcu_head *head)
  		kmem_cache_free(cachep->slabp_cache, slab_rcu);
  }
  
@@ -78638,7 +78675,7 @@
  
  #ifdef CONFIG_DEBUG_PAGEALLOC
  static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
-@@ -1751,7 +1860,7 @@ static void dump_line(char *data, int offset, int limit)
+@@ -1751,7 +1858,7 @@ static void dump_line(char *data, int offset, int limit)
  }
  #endif
  
@@ -78647,7 +78684,7 @@
  
  static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
  {
-@@ -1844,7 +1953,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
+@@ -1844,7 +1951,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
  }
  #endif
  
@@ -78656,7 +78693,7 @@
  static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
  {
  	int i;
-@@ -1944,7 +2053,6 @@ static void __kmem_cache_destroy(struct kmem_cache *cachep)
+@@ -1944,7 +2051,6 @@ static void __kmem_cache_destroy(struct kmem_cache *cachep)
  static size_t calculate_slab_order(struct kmem_cache *cachep,
  			size_t size, size_t align, unsigned long flags)
  {
@@ -78664,7 +78701,7 @@
  	size_t left_over = 0;
  	int gfporder;
  
-@@ -1957,15 +2065,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
+@@ -1957,15 +2063,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
  			continue;
  
  		if (flags & CFLGS_OFF_SLAB) {
@@ -78683,7 +78720,7 @@
  				break;
  		}
  
-@@ -2133,9 +2236,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2133,9 +2234,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  		}
  	}
  
@@ -78695,7 +78732,7 @@
  	/*
  	 * Enable redzoning and last user accounting, except for caches with
  	 * large objects, if the increased size would increase the object size
-@@ -2225,7 +2328,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2225,7 +2326,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  	if (!cachep)
  		goto oops;
  
@@ -78704,7 +78741,7 @@
  	cachep->obj_size = size;
  
  	/*
-@@ -2247,7 +2350,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2247,7 +2348,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  		else
  			size += BYTES_PER_WORD;
  	}
@@ -78713,7 +78750,7 @@
  	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
  	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
  		cachep->obj_offset += PAGE_SIZE - size;
-@@ -2279,8 +2382,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2279,8 +2380,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  		cachep = NULL;
  		goto oops;
  	}
@@ -78723,7 +78760,7 @@
  
  	/*
  	 * If the slab has been placed off-slab, and we have enough space then
-@@ -2293,8 +2395,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2293,8 +2393,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  
  	if (flags & CFLGS_OFF_SLAB) {
  		/* really off slab. No need for manual alignment */
@@ -78733,7 +78770,7 @@
  
  #ifdef CONFIG_PAGE_POISONING
  		/* If we're going to use the generic kernel_map_pages()
-@@ -2340,7 +2441,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
+@@ -2340,7 +2439,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
  	}
  
  	/* cache setup completed, link it into the list */
@@ -78744,7 +78781,7 @@
  oops:
  	if (!cachep && (flags & SLAB_PANIC))
  		panic("kmem_cache_create(): failed to create slab `%s'\n",
-@@ -2353,7 +2457,7 @@ oops:
+@@ -2353,7 +2455,7 @@ oops:
  }
  EXPORT_SYMBOL(kmem_cache_create);
  
@@ -78753,7 +78790,7 @@
  static void check_irq_off(void)
  {
  	BUG_ON(!irqs_disabled());
-@@ -2449,10 +2553,11 @@ static int drain_freelist(struct kmem_cache *cache,
+@@ -2449,10 +2551,11 @@ static int drain_freelist(struct kmem_cache *cache,
  		}
  
  		slabp = list_entry(p, struct slab, list);
@@ -78766,7 +78803,7 @@
  		/*
  		 * Safe to drop the lock. The slab is no longer linked
  		 * to the cache.
-@@ -2535,10 +2640,14 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
+@@ -2535,10 +2638,14 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
  	/*
  	 * the chain is never empty, cache_cache is never destroyed
  	 */
@@ -78781,7 +78818,7 @@
  		mutex_unlock(&cache_chain_mutex);
  		put_online_cpus();
  		return;
-@@ -2547,6 +2656,8 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
+@@ -2547,6 +2654,8 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
  	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
  		rcu_barrier();
  
@@ -78790,7 +78827,7 @@
  	__kmem_cache_destroy(cachep);
  	mutex_unlock(&cache_chain_mutex);
  	put_online_cpus();
-@@ -2573,7 +2684,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
+@@ -2573,7 +2682,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
  	if (OFF_SLAB(cachep)) {
  		/* Slab management obj is off-slab. */
  		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
@@ -78799,7 +78836,7 @@
  		/*
  		 * If the first object in the slab is leaked (it's allocated
  		 * but no one has a reference to it), we want to make sure
-@@ -2593,14 +2704,10 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
+@@ -2593,14 +2702,10 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
  	slabp->s_mem = objp + colour_off;
  	slabp->nodeid = nodeid;
  	slabp->free = 0;
@@ -78815,7 +78852,7 @@
  static void cache_init_objs(struct kmem_cache *cachep,
  			    struct slab *slabp)
  {
-@@ -2608,7 +2715,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
+@@ -2608,7 +2713,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
  
  	for (i = 0; i < cachep->num; i++) {
  		void *objp = index_to_obj(cachep, slabp, i);
@@ -78824,7 +78861,7 @@
  		/* need to poison the objs? */
  		if (cachep->flags & SLAB_POISON)
  			poison_obj(cachep, objp, POISON_FREE);
-@@ -2666,7 +2773,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
+@@ -2666,7 +2771,7 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
  
  	slabp->inuse++;
  	next = slab_bufctl(slabp)[slabp->free];
@@ -78833,7 +78870,7 @@
  	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
  	WARN_ON(slabp->nodeid != nodeid);
  #endif
-@@ -2680,7 +2787,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
+@@ -2680,7 +2785,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
  {
  	unsigned int objnr = obj_to_index(cachep, slabp, objp);
  
@@ -78842,7 +78879,7 @@
  	/* Verify that the slab belongs to the intended node */
  	WARN_ON(slabp->nodeid != nodeid);
  
-@@ -2768,7 +2875,7 @@ static int cache_grow(struct kmem_cache *cachep,
+@@ -2768,7 +2873,7 @@ static int cache_grow(struct kmem_cache *cachep,
  	 * 'nodeid'.
  	 */
  	if (!objp)
@@ -78851,7 +78888,7 @@
  	if (!objp)
  		goto failed;
  
-@@ -2801,7 +2908,7 @@ failed:
+@@ -2801,7 +2906,7 @@ failed:
  	return 0;
  }
  
@@ -78860,7 +78897,7 @@
  
  /*
   * Perform extra freeing checks:
-@@ -3014,12 +3121,12 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
+@@ -3014,12 +3119,12 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
  						gfp_t flags)
  {
  	might_sleep_if(flags & __GFP_WAIT);
@@ -78875,7 +78912,7 @@
  static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
  				gfp_t flags, void *objp, void *caller)
  {
-@@ -3389,11 +3496,16 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+@@ -3389,11 +3494,16 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
  	cache_alloc_debugcheck_before(cachep, flags);
  	local_irq_save(save_flags);
  	objp = __do_cache_alloc(cachep, flags);
@@ -78893,7 +78930,7 @@
  
  	if (likely(objp))
  		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
-@@ -3430,6 +3542,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
+@@ -3430,6 +3540,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
  		/* fixup slab chains */
  		if (slabp->inuse == 0) {
  			if (l3->free_objects > l3->free_limit) {
@@ -78901,7 +78938,7 @@
  				l3->free_objects -= cachep->num;
  				/* No need to drop any previously held
  				 * lock here, even if we have a off-slab slab
-@@ -3458,7 +3571,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+@@ -3458,7 +3569,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  	int node = numa_node_id();
  
  	batchcount = ac->batchcount;
@@ -78910,7 +78947,7 @@
  	BUG_ON(!batchcount || batchcount > ac->avail);
  #endif
  	check_irq_off();
-@@ -3479,7 +3592,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+@@ -3479,7 +3590,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  
  	free_block(cachep, ac->entry, batchcount, node);
  free_done:
@@ -78919,7 +78956,7 @@
  	{
  		int i = 0;
  		struct list_head *p;
-@@ -3516,6 +3629,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+@@ -3516,6 +3627,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  
  	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
  
@@ -78929,7 +78966,7 @@
  	/*
  	 * Skip calling cache_free_alien() when the platform is not numa.
  	 * This will avoid cache misses that happen while accessing slabp (which
-@@ -3970,7 +4086,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
+@@ -3970,7 +4084,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
  		shared = 8;
  
@@ -78938,7 +78975,7 @@
  	/*
  	 * With debugging enabled, large batchcount lead to excessively long
  	 * periods with disabled local interrupts. Limit the batchcount
-@@ -4037,6 +4153,7 @@ static void cache_reap(struct work_struct *w)
+@@ -4037,6 +4151,7 @@ static void cache_reap(struct work_struct *w)
  		/* Give up. Setup the next iteration. */
  		goto out;
  
@@ -78946,7 +78983,7 @@
  	list_for_each_entry(searchp, &cache_chain, next) {
  		check_irq_on();
  
-@@ -4077,6 +4194,7 @@ next:
+@@ -4077,6 +4192,7 @@ next:
  	check_irq_on();
  	mutex_unlock(&cache_chain_mutex);
  	next_reap_node();
@@ -78954,7 +78991,7 @@
  out:
  	/* Set up the next iteration */
  	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
-@@ -4090,7 +4208,7 @@ static void print_slabinfo_header(struct seq_file *m)
+@@ -4090,7 +4206,7 @@ static void print_slabinfo_header(struct seq_file *m)
  	 * Output format version, so at least we can change it
  	 * without _too_ many complaints.
  	 */
@@ -78963,7 +79000,7 @@
  	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
  #else
  	seq_puts(m, "slabinfo - version: 2.1\n");
-@@ -4099,14 +4217,82 @@ static void print_slabinfo_header(struct seq_file *m)
+@@ -4099,14 +4215,82 @@ static void print_slabinfo_header(struct seq_file *m)
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
@@ -79048,7 +79085,7 @@
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
-@@ -4185,19 +4371,20 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4185,19 +4369,20 @@ static int s_show(struct seq_file *m, void *p)
  	if (error)
  		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
  
@@ -79071,7 +79108,7 @@
  		unsigned long errors = cachep->errors;
  		unsigned long max_freeable = cachep->max_freeable;
  		unsigned long node_allocs = cachep->node_allocs;
-@@ -4205,9 +4392,10 @@ static int s_show(struct seq_file *m, void *p)
+@@ -4205,9 +4390,10 @@ static int s_show(struct seq_file *m, void *p)
  		unsigned long overflows = cachep->node_overflow;
  
  		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
@@ -79085,7 +79122,7 @@
  	/* cpu stats */
  	{
 diff --git a/mm/slub.c b/mm/slub.c
-index 4996fc7..fad5f48 100644
+index 4996fc7..22ae4a8 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -29,6 +29,8 @@
@@ -79110,7 +79147,16 @@
  
  #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  		SLAB_CACHE_DMA | SLAB_NOTRACK)
-@@ -321,6 +325,95 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
+@@ -201,6 +205,8 @@ struct track {
+ 
+ enum track_item { TRACK_ALLOC, TRACK_FREE };
+ 
++static DEFINE_SPINLOCK(cache_chain_lock);
++
+ #ifdef CONFIG_SLUB_DEBUG
+ static int sysfs_slab_add(struct kmem_cache *);
+ static int sysfs_slab_alias(struct kmem_cache *, const char *);
+@@ -321,6 +327,90 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
  	return x.x & OO_MASK;
  }
  
@@ -79198,15 +79244,10 @@
 +}
 +#endif
 +
-+void show_slab_info(void)
-+{
-+	/* FIXME - show it */
-+}
-+
  #ifdef CONFIG_SLUB_DEBUG
  /*
   * Debug settings:
-@@ -1105,6 +1198,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1105,6 +1195,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  	struct kmem_cache_order_objects oo = s->oo;
  	gfp_t alloc_gfp;
  
@@ -79214,7 +79255,7 @@
  	flags |= s->allocflags;
  
  	/*
-@@ -1149,9 +1243,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1149,9 +1240,12 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
  		1 << oo_order(oo));
  
@@ -79227,7 +79268,7 @@
  static void setup_object(struct kmem_cache *s, struct page *page,
  				void *object)
  {
-@@ -1174,6 +1271,18 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+@@ -1174,6 +1268,18 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  	if (!page)
  		goto out;
  
@@ -79246,7 +79287,7 @@
  	inc_slabs_node(s, page_to_nid(page), page->objects);
  	page->slab = s;
  	page->flags |= 1 << PG_slab;
-@@ -1225,6 +1334,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
+@@ -1225,6 +1331,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
  
  	__ClearPageSlab(page);
  	reset_page_mapcount(page);
@@ -79260,7 +79301,7 @@
  	if (current->reclaim_state)
  		current->reclaim_state->reclaimed_slab += pages;
  	__free_pages(page, order);
-@@ -1249,6 +1365,8 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+@@ -1249,6 +1362,8 @@ static void free_slab(struct kmem_cache *s, struct page *page)
  		call_rcu(head, rcu_free_slab);
  	} else
  		__free_slab(s, page);
@@ -79269,7 +79310,7 @@
  }
  
  static void discard_slab(struct kmem_cache *s, struct page *page)
-@@ -1733,6 +1851,13 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
+@@ -1733,6 +1848,13 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
  		c->freelist = object[c->offset];
  		stat(c, ALLOC_FASTPATH);
  	}
@@ -79283,7 +79324,7 @@
  	local_irq_restore(flags);
  
  	if (unlikely((gfpflags & __GFP_ZERO) && object))
-@@ -1875,6 +2000,9 @@ static __always_inline void slab_free(struct kmem_cache *s,
+@@ -1875,6 +1997,9 @@ static __always_inline void slab_free(struct kmem_cache *s,
  	c = get_cpu_slab(s, smp_processor_id());
  	kmemcheck_slab_free(s, object, c->objsize);
  	debug_check_no_locks_freed(object, c->objsize);
@@ -79293,7 +79334,7 @@
  	if (!(s->flags & SLAB_DEBUG_OBJECTS))
  		debug_check_no_obj_freed(object, c->objsize);
  	if (likely(page == c->page && c->node >= 0)) {
-@@ -2497,6 +2625,9 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
+@@ -2497,6 +2622,9 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
  #ifdef CONFIG_NUMA
  	s->remote_node_defrag_ratio = 1000;
  #endif
@@ -79303,7 +79344,31 @@
  	if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
  		goto error;
  
-@@ -2653,6 +2784,10 @@ EXPORT_SYMBOL(kmem_cache_destroy);
+@@ -2630,9 +2758,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
+ void kmem_cache_destroy(struct kmem_cache *s)
+ {
+ 	down_write(&slub_lock);
++	spin_lock(&cache_chain_lock);
+ 	s->refcount--;
+ 	if (!s->refcount) {
+ 		list_del(&s->list);
++		spin_unlock(&cache_chain_lock);
+ 		up_write(&slub_lock);
+ 		if (kmem_cache_close(s)) {
+ 			printk(KERN_ERR "SLUB %s: %s called for cache that "
+@@ -2642,8 +2772,10 @@ void kmem_cache_destroy(struct kmem_cache *s)
+ 		if (s->flags & SLAB_DESTROY_BY_RCU)
+ 			rcu_barrier();
+ 		sysfs_slab_remove(s);
+-	} else
++	} else {
++		spin_unlock(&cache_chain_lock);
+ 		up_write(&slub_lock);
++	}
+ }
+ EXPORT_SYMBOL(kmem_cache_destroy);
+ 
+@@ -2653,6 +2785,10 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  
  struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
  EXPORT_SYMBOL(kmalloc_caches);
@@ -79314,7 +79379,7 @@
  
  static int __init setup_slub_min_order(char *str)
  {
-@@ -2695,6 +2830,11 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
+@@ -2695,6 +2831,11 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
  {
  	unsigned int flags = 0;
  
@@ -79326,7 +79391,27 @@
  	if (gfp_flags & SLUB_DMA)
  		flags = SLAB_CACHE_DMA;
  
-@@ -2843,11 +2983,14 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
+@@ -2706,7 +2847,9 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
+ 								flags, NULL))
+ 		goto panic;
+ 
++	spin_lock(&cache_chain_lock);
+ 	list_add(&s->list, &slab_caches);
++	spin_unlock(&cache_chain_lock);
+ 
+ 	if (sysfs_slab_add(s))
+ 		goto panic;
+@@ -2779,7 +2922,9 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
+ 		goto unlock_out;
+ 	}
+ 
++	spin_lock(&cache_chain_lock);
+ 	list_add(&s->list, &slab_caches);
++	spin_unlock(&cache_chain_lock);
+ 	kmalloc_caches_dma[index] = s;
+ 
+ 	if (slab_state >= SYSFS)
+@@ -2843,11 +2988,14 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  		index = fls(size - 1);
  
  #ifdef CONFIG_ZONE_DMA
@@ -79343,7 +79428,7 @@
  }
  
  void *__kmalloc(size_t size, gfp_t flags)
-@@ -3187,6 +3330,11 @@ void __init kmem_cache_init(void)
+@@ -3187,6 +3335,11 @@ void __init kmem_cache_init(void)
  	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
  		sizeof(struct kmem_cache_node), GFP_NOWAIT);
  	kmalloc_caches[0].refcount = -1;
@@ -79355,7 +79440,7 @@
  	caches++;
  
  	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-@@ -3199,17 +3347,29 @@ void __init kmem_cache_init(void)
+@@ -3199,17 +3352,29 @@ void __init kmem_cache_init(void)
  	if (KMALLOC_MIN_SIZE <= 32) {
  		create_kmalloc_cache(&kmalloc_caches[1],
  				"kmalloc-96", 96, GFP_NOWAIT);
@@ -79385,7 +79470,7 @@
  		caches++;
  	}
  
-@@ -3255,9 +3415,14 @@ void __init kmem_cache_init(void)
+@@ -3255,9 +3420,14 @@ void __init kmem_cache_init(void)
  	slab_state = UP;
  
  	/* Provide the correct kmalloc names now that the caches are up */
@@ -79401,7 +79486,23 @@
  
  #ifdef CONFIG_SMP
  	register_cpu_notifier(&slab_notifier);
-@@ -4555,6 +4720,8 @@ static char *create_unique_id(struct kmem_cache *s)
+@@ -3383,11 +3553,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+ 	if (s) {
+ 		if (kmem_cache_open(s, GFP_KERNEL, name,
+ 				size, align, flags, ctor)) {
++			spin_lock(&cache_chain_lock);
+ 			list_add(&s->list, &slab_caches);
++			spin_unlock(&cache_chain_lock);
+ 			up_write(&slub_lock);
+ 			if (sysfs_slab_add(s)) {
+ 				down_write(&slub_lock);
++				spin_lock(&cache_chain_lock);
+ 				list_del(&s->list);
++				spin_unlock(&cache_chain_lock);
+ 				up_write(&slub_lock);
+ 				kfree(s);
+ 				goto err;
+@@ -4555,6 +4729,8 @@ static char *create_unique_id(struct kmem_cache *s)
  		*p++ = 'a';
  	if (s->flags & SLAB_DEBUG_FREE)
  		*p++ = 'F';
@@ -79410,6 +79511,83 @@
  	if (!(s->flags & SLAB_NOTRACK))
  		*p++ = 't';
  	if (p != name + 1)
+@@ -4707,6 +4883,76 @@ static void print_slabinfo_header(struct seq_file *m)
+ 	seq_putc(m, '\n');
+ }
+ 
++#define SHOW_TOP_SLABS	10
++
++static unsigned long get_cache_size(struct kmem_cache *cache)
++{
++	unsigned long flags;
++	unsigned long slabs;
++	struct kmem_cache_node *n;
++	struct list_head *lh;
++	int cpu, node;
++
++	slabs = 0;
++
++	for_each_online_cpu(cpu)
++		slabs++;
++
++	for_each_online_node(node) {
++		n = get_node(cache, node);
++		if (!n)
++			continue;
++		spin_lock_irqsave(&n->list_lock, flags);
++#ifdef CONFIG_SLUB_DEBUG
++		list_for_each(lh, &n->full)
++			slabs++;
++#endif
++		list_for_each(lh, &n->partial)
++			slabs++;
++		spin_unlock_irqrestore(&n->list_lock, flags);
++	}
++
++	return slabs * (PAGE_SIZE << oo_order(cache->oo));
++}
++
++void show_slab_info(void)
++{
++	int i, j;
++	unsigned long size;
++	struct kmem_cache *ptr;
++	unsigned long sizes[SHOW_TOP_SLABS];
++	struct kmem_cache *top[SHOW_TOP_SLABS];
++
++	memset(top, 0, sizeof(top));
++	memset(sizes, 0, sizeof(sizes));
++
++	printk("Top %d caches:\n", SHOW_TOP_SLABS);
++
++	spin_lock(&cache_chain_lock);
++	list_for_each_entry(ptr, &slab_caches, list) {
++		size = get_cache_size(ptr);
++
++		j = 0;
++		for (i = 1; i < SHOW_TOP_SLABS; i++) {
++			if (sizes[i] < sizes[j])
++				j = i;
++		}
++		if (size > sizes[j]) {
++			sizes[j] = size;
++			top[j] = ptr;
++		}
++	}
++
++	for (i = 0; i < SHOW_TOP_SLABS; i++) {
++		if (top[i])
++			printk("%-21s: size %10lu objsize %10u\n",
++				top[i]->name, sizes[i],
++				top[i]->size);
++	}
++
++	spin_unlock(&cache_chain_lock);
++}
++
+ static void *s_start(struct seq_file *m, loff_t *pos)
+ {
+ 	loff_t n = *pos;
 diff --git a/mm/swap_state.c b/mm/swap_state.c
 index 6d1daeb..8e4805b 100644
 --- a/mm/swap_state.c
@@ -85159,7 +85337,7 @@
 +	(void)set_exec_env(ve);
  }
 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
-index 0fa9f70..ca1c6bf 100644
+index c322f44..2c1435a 100644
 --- a/net/ipv4/udp.c
 +++ b/net/ipv4/udp.c
 @@ -138,6 +138,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,


