[kernel] r18782 - in dists/sid/linux-2.6/debian: . patches/features/all/rt patches/series
Uwe Kleine-König
ukleinek-guest at alioth.debian.org
Sun Mar 4 13:31:37 UTC 2012
Author: ukleinek-guest
Date: Sun Mar 4 13:31:34 2012
New Revision: 18782
Log:
[x86] Update rt featureset to 3.2.9-rt15
Added:
dists/sid/linux-2.6/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/fs-dentry-use-seqlock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/fs-protect-opencoded-isize-seqcount.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/fs-struct-use-seqlock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/ia64-vdso-use-seqcount.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/net-u64-stat-protect-seqcount.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-timer-fix-hotplug-for-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-provide-seq-spin-lock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/time-remove-bogus-comments.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-use-seqcount.patch
Deleted:
dists/sid/linux-2.6/debian/patches/features/all/rt/acpi-gpe-use-wait-simple.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/acpi-make-ec-lock-raw-as-well.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/acpi-make-gbl-hardware-lock-raw.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-raw-seqlock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/timekeeping-covert-xtimelock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/wait-simple-version.patch
Modified:
dists/sid/linux-2.6/debian/changelog
dists/sid/linux-2.6/debian/patches/features/all/rt/arm-enable-interrupts-in-signal-code.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/filemap-fix-up.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/latency-hist.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/localversion.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/net-wireless-warn-nort.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-re-_3_0-rt4.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/sched-delay-put-task.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/sched-migrate-disable.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/sched-no-work-when-pi-blocked.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-use-seqcount.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/series
dists/sid/linux-2.6/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-local-lock.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch
dists/sid/linux-2.6/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch
dists/sid/linux-2.6/debian/patches/series/base-extra
Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/changelog Sun Mar 4 13:31:34 2012 (r18782)
@@ -37,6 +37,9 @@
* gspca: Enable USB_GSPCA_TOPRO as module
* dvb-usb: Enable DVB_USB_PCTV452E, DVB_USB_MXL111SF as modules
+ [ Uwe Kleine-König ]
+ * [x86] Update rt featureset to 3.2.9-rt15
+
-- Bastian Blank <waldi at debian.org> Thu, 01 Mar 2012 11:47:17 +0100
linux-2.6 (3.2.7-1) unstable; urgency=low
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/arm-enable-interrupts-in-signal-code.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/arm-enable-interrupts-in-signal-code.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/arm-enable-interrupts-in-signal-code.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -11,7 +11,7 @@
===================================================================
--- linux-3.2.orig/arch/arm/kernel/signal.c
+++ linux-3.2/arch/arm/kernel/signal.c
-@@ -673,6 +673,9 @@ static void do_signal(struct pt_regs *re
+@@ -672,6 +672,9 @@ static void do_signal(struct pt_regs *re
if (!user_mode(regs))
return;
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-lock-rt-tweak.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -11,7 +11,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -2595,7 +2595,7 @@ extern int _cond_resched(void);
+@@ -2599,7 +2599,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-softirq-rt.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-softirq-rt.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/cond-resched-softirq-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -12,7 +12,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -2598,12 +2598,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2602,12 +2602,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,115 @@
+Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Fri, 02 Mar 2012 10:36:57 -0500
+
+Tasks can block on hotplug.lock in pin_current_cpu(), but their state
+might be != RUNNING. So the mutex wakeup will set the state
+unconditionally to RUNNING. That might cause spurious unexpected
+wakeups. We could provide a state preserving mutex_lock() function,
+but this is semantically backwards. So instead we convert the
+hotplug.lock() to a spinlock for RT, which has the state preserving
+semantics already.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index fa40834..c25b5ff 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -46,7 +46,12 @@ static int cpu_hotplug_disabled;
+
+ static struct {
+ struct task_struct *active_writer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* Makes the lock keep the task's state */
++ spinlock_t lock;
++#else
+ struct mutex lock; /* Synchronizes accesses to refcount, */
++#endif
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+@@ -58,6 +63,14 @@ static struct {
+ .refcount = 0,
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock() spin_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() spin_unlock(&cpu_hotplug.lock)
++#else
++# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
++#endif
++
+ struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+@@ -87,8 +100,8 @@ retry:
+ return;
+ }
+ preempt_enable();
+- mutex_lock(&cpu_hotplug.lock);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_lock();
++ hotplug_unlock();
+ preempt_disable();
+ goto retry;
+ }
+@@ -161,9 +174,9 @@ void get_online_cpus(void)
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ cpu_hotplug.refcount++;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(get_online_cpus);
+@@ -172,10 +185,10 @@ void put_online_cpus(void)
+ {
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+ wake_up_process(cpu_hotplug.active_writer);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(put_online_cpus);
+@@ -207,11 +220,11 @@ static void cpu_hotplug_begin(void)
+ cpu_hotplug.active_writer = current;
+
+ for (;;) {
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (likely(!cpu_hotplug.refcount))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ schedule();
+ }
+ }
+@@ -219,7 +232,7 @@ static void cpu_hotplug_begin(void)
+ static void cpu_hotplug_done(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ }
+
+ #else /* #if CONFIG_HOTPLUG_CPU */
+
+
+
+
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/epoll-use-get-cpu-light.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -11,7 +11,7 @@
===================================================================
--- linux-3.2.orig/fs/eventpoll.c
+++ linux-3.2/fs/eventpoll.c
-@@ -438,12 +438,12 @@ static int ep_poll_wakeup_proc(void *pri
+@@ -464,12 +464,12 @@ static int ep_poll_wakeup_proc(void *pri
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
@@ -25,4 +25,4 @@
+ put_cpu_light();
}
- /*
+ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/filemap-fix-up.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/filemap-fix-up.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/filemap-fix-up.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -13,7 +13,7 @@
===================================================================
--- linux-3.2.orig/mm/filemap.c
+++ linux-3.2/mm/filemap.c
-@@ -2058,7 +2058,7 @@ size_t iov_iter_copy_from_user_atomic(st
+@@ -2044,7 +2044,7 @@ size_t iov_iter_copy_from_user_atomic(st
char *kaddr;
size_t copied;
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -18,10 +18,14 @@
Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-Index: linux-rt.git/arch/x86/kernel/traps.c
+---
+ arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+Index: linux-3.2/arch/x86/kernel/traps.c
===================================================================
---- linux-rt.git.orig/arch/x86/kernel/traps.c
-+++ linux-rt.git/arch/x86/kernel/traps.c
+--- linux-3.2.orig/arch/x86/kernel/traps.c
++++ linux-3.2/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struc
local_irq_enable();
}
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/fs-dentry-use-seqlock.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/fs-dentry-use-seqlock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,3033 @@
+Subject: fs: dentry use seqlock
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 27 Feb 2012 18:08:46 +0100
+
+Replace the open coded seqlock with a real seqlock, so RT can handle
+it.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+
+---
+ arch/powerpc/platforms/cell/spufs/inode.c | 6
+ drivers/infiniband/hw/ipath/ipath_fs.c | 6
+ drivers/infiniband/hw/qib/qib_fs.c | 6
+ drivers/usb/core/inode.c | 12 -
+ fs/9p/vfs_dir.c | 4
+ fs/afs/dir.c | 4
+ fs/autofs4/autofs_i.h | 24 +-
+ fs/autofs4/expire.c | 44 ++---
+ fs/autofs4/root.c | 38 ++--
+ fs/btrfs/export.c | 4
+ fs/btrfs/inode.c | 4
+ fs/ceph/caps.c | 8
+ fs/ceph/debugfs.c | 8
+ fs/ceph/dir.c | 30 +--
+ fs/ceph/export.c | 4
+ fs/ceph/inode.c | 20 +-
+ fs/ceph/mds_client.c | 18 +-
+ fs/cifs/dir.c | 6
+ fs/coda/cache.c | 4
+ fs/configfs/configfs_internal.h | 4
+ fs/configfs/inode.c | 6
+ fs/dcache.c | 253 ++++++++++++++----------------
+ fs/dcookies.c | 8
+ fs/exportfs/expfs.c | 12 -
+ fs/fat/inode.c | 4
+ fs/fat/namei_vfat.c | 4
+ fs/fs-writeback.c | 4
+ fs/fuse/inode.c | 4
+ fs/gfs2/export.c | 4
+ fs/isofs/export.c | 4
+ fs/libfs.c | 36 ++--
+ fs/namei.c | 42 ++--
+ fs/namespace.c | 8
+ fs/ncpfs/dir.c | 6
+ fs/ncpfs/ncplib_kernel.h | 8
+ fs/nfs/dir.c | 6
+ fs/nfs/getroot.c | 12 -
+ fs/nfs/namespace.c | 16 -
+ fs/nfs/unlink.c | 20 +-
+ fs/nilfs2/namei.c | 4
+ fs/notify/fsnotify.c | 8
+ fs/notify/vfsmount_mark.c | 24 +-
+ fs/ocfs2/dcache.c | 6
+ fs/ocfs2/export.c | 4
+ fs/reiserfs/inode.c | 4
+ fs/udf/namei.c | 4
+ fs/xfs/xfs_export.c | 8
+ include/linux/dcache.h | 15 -
+ include/linux/fs.h | 4
+ include/linux/fsnotify_backend.h | 6
+ kernel/cgroup.c | 22 +-
+ net/sunrpc/rpc_pipe.c | 6
+ security/selinux/selinuxfs.c | 14 -
+ 53 files changed, 418 insertions(+), 422 deletions(-)
+
+Index: linux-3.2/arch/powerpc/platforms/cell/spufs/inode.c
+===================================================================
+--- linux-3.2.orig/arch/powerpc/platforms/cell/spufs/inode.c
++++ linux-3.2/arch/powerpc/platforms/cell/spufs/inode.c
+@@ -165,18 +165,18 @@ static void spufs_prune_dir(struct dentr
+
+ mutex_lock(&dir->d_inode->i_mutex);
+ list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!(d_unhashed(dentry)) && dentry->d_inode) {
+ dget_dlock(dentry);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ simple_unlink(dir->d_inode, dentry);
+ /* XXX: what was dcache_lock protecting here? Other
+ * filesystems (IB, configfs) release dcache_lock
+ * before unlink */
+ dput(dentry);
+ } else {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ }
+ shrink_dcache_parent(dir);
+Index: linux-3.2/drivers/infiniband/hw/ipath/ipath_fs.c
+===================================================================
+--- linux-3.2.orig/drivers/infiniband/hw/ipath/ipath_fs.c
++++ linux-3.2/drivers/infiniband/hw/ipath/ipath_fs.c
+@@ -277,14 +277,14 @@ static int remove_file(struct dentry *pa
+ goto bail;
+ }
+
+- spin_lock(&tmp->d_lock);
++ seq_spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_dlock(tmp);
+ __d_drop(tmp);
+- spin_unlock(&tmp->d_lock);
++ seq_spin_unlock(&tmp->d_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else
+- spin_unlock(&tmp->d_lock);
++ seq_spin_unlock(&tmp->d_lock);
+
+ ret = 0;
+ bail:
+Index: linux-3.2/drivers/infiniband/hw/qib/qib_fs.c
+===================================================================
+--- linux-3.2.orig/drivers/infiniband/hw/qib/qib_fs.c
++++ linux-3.2/drivers/infiniband/hw/qib/qib_fs.c
+@@ -453,14 +453,14 @@ static int remove_file(struct dentry *pa
+ goto bail;
+ }
+
+- spin_lock(&tmp->d_lock);
++ seq_spin_lock(&tmp->d_lock);
+ if (!(d_unhashed(tmp) && tmp->d_inode)) {
+ dget_dlock(tmp);
+ __d_drop(tmp);
+- spin_unlock(&tmp->d_lock);
++ seq_spin_unlock(&tmp->d_lock);
+ simple_unlink(parent->d_inode, tmp);
+ } else {
+- spin_unlock(&tmp->d_lock);
++ seq_spin_unlock(&tmp->d_lock);
+ }
+
+ ret = 0;
+Index: linux-3.2/drivers/usb/core/inode.c
+===================================================================
+--- linux-3.2.orig/drivers/usb/core/inode.c
++++ linux-3.2/drivers/usb/core/inode.c
+@@ -343,19 +343,19 @@ static int usbfs_empty (struct dentry *d
+ {
+ struct list_head *list;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ list_for_each(list, &dentry->d_subdirs) {
+ struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
+
+- spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
+ if (usbfs_positive(de)) {
+- spin_unlock(&de->d_lock);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&de->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return 0;
+ }
+- spin_unlock(&de->d_lock);
++ seq_spin_unlock(&de->d_lock);
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return 1;
+ }
+
+Index: linux-3.2/fs/9p/vfs_dir.c
+===================================================================
+--- linux-3.2.orig/fs/9p/vfs_dir.c
++++ linux-3.2/fs/9p/vfs_dir.c
+@@ -107,7 +107,7 @@ static int v9fs_alloc_rdir_buf(struct fi
+ err = -ENOMEM;
+ goto exit;
+ }
+- spin_lock(&filp->f_dentry->d_lock);
++ seq_spin_lock(&filp->f_dentry->d_lock);
+ if (!fid->rdir) {
+ rdir->buf = (uint8_t *)rdir + sizeof(struct p9_rdir);
+ mutex_init(&rdir->mutex);
+@@ -115,7 +115,7 @@ static int v9fs_alloc_rdir_buf(struct fi
+ fid->rdir = (void *) rdir;
+ rdir = NULL;
+ }
+- spin_unlock(&filp->f_dentry->d_lock);
++ seq_spin_unlock(&filp->f_dentry->d_lock);
+ kfree(rdir);
+ }
+ exit:
+Index: linux-3.2/fs/afs/dir.c
+===================================================================
+--- linux-3.2.orig/fs/afs/dir.c
++++ linux-3.2/fs/afs/dir.c
+@@ -705,9 +705,9 @@ out_skip:
+
+ /* the dirent, if it exists, now points to a different vnode */
+ not_found:
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ out_bad:
+ if (dentry->d_inode) {
+Index: linux-3.2/fs/autofs4/autofs_i.h
+===================================================================
+--- linux-3.2.orig/fs/autofs4/autofs_i.h
++++ linux-3.2/fs/autofs4/autofs_i.h
+@@ -197,9 +197,9 @@ static inline void __managed_dentry_set_
+
+ static inline void managed_dentry_set_automount(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_set_automount(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ static inline void __managed_dentry_clear_automount(struct dentry *dentry)
+@@ -209,9 +209,9 @@ static inline void __managed_dentry_clea
+
+ static inline void managed_dentry_clear_automount(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_clear_automount(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ static inline void __managed_dentry_set_transit(struct dentry *dentry)
+@@ -221,9 +221,9 @@ static inline void __managed_dentry_set_
+
+ static inline void managed_dentry_set_transit(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_set_transit(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ static inline void __managed_dentry_clear_transit(struct dentry *dentry)
+@@ -233,9 +233,9 @@ static inline void __managed_dentry_clea
+
+ static inline void managed_dentry_clear_transit(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_clear_transit(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ static inline void __managed_dentry_set_managed(struct dentry *dentry)
+@@ -245,9 +245,9 @@ static inline void __managed_dentry_set_
+
+ static inline void managed_dentry_set_managed(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_set_managed(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ static inline void __managed_dentry_clear_managed(struct dentry *dentry)
+@@ -257,9 +257,9 @@ static inline void __managed_dentry_clea
+
+ static inline void managed_dentry_clear_managed(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __managed_dentry_clear_managed(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /* Initializing function */
+Index: linux-3.2/fs/autofs4/expire.c
+===================================================================
+--- linux-3.2.orig/fs/autofs4/expire.c
++++ linux-3.2/fs/autofs4/expire.c
+@@ -99,7 +99,7 @@ static struct dentry *get_next_positive_
+ spin_lock(&sbi->lookup_lock);
+
+ if (prev == NULL) {
+- spin_lock(&root->d_lock);
++ seq_spin_lock(&root->d_lock);
+ prev = dget_dlock(root);
+ next = prev->d_subdirs.next;
+ p = prev;
+@@ -107,12 +107,12 @@ static struct dentry *get_next_positive_
+ }
+
+ p = prev;
+- spin_lock(&p->d_lock);
++ seq_spin_lock(&p->d_lock);
+ again:
+ next = p->d_u.d_child.next;
+ start:
+ if (next == &root->d_subdirs) {
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ dput(prev);
+ return NULL;
+@@ -120,16 +120,16 @@ start:
+
+ q = list_entry(next, struct dentry, d_u.d_child);
+
+- spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+ /* Negative dentry - try next */
+ if (!simple_positive(q)) {
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ p = q;
+ goto again;
+ }
+ dget_dlock(q);
+- spin_unlock(&q->d_lock);
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&q->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ dput(prev);
+@@ -153,7 +153,7 @@ static struct dentry *get_next_positive_
+ spin_lock(&sbi->lookup_lock);
+ relock:
+ p = prev;
+- spin_lock(&p->d_lock);
++ seq_spin_lock(&p->d_lock);
+ again:
+ next = p->d_subdirs.next;
+ if (next == &p->d_subdirs) {
+@@ -161,19 +161,19 @@ again:
+ struct dentry *parent;
+
+ if (p == root) {
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ dput(prev);
+ return NULL;
+ }
+
+ parent = p->d_parent;
+- if (!spin_trylock(&parent->d_lock)) {
+- spin_unlock(&p->d_lock);
++ if (!seq_spin_trylock(&parent->d_lock)) {
++ seq_spin_unlock(&p->d_lock);
+ cpu_relax();
+ goto relock;
+ }
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ next = p->d_u.d_child.next;
+ p = parent;
+ if (next != &parent->d_subdirs)
+@@ -182,16 +182,16 @@ again:
+ }
+ ret = list_entry(next, struct dentry, d_u.d_child);
+
+- spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
+ /* Negative dentry - try next */
+ if (!simple_positive(ret)) {
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ p = ret;
+ goto again;
+ }
+ dget_dlock(ret);
+- spin_unlock(&ret->d_lock);
+- spin_unlock(&p->d_lock);
++ seq_spin_unlock(&ret->d_lock);
++ seq_spin_unlock(&p->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ dput(prev);
+@@ -462,11 +462,11 @@ found:
+ init_completion(&ino->expire_complete);
+ spin_unlock(&sbi->fs_lock);
+ spin_lock(&sbi->lookup_lock);
+- spin_lock(&expired->d_parent->d_lock);
+- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&expired->d_parent->d_lock);
++ seq_spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+ list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
+- spin_unlock(&expired->d_lock);
+- spin_unlock(&expired->d_parent->d_lock);
++ seq_spin_unlock(&expired->d_lock);
++ seq_spin_unlock(&expired->d_parent->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ return expired;
+ }
+@@ -556,7 +556,7 @@ int autofs4_do_expire_multi(struct super
+
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_EXPIRING;
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!ret) {
+ if ((IS_ROOT(dentry) ||
+ (autofs_type_indirect(sbi->type) &&
+@@ -564,7 +564,7 @@ int autofs4_do_expire_multi(struct super
+ !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
+ __managed_dentry_set_automount(dentry);
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ complete_all(&ino->expire_complete);
+ spin_unlock(&sbi->fs_lock);
+ dput(dentry);
+Index: linux-3.2/fs/autofs4/root.c
+===================================================================
+--- linux-3.2.orig/fs/autofs4/root.c
++++ linux-3.2/fs/autofs4/root.c
+@@ -124,13 +124,13 @@ static int autofs4_dir_open(struct inode
+ * it.
+ */
+ spin_lock(&sbi->lookup_lock);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ return -ENOENT;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ out:
+@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_act
+ ino = list_entry(p, struct autofs_info, active);
+ active = ino->dentry;
+
+- spin_lock(&active->d_lock);
++ seq_spin_lock(&active->d_lock);
+
+ /* Already gone? */
+ if (active->d_count == 0)
+@@ -199,12 +199,12 @@ static struct dentry *autofs4_lookup_act
+
+ if (d_unhashed(active)) {
+ dget_dlock(active);
+- spin_unlock(&active->d_lock);
++ seq_spin_unlock(&active->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ return active;
+ }
+ next:
+- spin_unlock(&active->d_lock);
++ seq_spin_unlock(&active->d_lock);
+ }
+ spin_unlock(&sbi->lookup_lock);
+
+@@ -231,7 +231,7 @@ static struct dentry *autofs4_lookup_exp
+ ino = list_entry(p, struct autofs_info, expiring);
+ expiring = ino->dentry;
+
+- spin_lock(&expiring->d_lock);
++ seq_spin_lock(&expiring->d_lock);
+
+ /* Bad luck, we've already been dentry_iput */
+ if (!expiring->d_inode)
+@@ -251,12 +251,12 @@ static struct dentry *autofs4_lookup_exp
+
+ if (d_unhashed(expiring)) {
+ dget_dlock(expiring);
+- spin_unlock(&expiring->d_lock);
++ seq_spin_unlock(&expiring->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ return expiring;
+ }
+ next:
+- spin_unlock(&expiring->d_lock);
++ seq_spin_unlock(&expiring->d_lock);
+ }
+ spin_unlock(&sbi->lookup_lock);
+
+@@ -382,12 +382,12 @@ static struct vfsmount *autofs4_d_automo
+ if (have_submounts(dentry))
+ goto done;
+ } else {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ goto done;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+@@ -410,12 +410,12 @@ done:
+ * an actual mount so ->d_automount() won't be called during
+ * the follow.
+ */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if ((!d_mountpoint(dentry) &&
+ !list_empty(&dentry->d_subdirs)) ||
+ (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+ __managed_dentry_clear_automount(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ spin_unlock(&sbi->fs_lock);
+
+@@ -597,9 +597,9 @@ static int autofs4_dir_unlink(struct ino
+
+ spin_lock(&sbi->lookup_lock);
+ __autofs4_add_expiring(dentry);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ return 0;
+@@ -670,15 +670,15 @@ static int autofs4_dir_rmdir(struct inod
+ return -EACCES;
+
+ spin_lock(&sbi->lookup_lock);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+ return -ENOTEMPTY;
+ }
+ __autofs4_add_expiring(dentry);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&sbi->lookup_lock);
+
+ if (sbi->version < 5)
+Index: linux-3.2/fs/btrfs/export.c
+===================================================================
+--- linux-3.2.orig/fs/btrfs/export.c
++++ linux-3.2/fs/btrfs/export.c
+@@ -40,14 +40,14 @@ static int btrfs_encode_fh(struct dentry
+ struct inode *parent;
+ u64 parent_root_id;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+
+ parent = dentry->d_parent->d_inode;
+ fid->parent_objectid = BTRFS_I(parent)->location.objectid;
+ fid->parent_gen = parent->i_generation;
+ parent_root_id = BTRFS_I(parent)->root->objectid;
+
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ if (parent_root_id != fid->root_objectid) {
+ fid->parent_root_objectid = parent_root_id;
+Index: linux-3.2/fs/btrfs/inode.c
+===================================================================
+--- linux-3.2.orig/fs/btrfs/inode.c
++++ linux-3.2/fs/btrfs/inode.c
+@@ -4002,9 +4002,9 @@ static struct dentry *btrfs_lookup(struc
+
+ ret = d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry);
+ if (unlikely(d_need_lookup(dentry))) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ return ret;
+ }
+Index: linux-3.2/fs/ceph/caps.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/caps.c
++++ linux-3.2/fs/ceph/caps.c
+@@ -3066,14 +3066,14 @@ int ceph_encode_dentry_release(void **p,
+ * doesn't have to be perfect; the mds will revoke anything we don't
+ * release.
+ */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (di->lease_session && di->lease_session->s_mds == mds)
+ force = 1;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (ret && di->lease_session && di->lease_session->s_mds == mds) {
+ dout("encode_dentry_release %p mds%d seq %d\n",
+ dentry, mds, (int)di->lease_seq);
+@@ -3083,6 +3083,6 @@ int ceph_encode_dentry_release(void **p,
+ rel->dname_seq = cpu_to_le32(di->lease_seq);
+ __ceph_mdsc_drop_dentry_lease(dentry);
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return ret;
+ }
+Index: linux-3.2/fs/ceph/debugfs.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/debugfs.c
++++ linux-3.2/fs/ceph/debugfs.c
+@@ -82,13 +82,13 @@ static int mdsc_show(struct seq_file *s,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+- spin_lock(&req->r_dentry->d_lock);
++ seq_spin_lock(&req->r_dentry->d_lock);
+ seq_printf(s, " #%llx/%.*s (%s)",
+ ceph_ino(req->r_dentry->d_parent->d_inode),
+ req->r_dentry->d_name.len,
+ req->r_dentry->d_name.name,
+ path ? path : "");
+- spin_unlock(&req->r_dentry->d_lock);
++ seq_spin_unlock(&req->r_dentry->d_lock);
+ kfree(path);
+ } else if (req->r_path1) {
+ seq_printf(s, " #%llx/%s", req->r_ino1.ino,
+@@ -100,13 +100,13 @@ static int mdsc_show(struct seq_file *s,
+ &pathbase, 0);
+ if (IS_ERR(path))
+ path = NULL;
+- spin_lock(&req->r_old_dentry->d_lock);
++ seq_spin_lock(&req->r_old_dentry->d_lock);
+ seq_printf(s, " #%llx/%.*s (%s)",
+ ceph_ino(req->r_old_dentry_dir),
+ req->r_old_dentry->d_name.len,
+ req->r_old_dentry->d_name.name,
+ path ? path : "");
+- spin_unlock(&req->r_old_dentry->d_lock);
++ seq_spin_unlock(&req->r_old_dentry->d_lock);
+ kfree(path);
+ } else if (req->r_path2) {
+ if (req->r_ino2.ino)
+Index: linux-3.2/fs/ceph/dir.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/dir.c
++++ linux-3.2/fs/ceph/dir.c
+@@ -44,7 +44,7 @@ int ceph_init_dentry(struct dentry *dent
+ if (!di)
+ return -ENOMEM; /* oh well */
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_fsdata) {
+ /* lost a race */
+ kmem_cache_free(ceph_dentry_cachep, di);
+@@ -67,7 +67,7 @@ int ceph_init_dentry(struct dentry *dent
+ dentry->d_fsdata = di;
+ ceph_dentry_lru_add(dentry);
+ out_unlock:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return 0;
+ }
+
+@@ -78,12 +78,12 @@ struct inode *ceph_get_dentry_parent_ino
+ if (!dentry)
+ return NULL;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_parent) {
+ inode = dentry->d_parent->d_inode;
+ ihold(inode);
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return inode;
+ }
+
+@@ -130,7 +130,7 @@ static int __dcache_readdir(struct file
+ dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
+ last);
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+
+ /* start at beginning? */
+ if (filp->f_pos == 2 || last == NULL ||
+@@ -154,7 +154,7 @@ more:
+ fi->flags |= CEPH_F_ATEND;
+ goto out_unlock;
+ }
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!d_unhashed(dentry) && dentry->d_inode &&
+ ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
+ ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
+@@ -164,15 +164,15 @@ more:
+ dentry->d_name.len, dentry->d_name.name, di->offset,
+ filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
+ !dentry->d_inode ? " null" : "");
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ p = p->prev;
+ dentry = list_entry(p, struct dentry, d_u.d_child);
+ di = ceph_dentry(dentry);
+ }
+
+ dget_dlock(dentry);
+- spin_unlock(&dentry->d_lock);
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+@@ -205,12 +205,12 @@ more:
+ goto out;
+ }
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ p = p->prev; /* advance to next dentry */
+ goto more;
+
+ out_unlock:
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ out:
+ if (last)
+ dput(last);
+@@ -950,10 +950,10 @@ static int ceph_rename(struct inode *old
+ */
+ void ceph_invalidate_dentry_lease(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_time = jiffies;
+ ceph_dentry(dentry)->lease_shared_gen = 0;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+@@ -971,7 +971,7 @@ static int dentry_lease_is_valid(struct
+ struct inode *dir = NULL;
+ u32 seq = 0;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ if (di && di->lease_session) {
+ s = di->lease_session;
+@@ -995,7 +995,7 @@ static int dentry_lease_is_valid(struct
+ }
+ }
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ if (session) {
+ ceph_mdsc_lease_send_msg(session, dir, dentry,
+Index: linux-3.2/fs/ceph/export.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/export.c
++++ linux-3.2/fs/ceph/export.c
+@@ -55,9 +55,9 @@ static int ceph_encode_fh(struct dentry
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EINVAL;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ parent = dget(dentry->d_parent);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ if (*max_len >= connected_handle_length) {
+ dout("encode_fh %p connectable\n", dentry);
+Index: linux-3.2/fs/ceph/inode.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/inode.c
++++ linux-3.2/fs/ceph/inode.c
+@@ -809,7 +809,7 @@ static void update_dentry_lease(struct d
+ if (dentry->d_op != &ceph_dentry_ops)
+ return;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
+ dentry, duration, ttl);
+
+@@ -837,7 +837,7 @@ static void update_dentry_lease(struct d
+ di->lease_renew_from = 0;
+ dentry->d_time = ttl;
+ out_unlock:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return;
+ }
+
+@@ -866,13 +866,13 @@ static void ceph_set_dentry_offset(struc
+ di->offset = ceph_inode(inode)->i_max_offset++;
+ spin_unlock(&ci->i_ceph_lock);
+
+- spin_lock(&dir->d_lock);
+- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&dir->d_lock);
++ seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+ list_move(&dn->d_u.d_child, &dir->d_subdirs);
+ dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
+ dn->d_u.d_child.prev, dn->d_u.d_child.next);
+- spin_unlock(&dn->d_lock);
+- spin_unlock(&dir->d_lock);
++ seq_spin_unlock(&dn->d_lock);
++ seq_spin_unlock(&dir->d_lock);
+ }
+
+ /*
+@@ -1254,11 +1254,11 @@ retry_lookup:
+ goto retry_lookup;
+ } else {
+ /* reorder parent's d_subdirs */
+- spin_lock(&parent->d_lock);
+- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&parent->d_lock);
++ seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+ list_move(&dn->d_u.d_child, &parent->d_subdirs);
+- spin_unlock(&dn->d_lock);
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&dn->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ }
+
+ di = dn->d_fsdata;
+Index: linux-3.2/fs/ceph/mds_client.c
+===================================================================
+--- linux-3.2.orig/fs/ceph/mds_client.c
++++ linux-3.2/fs/ceph/mds_client.c
+@@ -1488,25 +1488,25 @@ retry:
+ for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
+ struct inode *inode;
+
+- spin_lock(&temp->d_lock);
++ seq_spin_lock(&temp->d_lock);
+ inode = temp->d_inode;
+ if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
+ dout("build_path path+%d: %p SNAPDIR\n",
+ pos, temp);
+ } else if (stop_on_nosnap && inode &&
+ ceph_snap(inode) == CEPH_NOSNAP) {
+- spin_unlock(&temp->d_lock);
++ seq_spin_unlock(&temp->d_lock);
+ break;
+ } else {
+ pos -= temp->d_name.len;
+ if (pos < 0) {
+- spin_unlock(&temp->d_lock);
++ seq_spin_unlock(&temp->d_lock);
+ break;
+ }
+ strncpy(path + pos, temp->d_name.name,
+ temp->d_name.len);
+ }
+- spin_unlock(&temp->d_lock);
++ seq_spin_unlock(&temp->d_lock);
+ if (pos)
+ path[--pos] = '/';
+ temp = temp->d_parent;
+@@ -2768,7 +2768,7 @@ static void handle_lease(struct ceph_mds
+ if (!dentry)
+ goto release;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ switch (h->action) {
+ case CEPH_MDS_LEASE_REVOKE:
+@@ -2796,7 +2796,7 @@ static void handle_lease(struct ceph_mds
+ }
+ break;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ dput(dentry);
+
+ if (!release)
+@@ -2869,7 +2869,7 @@ void ceph_mdsc_lease_release(struct ceph
+ BUG_ON(dentry == NULL);
+
+ /* is dentry lease valid? */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ if (!di || !di->lease_session ||
+ di->lease_session->s_mds < 0 ||
+@@ -2878,7 +2878,7 @@ void ceph_mdsc_lease_release(struct ceph
+ dout("lease_release inode %p dentry %p -- "
+ "no lease\n",
+ inode, dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return;
+ }
+
+@@ -2886,7 +2886,7 @@ void ceph_mdsc_lease_release(struct ceph
+ session = ceph_get_mds_session(di->lease_session);
+ seq = di->lease_seq;
+ __ceph_mdsc_drop_dentry_lease(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ dout("lease_release inode %p dentry %p to mds%d\n",
+ inode, dentry, session->s_mds);
+Index: linux-3.2/fs/cifs/dir.c
+===================================================================
+--- linux-3.2.orig/fs/cifs/dir.c
++++ linux-3.2/fs/cifs/dir.c
+@@ -83,10 +83,10 @@ cifs_bp_rename_retry:
+ full_path[namelen] = 0; /* trailing null */
+ rcu_read_lock();
+ for (temp = direntry; !IS_ROOT(temp);) {
+- spin_lock(&temp->d_lock);
++ seq_spin_lock(&temp->d_lock);
+ namelen -= 1 + temp->d_name.len;
+ if (namelen < 0) {
+- spin_unlock(&temp->d_lock);
++ seq_spin_unlock(&temp->d_lock);
+ break;
+ } else {
+ full_path[namelen] = dirsep;
+@@ -94,7 +94,7 @@ cifs_bp_rename_retry:
+ temp->d_name.len);
+ cFYI(0, "name: %s", full_path + namelen);
+ }
+- spin_unlock(&temp->d_lock);
++ seq_spin_unlock(&temp->d_lock);
+ temp = temp->d_parent;
+ if (temp == NULL) {
+ cERROR(1, "corrupt dentry");
+Index: linux-3.2/fs/coda/cache.c
+===================================================================
+--- linux-3.2.orig/fs/coda/cache.c
++++ linux-3.2/fs/coda/cache.c
+@@ -92,7 +92,7 @@ static void coda_flag_children(struct de
+ struct list_head *child;
+ struct dentry *de;
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ list_for_each(child, &parent->d_subdirs)
+ {
+ de = list_entry(child, struct dentry, d_u.d_child);
+@@ -101,7 +101,7 @@ static void coda_flag_children(struct de
+ continue;
+ coda_flag_inode(de->d_inode, flag);
+ }
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ return;
+ }
+
+Index: linux-3.2/fs/configfs/configfs_internal.h
+===================================================================
+--- linux-3.2.orig/fs/configfs/configfs_internal.h
++++ linux-3.2/fs/configfs/configfs_internal.h
+@@ -121,7 +121,7 @@ static inline struct config_item *config
+ {
+ struct config_item * item = NULL;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!d_unhashed(dentry)) {
+ struct configfs_dirent * sd = dentry->d_fsdata;
+ if (sd->s_type & CONFIGFS_ITEM_LINK) {
+@@ -130,7 +130,7 @@ static inline struct config_item *config
+ } else
+ item = config_item_get(sd->s_element);
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ return item;
+ }
+Index: linux-3.2/fs/configfs/inode.c
+===================================================================
+--- linux-3.2.orig/fs/configfs/inode.c
++++ linux-3.2/fs/configfs/inode.c
+@@ -251,14 +251,14 @@ void configfs_drop_dentry(struct configf
+ struct dentry * dentry = sd->s_dentry;
+
+ if (dentry) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!(d_unhashed(dentry) && dentry->d_inode)) {
+ dget_dlock(dentry);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ simple_unlink(parent->d_inode, dentry);
+ } else
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ }
+
+Index: linux-3.2/fs/dcache.c
+===================================================================
+--- linux-3.2.orig/fs/dcache.c
++++ linux-3.2/fs/dcache.c
+@@ -172,9 +172,9 @@ static void d_free(struct dentry *dentry
+ */
+ static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
+ {
+- assert_spin_locked(&dentry->d_lock);
++ assert_seq_spin_locked(&dentry->d_lock);
+ /* Go through a barrier */
+- write_seqcount_barrier(&dentry->d_seq);
++ write_seqlock_barrier(&dentry->d_lock);
+ }
+
+ /*
+@@ -190,7 +190,7 @@ static void dentry_iput(struct dentry *
+ if (inode) {
+ dentry->d_inode = NULL;
+ list_del_init(&dentry->d_alias);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&inode->i_lock);
+ if (!inode->i_nlink)
+ fsnotify_inoderemove(inode);
+@@ -199,7 +199,7 @@ static void dentry_iput(struct dentry *
+ else
+ iput(inode);
+ } else {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ }
+
+@@ -215,7 +215,7 @@ static void dentry_unlink_inode(struct d
+ dentry->d_inode = NULL;
+ list_del_init(&dentry->d_alias);
+ dentry_rcuwalk_barrier(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&inode->i_lock);
+ if (!inode->i_nlink)
+ fsnotify_inoderemove(inode);
+@@ -313,7 +313,7 @@ static struct dentry *d_kill(struct dent
+ */
+ dentry->d_flags |= DCACHE_DISCONNECTED;
+ if (parent)
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ dentry_iput(dentry);
+ /*
+ * dentry_iput drops the locks, at which point nobody (except
+@@ -370,9 +370,9 @@ EXPORT_SYMBOL(__d_drop);
+
+ void d_drop(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ EXPORT_SYMBOL(d_drop);
+
+@@ -387,10 +387,10 @@ EXPORT_SYMBOL(d_drop);
+ */
+ void d_clear_need_lookup(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __d_drop(dentry);
+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ EXPORT_SYMBOL(d_clear_need_lookup);
+
+@@ -409,7 +409,7 @@ static inline struct dentry *dentry_kill
+ inode = dentry->d_inode;
+ if (inode && !spin_trylock(&inode->i_lock)) {
+ relock:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ cpu_relax();
+ return dentry; /* try again with same dentry */
+ }
+@@ -417,7 +417,7 @@ relock:
+ parent = NULL;
+ else
+ parent = dentry->d_parent;
+- if (parent && !spin_trylock(&parent->d_lock)) {
++ if (parent && !seq_spin_trylock(&parent->d_lock)) {
+ if (inode)
+ spin_unlock(&inode->i_lock);
+ goto relock;
+@@ -470,11 +470,11 @@ void dput(struct dentry *dentry)
+ repeat:
+ if (dentry->d_count == 1)
+ might_sleep();
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ BUG_ON(!dentry->d_count);
+ if (dentry->d_count > 1) {
+ dentry->d_count--;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return;
+ }
+
+@@ -497,7 +497,7 @@ repeat:
+ dentry_lru_add(dentry);
+
+ dentry->d_count--;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return;
+
+ kill_it:
+@@ -524,9 +524,9 @@ int d_invalidate(struct dentry * dentry)
+ /*
+ * If it's already been dropped, return OK.
+ */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (d_unhashed(dentry)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return 0;
+ }
+ /*
+@@ -534,9 +534,9 @@ int d_invalidate(struct dentry * dentry)
+ * to get rid of unused child entries.
+ */
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ shrink_dcache_parent(dentry);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ }
+
+ /*
+@@ -553,13 +553,13 @@ int d_invalidate(struct dentry * dentry)
+ */
+ if (dentry->d_count > 1 && dentry->d_inode) {
+ if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return -EBUSY;
+ }
+ }
+
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return 0;
+ }
+ EXPORT_SYMBOL(d_invalidate);
+@@ -572,9 +572,9 @@ static inline void __dget_dlock(struct d
+
+ static inline void __dget(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __dget_dlock(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ struct dentry *dget_parent(struct dentry *dentry)
+@@ -588,16 +588,16 @@ repeat:
+ */
+ rcu_read_lock();
+ ret = dentry->d_parent;
+- spin_lock(&ret->d_lock);
++ seq_spin_lock(&ret->d_lock);
+ if (unlikely(ret != dentry->d_parent)) {
+- spin_unlock(&ret->d_lock);
++ seq_spin_unlock(&ret->d_lock);
+ rcu_read_unlock();
+ goto repeat;
+ }
+ rcu_read_unlock();
+ BUG_ON(!ret->d_count);
+ ret->d_count++;
+- spin_unlock(&ret->d_lock);
++ seq_spin_unlock(&ret->d_lock);
+ return ret;
+ }
+ EXPORT_SYMBOL(dget_parent);
+@@ -625,31 +625,31 @@ static struct dentry *__d_find_alias(str
+ again:
+ discon_alias = NULL;
+ list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+- spin_lock(&alias->d_lock);
++ seq_spin_lock(&alias->d_lock);
+ if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+ if (IS_ROOT(alias) &&
+ (alias->d_flags & DCACHE_DISCONNECTED)) {
+ discon_alias = alias;
+ } else if (!want_discon) {
+ __dget_dlock(alias);
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ return alias;
+ }
+ }
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ }
+ if (discon_alias) {
+ alias = discon_alias;
+- spin_lock(&alias->d_lock);
++ seq_spin_lock(&alias->d_lock);
+ if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
+ if (IS_ROOT(alias) &&
+ (alias->d_flags & DCACHE_DISCONNECTED)) {
+ __dget_dlock(alias);
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ return alias;
+ }
+ }
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ goto again;
+ }
+ return NULL;
+@@ -678,16 +678,16 @@ void d_prune_aliases(struct inode *inode
+ restart:
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (!dentry->d_count) {
+ __dget_dlock(dentry);
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ spin_unlock(&inode->i_lock);
+ dput(dentry);
+ goto restart;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+@@ -724,10 +724,10 @@ static void try_prune_one_dentry(struct
+ /* Prune ancestors. */
+ dentry = parent;
+ while (dentry) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_count > 1) {
+ dentry->d_count--;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return;
+ }
+ dentry = dentry_kill(dentry, 1);
+@@ -743,9 +743,9 @@ static void shrink_dentry_list(struct li
+ dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
+ if (&dentry->d_lru == list)
+ break; /* empty */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ continue;
+ }
+
+@@ -756,7 +756,7 @@ static void shrink_dentry_list(struct li
+ */
+ if (dentry->d_count) {
+ dentry_lru_del(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ continue;
+ }
+
+@@ -794,7 +794,7 @@ relock:
+ struct dentry, d_lru);
+ BUG_ON(dentry->d_sb != sb);
+
+- if (!spin_trylock(&dentry->d_lock)) {
++ if (!seq_spin_trylock(&dentry->d_lock)) {
+ spin_unlock(&dcache_lru_lock);
+ cpu_relax();
+ goto relock;
+@@ -803,11 +803,11 @@ relock:
+ if (dentry->d_flags & DCACHE_REFERENCED) {
+ dentry->d_flags &= ~DCACHE_REFERENCED;
+ list_move(&dentry->d_lru, &referenced);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ } else {
+ list_move_tail(&dentry->d_lru, &tmp);
+ dentry->d_flags |= DCACHE_SHRINK_LIST;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ if (!--count)
+ break;
+ }
+@@ -960,8 +960,8 @@ static struct dentry *try_to_ascend(stru
+ struct dentry *new = old->d_parent;
+
+ rcu_read_lock();
+- spin_unlock(&old->d_lock);
+- spin_lock(&new->d_lock);
++ seq_spin_unlock(&old->d_lock);
++ seq_spin_lock(&new->d_lock);
+
+ /*
+ * might go back up the wrong parent if we have had a rename
+@@ -970,7 +970,7 @@ static struct dentry *try_to_ascend(stru
+ if (new != old->d_parent ||
+ (old->d_flags & DCACHE_DISCONNECTED) ||
+ (!locked && read_seqretry(&rename_lock, seq))) {
+- spin_unlock(&new->d_lock);
++ seq_spin_unlock(&new->d_lock);
+ new = NULL;
+ }
+ rcu_read_unlock();
+@@ -1004,7 +1004,7 @@ again:
+
+ if (d_mountpoint(parent))
+ goto positive;
+- spin_lock(&this_parent->d_lock);
++ seq_spin_lock(&this_parent->d_lock);
+ repeat:
+ next = this_parent->d_subdirs.next;
+ resume:
+@@ -1013,21 +1013,21 @@ resume:
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ next = tmp->next;
+
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ /* Have we found a mount point ? */
+ if (d_mountpoint(dentry)) {
+- spin_unlock(&dentry->d_lock);
+- spin_unlock(&this_parent->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&this_parent->d_lock);
+ goto positive;
+ }
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&this_parent->d_lock);
+- spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
++ seq_spin_unlock(&this_parent->d_lock);
++ spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
+ this_parent = dentry;
+- spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
++ spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
+ goto repeat;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ /*
+ * All done at this level ... ascend and resume the search.
+@@ -1040,7 +1040,7 @@ resume:
+ next = child->d_u.d_child.next;
+ goto resume;
+ }
+- spin_unlock(&this_parent->d_lock);
++ seq_spin_unlock(&this_parent->d_lock);
+ if (!locked && read_seqretry(&rename_lock, seq))
+ goto rename_retry;
+ if (locked)
+@@ -1085,7 +1085,7 @@ static int select_parent(struct dentry *
+ seq = read_seqbegin(&rename_lock);
+ again:
+ this_parent = parent;
+- spin_lock(&this_parent->d_lock);
++ seq_spin_lock(&this_parent->d_lock);
+ repeat:
+ next = this_parent->d_subdirs.next;
+ resume:
+@@ -1094,7 +1094,7 @@ resume:
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ next = tmp->next;
+
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+
+ /*
+ * move only zero ref count dentries to the dispose list.
+@@ -1117,7 +1117,7 @@ resume:
+ * the rest.
+ */
+ if (found && need_resched()) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ goto out;
+ }
+
+@@ -1125,14 +1125,14 @@ resume:
+ * Descend a level if the d_subdirs list is non-empty.
+ */
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&this_parent->d_lock);
+- spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
++ seq_spin_unlock(&this_parent->d_lock);
++ spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
+ this_parent = dentry;
+- spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
++ spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
+ goto repeat;
+ }
+
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ /*
+ * All done at this level ... ascend and resume the search.
+@@ -1146,7 +1146,7 @@ resume:
+ goto resume;
+ }
+ out:
+- spin_unlock(&this_parent->d_lock);
++ seq_spin_unlock(&this_parent->d_lock);
+ if (!locked && read_seqretry(&rename_lock, seq))
+ goto rename_retry;
+ if (locked)
+@@ -1214,8 +1214,7 @@ struct dentry *__d_alloc(struct super_bl
+
+ dentry->d_count = 1;
+ dentry->d_flags = 0;
+- spin_lock_init(&dentry->d_lock);
+- seqcount_init(&dentry->d_seq);
++ seqlock_init(&dentry->d_lock);
+ dentry->d_inode = NULL;
+ dentry->d_parent = dentry;
+ dentry->d_sb = sb;
+@@ -1248,7 +1247,7 @@ struct dentry *d_alloc(struct dentry * p
+ if (!dentry)
+ return NULL;
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ /*
+ * don't need child lock because it is not subject
+ * to concurrency here
+@@ -1256,7 +1255,7 @@ struct dentry *d_alloc(struct dentry * p
+ __dget_dlock(parent);
+ dentry->d_parent = parent;
+ list_add(&dentry->d_u.d_child, &parent->d_subdirs);
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+
+ return dentry;
+ }
+@@ -1308,7 +1307,7 @@ EXPORT_SYMBOL(d_set_d_op);
+
+ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (inode) {
+ if (unlikely(IS_AUTOMOUNT(inode)))
+ dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
+@@ -1316,7 +1315,7 @@ static void __d_instantiate(struct dentr
+ }
+ dentry->d_inode = inode;
+ dentry_rcuwalk_barrier(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ fsnotify_d_instantiate(dentry, inode);
+ }
+
+@@ -1516,14 +1515,14 @@ struct dentry *d_obtain_alias(struct ino
+ }
+
+ /* attach a disconnected dentry */
+- spin_lock(&tmp->d_lock);
++ seq_spin_lock(&tmp->d_lock);
+ tmp->d_inode = inode;
+ tmp->d_flags |= DCACHE_DISCONNECTED;
+ list_add(&tmp->d_alias, &inode->i_dentry);
+ hlist_bl_lock(&tmp->d_sb->s_anon);
+ hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
+ hlist_bl_unlock(&tmp->d_sb->s_anon);
+- spin_unlock(&tmp->d_lock);
++ seq_spin_unlock(&tmp->d_lock);
+ spin_unlock(&inode->i_lock);
+ security_d_instantiate(tmp, inode);
+
+@@ -1731,7 +1730,7 @@ struct dentry *__d_lookup_rcu(struct den
+ continue;
+
+ seqretry:
+- *seq = read_seqcount_begin(&dentry->d_seq);
++ *seq = read_seqbegin(&dentry->d_lock);
+ if (dentry->d_parent != parent)
+ continue;
+ if (d_unhashed(dentry))
+@@ -1746,7 +1745,7 @@ seqretry:
+ * edge of memory when walking. If we could load this
+ * atomically some other way, we could drop this check.
+ */
+- if (read_seqcount_retry(&dentry->d_seq, *seq))
++ if (read_seqretry(&dentry->d_lock, *seq))
+ goto seqretry;
+ if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
+ if (parent->d_op->d_compare(parent, *inode,
+@@ -1849,7 +1848,7 @@ struct dentry *__d_lookup(struct dentry
+ if (dentry->d_name.hash != hash)
+ continue;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_parent != parent)
+ goto next;
+ if (d_unhashed(dentry))
+@@ -1873,10 +1872,10 @@ struct dentry *__d_lookup(struct dentry
+
+ dentry->d_count++;
+ found = dentry;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ break;
+ next:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ rcu_read_unlock();
+
+@@ -1924,17 +1923,17 @@ int d_validate(struct dentry *dentry, st
+ {
+ struct dentry *child;
+
+- spin_lock(&dparent->d_lock);
++ seq_spin_lock(&dparent->d_lock);
+ list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
+ if (dentry == child) {
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ __dget_dlock(dentry);
+- spin_unlock(&dentry->d_lock);
+- spin_unlock(&dparent->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dparent->d_lock);
+ return 1;
+ }
+ }
+- spin_unlock(&dparent->d_lock);
++ seq_spin_unlock(&dparent->d_lock);
+
+ return 0;
+ }
+@@ -1969,12 +1968,12 @@ void d_delete(struct dentry * dentry)
+ * Are we the only user?
+ */
+ again:
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ inode = dentry->d_inode;
+ isdir = S_ISDIR(inode->i_mode);
+ if (dentry->d_count == 1) {
+ if (inode && !spin_trylock(&inode->i_lock)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ cpu_relax();
+ goto again;
+ }
+@@ -1987,7 +1986,7 @@ again:
+ if (!d_unhashed(dentry))
+ __d_drop(dentry);
+
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ fsnotify_nameremove(dentry, isdir);
+ }
+@@ -2016,9 +2015,9 @@ static void _d_rehash(struct dentry * en
+
+ void d_rehash(struct dentry * entry)
+ {
+- spin_lock(&entry->d_lock);
++ seq_spin_lock(&entry->d_lock);
+ _d_rehash(entry);
+- spin_unlock(&entry->d_lock);
++ seq_spin_unlock(&entry->d_lock);
+ }
+ EXPORT_SYMBOL(d_rehash);
+
+@@ -2041,11 +2040,9 @@ void dentry_update_name_case(struct dent
+ BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
+ BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
+
+- spin_lock(&dentry->d_lock);
+- write_seqcount_begin(&dentry->d_seq);
++ write_seqlock(&dentry->d_lock);
+ memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
+- write_seqcount_end(&dentry->d_seq);
+- spin_unlock(&dentry->d_lock);
++ write_sequnlock(&dentry->d_lock);
+ }
+ EXPORT_SYMBOL(dentry_update_name_case);
+
+@@ -2096,24 +2093,24 @@ static void dentry_lock_for_move(struct
+ * XXXX: do we really need to take target->d_lock?
+ */
+ if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
+- spin_lock(&target->d_parent->d_lock);
++ seq_spin_lock(&target->d_parent->d_lock);
+ else {
+ if (d_ancestor(dentry->d_parent, target->d_parent)) {
+- spin_lock(&dentry->d_parent->d_lock);
+- spin_lock_nested(&target->d_parent->d_lock,
+- DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&dentry->d_parent->d_lock);
++ seq_spin_lock_nested(&target->d_parent->d_lock,
++ DENTRY_D_LOCK_NESTED);
+ } else {
+- spin_lock(&target->d_parent->d_lock);
+- spin_lock_nested(&dentry->d_parent->d_lock,
+- DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&target->d_parent->d_lock);
++ seq_spin_lock_nested(&dentry->d_parent->d_lock,
++ DENTRY_D_LOCK_NESTED);
+ }
+ }
+ if (target < dentry) {
+- spin_lock_nested(&target->d_lock, 2);
+- spin_lock_nested(&dentry->d_lock, 3);
++ seq_spin_lock_nested(&target->d_lock, 2);
++ seq_spin_lock_nested(&dentry->d_lock, 3);
+ } else {
+- spin_lock_nested(&dentry->d_lock, 2);
+- spin_lock_nested(&target->d_lock, 3);
++ seq_spin_lock_nested(&dentry->d_lock, 2);
++ seq_spin_lock_nested(&target->d_lock, 3);
+ }
+ }
+
+@@ -2121,9 +2118,9 @@ static void dentry_unlock_parents_for_mo
+ struct dentry *target)
+ {
+ if (target->d_parent != dentry->d_parent)
+- spin_unlock(&dentry->d_parent->d_lock);
++ seq_spin_unlock(&dentry->d_parent->d_lock);
+ if (target->d_parent != target)
+- spin_unlock(&target->d_parent->d_lock);
++ seq_spin_unlock(&target->d_parent->d_lock);
+ }
+
+ /*
+@@ -2157,8 +2154,8 @@ static void __d_move(struct dentry * den
+
+ dentry_lock_for_move(dentry, target);
+
+- write_seqcount_begin(&dentry->d_seq);
+- write_seqcount_begin(&target->d_seq);
++ write_seqlock_begin(&dentry->d_lock);
++ write_seqlock_begin(&target->d_lock);
+
+ /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
+
+@@ -2193,13 +2190,13 @@ static void __d_move(struct dentry * den
+
+ list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
+
+- write_seqcount_end(&target->d_seq);
+- write_seqcount_end(&dentry->d_seq);
++ write_seqlock_end(&target->d_lock);
++ write_seqlock_end(&dentry->d_lock);
+
+ dentry_unlock_parents_for_move(dentry, target);
+- spin_unlock(&target->d_lock);
++ seq_spin_unlock(&target->d_lock);
+ fsnotify_d_move(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+@@ -2288,8 +2285,8 @@ static void __d_materialise_dentry(struc
+
+ dentry_lock_for_move(anon, dentry);
+
+- write_seqcount_begin(&dentry->d_seq);
+- write_seqcount_begin(&anon->d_seq);
++ write_seqlock_begin(&dentry->d_lock);
++ write_seqlock_begin(&anon->d_lock);
+
+ dparent = dentry->d_parent;
+ aparent = anon->d_parent;
+@@ -2311,11 +2308,11 @@ static void __d_materialise_dentry(struc
+ else
+ INIT_LIST_HEAD(&anon->d_u.d_child);
+
+- write_seqcount_end(&dentry->d_seq);
+- write_seqcount_end(&anon->d_seq);
++ write_seqlock_end(&dentry->d_lock);
++ write_seqlock_end(&anon->d_lock);
+
+ dentry_unlock_parents_for_move(anon, dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ /* anon->d_lock still locked, returns locked */
+ anon->d_flags &= ~DCACHE_DISCONNECTED;
+@@ -2391,10 +2388,10 @@ struct dentry *d_materialise_unique(stru
+ else
+ BUG_ON(!d_unhashed(actual));
+
+- spin_lock(&actual->d_lock);
++ seq_spin_lock(&actual->d_lock);
+ found:
+ _d_rehash(actual);
+- spin_unlock(&actual->d_lock);
++ seq_spin_unlock(&actual->d_lock);
+ spin_unlock(&inode->i_lock);
+ out_nolock:
+ if (actual == dentry) {
+@@ -2455,9 +2452,9 @@ static int prepend_path(const struct pat
+ }
+ parent = dentry->d_parent;
+ prefetch(parent);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ error = prepend_name(buffer, buflen, &dentry->d_name);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ if (!error)
+ error = prepend(buffer, buflen, "/", 1);
+ if (error)
+@@ -2682,9 +2679,9 @@ static char *__dentry_path(struct dentry
+ int error;
+
+ prefetch(parent);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ error = prepend_name(&end, &buflen, &dentry->d_name);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
+ goto Elong;
+
+@@ -2874,7 +2871,7 @@ void d_genocide(struct dentry *root)
+ seq = read_seqbegin(&rename_lock);
+ again:
+ this_parent = root;
+- spin_lock(&this_parent->d_lock);
++ seq_spin_lock(&this_parent->d_lock);
+ repeat:
+ next = this_parent->d_subdirs.next;
+ resume:
+@@ -2883,23 +2880,23 @@ resume:
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
+ next = tmp->next;
+
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (d_unhashed(dentry) || !dentry->d_inode) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ continue;
+ }
+ if (!list_empty(&dentry->d_subdirs)) {
+- spin_unlock(&this_parent->d_lock);
+- spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
++ seq_spin_unlock(&this_parent->d_lock);
++ spin_release(&dentry->d_lock.lock.dep_map, 1, _RET_IP_);
+ this_parent = dentry;
+- spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
++ spin_acquire(&this_parent->d_lock.lock.dep_map, 0, 1, _RET_IP_);
+ goto repeat;
+ }
+ if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
+ dentry->d_flags |= DCACHE_GENOCIDE;
+ dentry->d_count--;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ if (this_parent != root) {
+ struct dentry *child = this_parent;
+@@ -2913,7 +2910,7 @@ resume:
+ next = child->d_u.d_child.next;
+ goto resume;
+ }
+- spin_unlock(&this_parent->d_lock);
++ seq_spin_unlock(&this_parent->d_lock);
+ if (!locked && read_seqretry(&rename_lock, seq))
+ goto rename_retry;
+ if (locked)
+Index: linux-3.2/fs/dcookies.c
+===================================================================
+--- linux-3.2.orig/fs/dcookies.c
++++ linux-3.2/fs/dcookies.c
+@@ -98,9 +98,9 @@ static struct dcookie_struct *alloc_dcoo
+ return NULL;
+
+ d = path->dentry;
+- spin_lock(&d->d_lock);
++ seq_spin_lock(&d->d_lock);
+ d->d_flags |= DCACHE_COOKIE;
+- spin_unlock(&d->d_lock);
++ seq_spin_unlock(&d->d_lock);
+
+ dcs->path = *path;
+ path_get(path);
+@@ -267,9 +267,9 @@ static void free_dcookie(struct dcookie_
+ {
+ struct dentry *d = dcs->path.dentry;
+
+- spin_lock(&d->d_lock);
++ seq_spin_lock(&d->d_lock);
+ d->d_flags &= ~DCACHE_COOKIE;
+- spin_unlock(&d->d_lock);
++ seq_spin_unlock(&d->d_lock);
+
+ path_put(&dcs->path);
+ kmem_cache_free(dcookie_cache, dcs);
+Index: linux-3.2/fs/exportfs/expfs.c
+===================================================================
+--- linux-3.2.orig/fs/exportfs/expfs.c
++++ linux-3.2/fs/exportfs/expfs.c
+@@ -114,15 +114,15 @@ reconnect_path(struct vfsmount *mnt, str
+
+ if (!IS_ROOT(pd)) {
+ /* must have found a connected parent - great */
+- spin_lock(&pd->d_lock);
++ seq_spin_lock(&pd->d_lock);
+ pd->d_flags &= ~DCACHE_DISCONNECTED;
+- spin_unlock(&pd->d_lock);
++ seq_spin_unlock(&pd->d_lock);
+ noprogress = 0;
+ } else if (pd == mnt->mnt_sb->s_root) {
+ printk(KERN_ERR "export: Eeek filesystem root is not connected, impossible\n");
+- spin_lock(&pd->d_lock);
++ seq_spin_lock(&pd->d_lock);
+ pd->d_flags &= ~DCACHE_DISCONNECTED;
+- spin_unlock(&pd->d_lock);
++ seq_spin_unlock(&pd->d_lock);
+ noprogress = 0;
+ } else {
+ /*
+@@ -335,11 +335,11 @@ static int export_encode_fh(struct dentr
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ parent = dentry->d_parent->d_inode;
+ fid->i32.parent_ino = parent->i_ino;
+ fid->i32.parent_gen = parent->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ len = 4;
+ type = FILEID_INO32_GEN_PARENT;
+ }
+Index: linux-3.2/fs/fat/inode.c
+===================================================================
+--- linux-3.2.orig/fs/fat/inode.c
++++ linux-3.2/fs/fat/inode.c
+@@ -772,9 +772,9 @@ fat_encode_fh(struct dentry *de, __u32 *
+ fh[1] = inode->i_generation;
+ fh[2] = ipos_h;
+ fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
+- spin_lock(&de->d_lock);
++ seq_spin_lock(&de->d_lock);
+ fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart;
+- spin_unlock(&de->d_lock);
++ seq_spin_unlock(&de->d_lock);
+ return 3;
+ }
+
+Index: linux-3.2/fs/fat/namei_vfat.c
+===================================================================
+--- linux-3.2.orig/fs/fat/namei_vfat.c
++++ linux-3.2/fs/fat/namei_vfat.c
+@@ -34,10 +34,10 @@
+ static int vfat_revalidate_shortname(struct dentry *dentry)
+ {
+ int ret = 1;
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_time != dentry->d_parent->d_inode->i_version)
+ ret = 0;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return ret;
+ }
+
+Index: linux-3.2/fs/fs-writeback.c
+===================================================================
+--- linux-3.2.orig/fs/fs-writeback.c
++++ linux-3.2/fs/fs-writeback.c
+@@ -1010,7 +1010,7 @@ static noinline void block_dump___mark_i
+
+ dentry = d_find_alias(inode);
+ if (dentry) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ name = (const char *) dentry->d_name.name;
+ }
+ printk(KERN_DEBUG
+@@ -1018,7 +1018,7 @@ static noinline void block_dump___mark_i
+ current->comm, task_pid_nr(current), inode->i_ino,
+ name, inode->i_sb->s_id);
+ if (dentry) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ dput(dentry);
+ }
+ }
+Index: linux-3.2/fs/fuse/inode.c
+===================================================================
+--- linux-3.2.orig/fs/fuse/inode.c
++++ linux-3.2/fs/fuse/inode.c
+@@ -652,11 +652,11 @@ static int fuse_encode_fh(struct dentry
+ if (encode_parent) {
+ struct inode *parent;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ parent = dentry->d_parent->d_inode;
+ nodeid = get_fuse_inode(parent)->nodeid;
+ generation = parent->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ fh[3] = (u32)(nodeid >> 32);
+ fh[4] = (u32)(nodeid & 0xffffffff);
+Index: linux-3.2/fs/gfs2/export.c
+===================================================================
+--- linux-3.2.orig/fs/gfs2/export.c
++++ linux-3.2/fs/gfs2/export.c
+@@ -53,11 +53,11 @@ static int gfs2_encode_fh(struct dentry
+ if (!connectable || inode == sb->s_root->d_inode)
+ return *len;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ inode = dentry->d_parent->d_inode;
+ ip = GFS2_I(inode);
+ igrab(inode);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
+ fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
+Index: linux-3.2/fs/isofs/export.c
+===================================================================
+--- linux-3.2.orig/fs/isofs/export.c
++++ linux-3.2/fs/isofs/export.c
+@@ -139,13 +139,13 @@ isofs_export_encode_fh(struct dentry *de
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+ struct iso_inode_info *eparent;
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ parent = dentry->d_parent->d_inode;
+ eparent = ISOFS_I(parent);
+ fh32[3] = eparent->i_iget5_block;
+ fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
+ fh32[4] = parent->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ len = 5;
+ type = 2;
+ }
+Index: linux-3.2/fs/libfs.c
+===================================================================
+--- linux-3.2.orig/fs/libfs.c
++++ linux-3.2/fs/libfs.c
+@@ -102,21 +102,21 @@ loff_t dcache_dir_lseek(struct file *fil
+ struct dentry *cursor = file->private_data;
+ loff_t n = file->f_pos - 2;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ /* d_lock not required for cursor */
+ list_del(&cursor->d_u.d_child);
+ p = dentry->d_subdirs.next;
+ while (n && p != &dentry->d_subdirs) {
+ struct dentry *next;
+ next = list_entry(p, struct dentry, d_u.d_child);
+- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(next))
+ n--;
+- spin_unlock(&next->d_lock);
++ seq_spin_unlock(&next->d_lock);
+ p = p->next;
+ }
+ list_add_tail(&cursor->d_u.d_child, p);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ }
+ mutex_unlock(&dentry->d_inode->i_mutex);
+@@ -159,35 +159,35 @@ int dcache_readdir(struct file * filp, v
+ i++;
+ /* fallthrough */
+ default:
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (filp->f_pos == 2)
+ list_move(q, &dentry->d_subdirs);
+
+ for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
+ struct dentry *next;
+ next = list_entry(p, struct dentry, d_u.d_child);
+- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!simple_positive(next)) {
+- spin_unlock(&next->d_lock);
++ seq_spin_unlock(&next->d_lock);
+ continue;
+ }
+
+- spin_unlock(&next->d_lock);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&next->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ if (filldir(dirent, next->d_name.name,
+ next->d_name.len, filp->f_pos,
+ next->d_inode->i_ino,
+ dt_type(next->d_inode)) < 0)
+ return 0;
+- spin_lock(&dentry->d_lock);
+- spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&dentry->d_lock);
++ seq_spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
+ /* next is still alive */
+ list_move(q, p);
+- spin_unlock(&next->d_lock);
++ seq_spin_unlock(&next->d_lock);
+ p = q;
+ filp->f_pos++;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ return 0;
+ }
+@@ -281,18 +281,18 @@ int simple_empty(struct dentry *dentry)
+ struct dentry *child;
+ int ret = 0;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
+- spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+ if (simple_positive(child)) {
+- spin_unlock(&child->d_lock);
++ seq_spin_unlock(&child->d_lock);
+ goto out;
+ }
+- spin_unlock(&child->d_lock);
++ seq_spin_unlock(&child->d_lock);
+ }
+ ret = 1;
+ out:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return ret;
+ }
+
+Index: linux-3.2/fs/namei.c
+===================================================================
+--- linux-3.2.orig/fs/namei.c
++++ linux-3.2/fs/namei.c
+@@ -433,7 +433,7 @@ static int unlazy_walk(struct nameidata
+ nd->root.dentry != fs->root.dentry)
+ goto err_root;
+ }
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ if (!dentry) {
+ if (!__d_rcu_to_refcount(parent, nd->seq))
+ goto err_parent;
+@@ -441,7 +441,7 @@ static int unlazy_walk(struct nameidata
+ } else {
+ if (dentry->d_parent != parent)
+ goto err_parent;
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ if (!__d_rcu_to_refcount(dentry, nd->seq))
+ goto err_child;
+ /*
+@@ -453,9 +453,9 @@ static int unlazy_walk(struct nameidata
+ BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
+ BUG_ON(!parent->d_count);
+ parent->d_count++;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ if (want_root) {
+ path_get(&nd->root);
+ seq_spin_unlock(&fs->lock);
+@@ -468,9 +468,9 @@ static int unlazy_walk(struct nameidata
+ return 0;
+
+ err_child:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ err_parent:
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ err_root:
+ if (want_root)
+ seq_spin_unlock(&fs->lock);
+@@ -517,15 +517,15 @@ static int complete_walk(struct nameidat
+ nd->flags &= ~LOOKUP_RCU;
+ if (!(nd->flags & LOOKUP_ROOT))
+ nd->root.mnt = NULL;
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ br_read_unlock(vfsmount_lock);
+ return -ECHILD;
+ }
+ BUG_ON(nd->inode != dentry->d_inode);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ mntget(nd->path.mnt);
+ rcu_read_unlock();
+ br_read_unlock(vfsmount_lock);
+@@ -569,7 +569,7 @@ static __always_inline void set_root_rcu
+ do {
+ seq = read_seqbegin(&fs->lock);
+ nd->root = fs->root;
+- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
++ nd->seq = __read_seqbegin(&nd->root.dentry->d_lock);
+ } while (read_seqretry(&fs->lock, seq));
+ }
+ }
+@@ -901,7 +901,7 @@ static bool __follow_mount_rcu(struct na
+ path->mnt = mounted;
+ path->dentry = mounted->mnt_root;
+ nd->flags |= LOOKUP_JUMPED;
+- nd->seq = read_seqcount_begin(&path->dentry->d_seq);
++ nd->seq = read_seqbegin(&path->dentry->d_lock);
+ /*
+ * Update the inode too. We don't need to re-check the
+ * dentry sequence number here after this d_inode read,
+@@ -921,7 +921,7 @@ static void follow_mount_rcu(struct name
+ break;
+ nd->path.mnt = mounted;
+ nd->path.dentry = mounted->mnt_root;
+- nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
++ nd->seq = read_seqbegin(&nd->path.dentry->d_lock);
+ }
+ }
+
+@@ -939,8 +939,8 @@ static int follow_dotdot_rcu(struct name
+ struct dentry *parent = old->d_parent;
+ unsigned seq;
+
+- seq = read_seqcount_begin(&parent->d_seq);
+- if (read_seqcount_retry(&old->d_seq, nd->seq))
++ seq = read_seqbegin(&parent->d_lock);
++ if (read_seqretry(&old->d_lock, nd->seq))
+ goto failed;
+ nd->path.dentry = parent;
+ nd->seq = seq;
+@@ -948,7 +948,7 @@ static int follow_dotdot_rcu(struct name
+ }
+ if (!follow_up_rcu(&nd->path))
+ break;
+- nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
++ nd->seq = read_seqbegin(&nd->path.dentry->d_lock);
+ }
+ follow_mount_rcu(nd);
+ nd->inode = nd->path.dentry->d_inode;
+@@ -1134,7 +1134,7 @@ static int do_lookup(struct nameidata *n
+ goto unlazy;
+
+ /* Memory barrier in read_seqcount_begin of child is enough */
+- if (__read_seqcount_retry(&parent->d_seq, nd->seq))
++ if (__read_seqretry(&parent->d_lock, nd->seq))
+ return -ECHILD;
+ nd->seq = seq;
+
+@@ -1491,7 +1491,7 @@ static int path_init(int dfd, const char
+ if (flags & LOOKUP_RCU) {
+ br_read_lock(vfsmount_lock);
+ rcu_read_lock();
+- nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
++ nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
+ } else {
+ path_get(&nd->path);
+ }
+@@ -1521,7 +1521,7 @@ static int path_init(int dfd, const char
+ do {
+ seq = read_seqbegin(&fs->lock);
+ nd->path = fs->pwd;
+- nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
++ nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
+ } while (read_seqretry(&fs->lock, seq));
+ } else {
+ get_fs_pwd(current->fs, &nd->path);
+@@ -1550,7 +1550,7 @@ static int path_init(int dfd, const char
+ if (flags & LOOKUP_RCU) {
+ if (fput_needed)
+ *fp = file;
+- nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
++ nd->seq = __read_seqbegin(&nd->path.dentry->d_lock);
+ br_read_lock(vfsmount_lock);
+ rcu_read_lock();
+ } else {
+@@ -2615,10 +2615,10 @@ SYSCALL_DEFINE2(mkdir, const char __user
+ void dentry_unhash(struct dentry *dentry)
+ {
+ shrink_dcache_parent(dentry);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_count == 1)
+ __d_drop(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+Index: linux-3.2/fs/namespace.c
+===================================================================
+--- linux-3.2.orig/fs/namespace.c
++++ linux-3.2/fs/namespace.c
+@@ -560,9 +560,9 @@ static void dentry_reset_mounted(struct
+ return;
+ }
+ }
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_MOUNTED;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+@@ -587,9 +587,9 @@ void mnt_set_mountpoint(struct vfsmount
+ {
+ child_mnt->mnt_parent = mntget(mnt);
+ child_mnt->mnt_mountpoint = dget(dentry);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_MOUNTED;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+Index: linux-3.2/fs/ncpfs/dir.c
+===================================================================
+--- linux-3.2.orig/fs/ncpfs/dir.c
++++ linux-3.2/fs/ncpfs/dir.c
+@@ -388,7 +388,7 @@ ncp_dget_fpos(struct dentry *dentry, str
+ }
+
+ /* If a pointer is invalid, we search the dentry. */
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ next = parent->d_subdirs.next;
+ while (next != &parent->d_subdirs) {
+ dent = list_entry(next, struct dentry, d_u.d_child);
+@@ -397,12 +397,12 @@ ncp_dget_fpos(struct dentry *dentry, str
+ dget(dent);
+ else
+ dent = NULL;
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ goto out;
+ }
+ next = next->next;
+ }
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ return NULL;
+
+ out:
+Index: linux-3.2/fs/ncpfs/ncplib_kernel.h
+===================================================================
+--- linux-3.2.orig/fs/ncpfs/ncplib_kernel.h
++++ linux-3.2/fs/ncpfs/ncplib_kernel.h
+@@ -191,7 +191,7 @@ ncp_renew_dentries(struct dentry *parent
+ struct list_head *next;
+ struct dentry *dentry;
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ next = parent->d_subdirs.next;
+ while (next != &parent->d_subdirs) {
+ dentry = list_entry(next, struct dentry, d_u.d_child);
+@@ -203,7 +203,7 @@ ncp_renew_dentries(struct dentry *parent
+
+ next = next->next;
+ }
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ }
+
+ static inline void
+@@ -213,7 +213,7 @@ ncp_invalidate_dircache_entries(struct d
+ struct list_head *next;
+ struct dentry *dentry;
+
+- spin_lock(&parent->d_lock);
++ seq_spin_lock(&parent->d_lock);
+ next = parent->d_subdirs.next;
+ while (next != &parent->d_subdirs) {
+ dentry = list_entry(next, struct dentry, d_u.d_child);
+@@ -221,7 +221,7 @@ ncp_invalidate_dircache_entries(struct d
+ ncp_age_dentry(server, dentry);
+ next = next->next;
+ }
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ }
+
+ struct ncp_cache_head {
+Index: linux-3.2/fs/nfs/dir.c
+===================================================================
+--- linux-3.2.orig/fs/nfs/dir.c
++++ linux-3.2/fs/nfs/dir.c
+@@ -1823,9 +1823,9 @@ static int nfs_unlink(struct inode *dir,
+ dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
+ dir->i_ino, dentry->d_name.name);
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_count > 1) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ /* Start asynchronous writeout of the inode */
+ write_inode_now(dentry->d_inode, 0);
+ error = nfs_sillyrename(dir, dentry);
+@@ -1835,7 +1835,7 @@ static int nfs_unlink(struct inode *dir,
+ __d_drop(dentry);
+ need_rehash = 1;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ error = nfs_safe_remove(dentry);
+ if (!error || error == -ENOENT) {
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+Index: linux-3.2/fs/nfs/getroot.c
+===================================================================
+--- linux-3.2.orig/fs/nfs/getroot.c
++++ linux-3.2/fs/nfs/getroot.c
+@@ -64,9 +64,9 @@ static int nfs_superblock_set_dummy_root
+ * Oops, since the test for IS_ROOT() will fail.
+ */
+ spin_lock(&sb->s_root->d_inode->i_lock);
+- spin_lock(&sb->s_root->d_lock);
++ seq_spin_lock(&sb->s_root->d_lock);
+ list_del_init(&sb->s_root->d_alias);
+- spin_unlock(&sb->s_root->d_lock);
++ seq_spin_unlock(&sb->s_root->d_lock);
+ spin_unlock(&sb->s_root->d_inode->i_lock);
+ }
+ return 0;
+@@ -126,12 +126,12 @@ struct dentry *nfs_get_root(struct super
+ }
+
+ security_d_instantiate(ret, inode);
+- spin_lock(&ret->d_lock);
++ seq_spin_lock(&ret->d_lock);
+ if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
+ ret->d_fsdata = name;
+ name = NULL;
+ }
+- spin_unlock(&ret->d_lock);
++ seq_spin_unlock(&ret->d_lock);
+ out:
+ if (name)
+ kfree(name);
+@@ -250,12 +250,12 @@ struct dentry *nfs4_get_root(struct supe
+ }
+
+ security_d_instantiate(ret, inode);
+- spin_lock(&ret->d_lock);
++ seq_spin_lock(&ret->d_lock);
+ if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
+ ret->d_fsdata = name;
+ name = NULL;
+ }
+- spin_unlock(&ret->d_lock);
++ seq_spin_unlock(&ret->d_lock);
+ out:
+ if (name)
+ kfree(name);
+Index: linux-3.2/fs/nfs/namespace.c
+===================================================================
+--- linux-3.2.orig/fs/nfs/namespace.c
++++ linux-3.2/fs/nfs/namespace.c
+@@ -60,7 +60,7 @@ rename_retry:
+ seq = read_seqbegin(&rename_lock);
+ rcu_read_lock();
+ while (1) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (IS_ROOT(dentry))
+ break;
+ namelen = dentry->d_name.len;
+@@ -70,17 +70,17 @@ rename_retry:
+ end -= namelen;
+ memcpy(end, dentry->d_name.name, namelen);
+ *--end = '/';
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ dentry = dentry->d_parent;
+ }
+ if (read_seqretry(&rename_lock, seq)) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+ if (*end != '/') {
+ if (--buflen < 0) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto Elong;
+ }
+@@ -89,7 +89,7 @@ rename_retry:
+ *p = end;
+ base = dentry->d_fsdata;
+ if (!base) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ WARN_ON(1);
+ return end;
+@@ -100,17 +100,17 @@ rename_retry:
+ namelen--;
+ buflen -= namelen;
+ if (buflen < 0) {
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto Elong;
+ }
+ end -= namelen;
+ memcpy(end, base, namelen);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ return end;
+ Elong_unlock:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
+Index: linux-3.2/fs/nfs/unlink.c
+===================================================================
+--- linux-3.2.orig/fs/nfs/unlink.c
++++ linux-3.2/fs/nfs/unlink.c
+@@ -156,7 +156,7 @@ static int nfs_do_call_unlink(struct den
+ */
+ nfs_free_dname(data);
+ ret = nfs_copy_dname(alias, data);
+- spin_lock(&alias->d_lock);
++ seq_spin_lock(&alias->d_lock);
+ if (ret == 0 && alias->d_inode != NULL &&
+ !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
+ devname_garbage = alias->d_fsdata;
+@@ -165,7 +165,7 @@ static int nfs_do_call_unlink(struct den
+ ret = 1;
+ } else
+ ret = 0;
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ nfs_dec_sillycount(dir);
+ dput(alias);
+ /*
+@@ -274,13 +274,13 @@ nfs_async_unlink(struct inode *dir, stru
+ data->res.dir_attr = &data->dir_attr;
+
+ status = -EBUSY;
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto out_unlock;
+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+ devname_garbage = dentry->d_fsdata;
+ dentry->d_fsdata = data;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ /*
+ * If we'd displaced old cached devname, free it. At that
+ * point dentry is definitely not a root, so we won't need
+@@ -290,7 +290,7 @@ nfs_async_unlink(struct inode *dir, stru
+ kfree(devname_garbage);
+ return 0;
+ out_unlock:
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ put_rpccred(data->cred);
+ out_free:
+ kfree(data);
+@@ -312,13 +312,13 @@ nfs_complete_unlink(struct dentry *dentr
+ {
+ struct nfs_unlinkdata *data = NULL;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ data = dentry->d_fsdata;
+ dentry->d_fsdata = NULL;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
+ nfs_free_unlinkdata(data);
+@@ -328,17 +328,17 @@ nfs_complete_unlink(struct dentry *dentr
+ static void
+ nfs_cancel_async_unlink(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ struct nfs_unlinkdata *data = dentry->d_fsdata;
+
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ dentry->d_fsdata = NULL;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ nfs_free_unlinkdata(data);
+ return;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ struct nfs_renamedata {
+Index: linux-3.2/fs/nilfs2/namei.c
+===================================================================
+--- linux-3.2.orig/fs/nilfs2/namei.c
++++ linux-3.2/fs/nilfs2/namei.c
+@@ -538,11 +538,11 @@ static int nilfs_encode_fh(struct dentry
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ parent = dentry->d_parent->d_inode;
+ fid->parent_ino = parent->i_ino;
+ fid->parent_gen = parent->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ type = FILEID_NILFS_WITH_PARENT;
+ *lenp = NILFS_FID_SIZE_CONNECTABLE;
+Index: linux-3.2/fs/notify/fsnotify.c
+===================================================================
+--- linux-3.2.orig/fs/notify/fsnotify.c
++++ linux-3.2/fs/notify/fsnotify.c
+@@ -68,19 +68,19 @@ void __fsnotify_update_child_dentry_flag
+ /* run all of the children of the original inode and fix their
+ * d_flags to indicate parental interest (their parent is the
+ * original inode) */
+- spin_lock(&alias->d_lock);
++ seq_spin_lock(&alias->d_lock);
+ list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
+ if (!child->d_inode)
+ continue;
+
+- spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
+ if (watched)
+ child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ else
+ child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+- spin_unlock(&child->d_lock);
++ seq_spin_unlock(&child->d_lock);
+ }
+- spin_unlock(&alias->d_lock);
++ seq_spin_unlock(&alias->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ }
+Index: linux-3.2/fs/notify/vfsmount_mark.c
+===================================================================
+--- linux-3.2.orig/fs/notify/vfsmount_mark.c
++++ linux-3.2/fs/notify/vfsmount_mark.c
+@@ -35,13 +35,13 @@ void fsnotify_clear_marks_by_mount(struc
+ struct hlist_node *pos, *n;
+ LIST_HEAD(free_list);
+
+- spin_lock(&mnt->mnt_root->d_lock);
++ seq_spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+ list_add(&mark->m.free_m_list, &free_list);
+ hlist_del_init_rcu(&mark->m.m_list);
+ fsnotify_get_mark(mark);
+ }
+- spin_unlock(&mnt->mnt_root->d_lock);
++ seq_spin_unlock(&mnt->mnt_root->d_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
+ fsnotify_destroy_mark(mark);
+@@ -63,7 +63,7 @@ static void fsnotify_recalc_vfsmount_mas
+ struct hlist_node *pos;
+ __u32 new_mask = 0;
+
+- assert_spin_locked(&mnt->mnt_root->d_lock);
++ assert_seq_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+ new_mask |= mark->mask;
+@@ -76,9 +76,9 @@ static void fsnotify_recalc_vfsmount_mas
+ */
+ void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
+ {
+- spin_lock(&mnt->mnt_root->d_lock);
++ seq_spin_lock(&mnt->mnt_root->d_lock);
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+- spin_unlock(&mnt->mnt_root->d_lock);
++ seq_spin_unlock(&mnt->mnt_root->d_lock);
+ }
+
+ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
+@@ -88,14 +88,14 @@ void fsnotify_destroy_vfsmount_mark(stru
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
+
+- spin_lock(&mnt->mnt_root->d_lock);
++ seq_spin_lock(&mnt->mnt_root->d_lock);
+
+ hlist_del_init_rcu(&mark->m.m_list);
+ mark->m.mnt = NULL;
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+
+- spin_unlock(&mnt->mnt_root->d_lock);
++ seq_spin_unlock(&mnt->mnt_root->d_lock);
+ }
+
+ static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
+@@ -104,7 +104,7 @@ static struct fsnotify_mark *fsnotify_fi
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+
+- assert_spin_locked(&mnt->mnt_root->d_lock);
++ assert_seq_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (mark->group == group) {
+@@ -124,9 +124,9 @@ struct fsnotify_mark *fsnotify_find_vfsm
+ {
+ struct fsnotify_mark *mark;
+
+- spin_lock(&mnt->mnt_root->d_lock);
++ seq_spin_lock(&mnt->mnt_root->d_lock);
+ mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+- spin_unlock(&mnt->mnt_root->d_lock);
++ seq_spin_unlock(&mnt->mnt_root->d_lock);
+
+ return mark;
+ }
+@@ -149,7 +149,7 @@ int fsnotify_add_vfsmount_mark(struct fs
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
+
+- spin_lock(&mnt->mnt_root->d_lock);
++ seq_spin_lock(&mnt->mnt_root->d_lock);
+
+ mark->m.mnt = mnt;
+
+@@ -184,7 +184,7 @@ int fsnotify_add_vfsmount_mark(struct fs
+ hlist_add_after_rcu(last, &mark->m.m_list);
+ out:
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+- spin_unlock(&mnt->mnt_root->d_lock);
++ seq_spin_unlock(&mnt->mnt_root->d_lock);
+
+ return ret;
+ }
+Index: linux-3.2/fs/ocfs2/dcache.c
+===================================================================
+--- linux-3.2.orig/fs/ocfs2/dcache.c
++++ linux-3.2/fs/ocfs2/dcache.c
+@@ -177,16 +177,16 @@ struct dentry *ocfs2_find_local_alias(st
+ list_for_each(p, &inode->i_dentry) {
+ dentry = list_entry(p, struct dentry, d_alias);
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
+ trace_ocfs2_find_local_alias(dentry->d_name.len,
+ dentry->d_name.name);
+
+ dget_dlock(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ break;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ dentry = NULL;
+ }
+Index: linux-3.2/fs/ocfs2/export.c
+===================================================================
+--- linux-3.2.orig/fs/ocfs2/export.c
++++ linux-3.2/fs/ocfs2/export.c
+@@ -214,7 +214,7 @@ static int ocfs2_encode_fh(struct dentry
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+ struct inode *parent;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+
+ parent = dentry->d_parent->d_inode;
+ blkno = OCFS2_I(parent)->ip_blkno;
+@@ -224,7 +224,7 @@ static int ocfs2_encode_fh(struct dentry
+ fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
+ fh[5] = cpu_to_le32(generation);
+
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+
+ len = 6;
+ type = 2;
+Index: linux-3.2/fs/reiserfs/inode.c
+===================================================================
+--- linux-3.2.orig/fs/reiserfs/inode.c
++++ linux-3.2/fs/reiserfs/inode.c
+@@ -1614,7 +1614,7 @@ int reiserfs_encode_fh(struct dentry *de
+ if (maxlen < 5 || !need_parent)
+ return 3;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ inode = dentry->d_parent->d_inode;
+ data[3] = inode->i_ino;
+ data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
+@@ -1623,7 +1623,7 @@ int reiserfs_encode_fh(struct dentry *de
+ data[5] = inode->i_generation;
+ *lenp = 6;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return *lenp;
+ }
+
+Index: linux-3.2/fs/udf/namei.c
+===================================================================
+--- linux-3.2.orig/fs/udf/namei.c
++++ linux-3.2/fs/udf/namei.c
+@@ -1296,13 +1296,13 @@ static int udf_encode_fh(struct dentry *
+ fid->udf.generation = inode->i_generation;
+
+ if (connectable && !S_ISDIR(inode->i_mode)) {
+- spin_lock(&de->d_lock);
++ seq_spin_lock(&de->d_lock);
+ inode = de->d_parent->d_inode;
+ location = UDF_I(inode)->i_location;
+ fid->udf.parent_block = location.logicalBlockNum;
+ fid->udf.parent_partref = location.partitionReferenceNum;
+ fid->udf.parent_generation = inode->i_generation;
+- spin_unlock(&de->d_lock);
++ seq_spin_unlock(&de->d_lock);
+ *lenp = 5;
+ type = FILEID_UDF_WITH_PARENT;
+ }
+Index: linux-3.2/fs/xfs/xfs_export.c
+===================================================================
+--- linux-3.2.orig/fs/xfs/xfs_export.c
++++ linux-3.2/fs/xfs/xfs_export.c
+@@ -97,20 +97,20 @@ xfs_fs_encode_fh(
+
+ switch (fileid_type) {
+ case FILEID_INO32_GEN_PARENT:
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
+ fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ /*FALLTHRU*/
+ case FILEID_INO32_GEN:
+ fid->i32.ino = XFS_I(inode)->i_ino;
+ fid->i32.gen = inode->i_generation;
+ break;
+ case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
+ fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ /*FALLTHRU*/
+ case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
+ fid64->ino = XFS_I(inode)->i_ino;
+Index: linux-3.2/include/linux/dcache.h
+===================================================================
+--- linux-3.2.orig/include/linux/dcache.h
++++ linux-3.2/include/linux/dcache.h
+@@ -116,7 +116,7 @@ full_name_hash(const unsigned char *name
+ struct dentry {
+ /* RCU lookup touched fields */
+ unsigned int d_flags; /* protected by d_lock */
+- seqcount_t d_seq; /* per dentry seqlock */
++ seqlock_t d_lock; /* per dentry seqlock */
+ struct hlist_bl_node d_hash; /* lookup hash list */
+ struct dentry *d_parent; /* parent directory */
+ struct qstr d_name;
+@@ -126,7 +126,6 @@ struct dentry {
+
+ /* Ref lookup also touches following */
+ unsigned int d_count; /* protected by d_lock */
+- spinlock_t d_lock; /* per dentry lock */
+ const struct dentry_operations *d_op;
+ struct super_block *d_sb; /* The root of the dentry tree */
+ unsigned long d_time; /* used by d_revalidate */
+@@ -323,8 +322,8 @@ static inline int __d_rcu_to_refcount(st
+ {
+ int ret = 0;
+
+- assert_spin_locked(&dentry->d_lock);
+- if (!read_seqcount_retry(&dentry->d_seq, seq)) {
++ assert_seq_spin_locked(&dentry->d_lock);
++ if (!read_seqretry(&dentry->d_lock, seq)) {
+ ret = 1;
+ dentry->d_count++;
+ }
+@@ -367,9 +366,9 @@ static inline struct dentry *dget_dlock(
+ static inline struct dentry *dget(struct dentry *dentry)
+ {
+ if (dentry) {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dget_dlock(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+ return dentry;
+ }
+@@ -400,9 +399,9 @@ static inline int cant_mount(struct dent
+
+ static inline void dont_mount(struct dentry *dentry)
+ {
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_CANT_MOUNT;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ extern void dput(struct dentry *);
+Index: linux-3.2/include/linux/fs.h
+===================================================================
+--- linux-3.2.orig/include/linux/fs.h
++++ linux-3.2/include/linux/fs.h
+@@ -2603,9 +2603,9 @@ static inline ino_t parent_ino(struct de
+ * Don't strictly need d_lock here? If the parent ino could change
+ * then surely we'd have a deeper race in the caller?
+ */
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ res = dentry->d_parent->d_inode->i_ino;
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ return res;
+ }
+
+Index: linux-3.2/include/linux/fsnotify_backend.h
+===================================================================
+--- linux-3.2.orig/include/linux/fsnotify_backend.h
++++ linux-3.2/include/linux/fsnotify_backend.h
+@@ -329,7 +329,7 @@ static inline void __fsnotify_update_dca
+ {
+ struct dentry *parent;
+
+- assert_spin_locked(&dentry->d_lock);
++ assert_seq_spin_locked(&dentry->d_lock);
+
+ /*
+ * Serialisation of setting PARENT_WATCHED on the dentries is provided
+@@ -353,9 +353,9 @@ static inline void __fsnotify_d_instanti
+ if (!inode)
+ return;
+
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ __fsnotify_update_dcache_flags(dentry);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /* called from fsnotify listeners, such as fanotify or dnotify */
+Index: linux-3.2/kernel/cgroup.c
+===================================================================
+--- linux-3.2.orig/kernel/cgroup.c
++++ linux-3.2/kernel/cgroup.c
+@@ -870,29 +870,29 @@ static void cgroup_clear_directory(struc
+ struct list_head *node;
+
+ BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ node = dentry->d_subdirs.next;
+ while (node != &dentry->d_subdirs) {
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+- spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ list_del_init(node);
+ if (d->d_inode) {
+ /* This should never be called on a cgroup
+ * directory with child cgroups */
+ BUG_ON(d->d_inode->i_mode & S_IFDIR);
+ dget_dlock(d);
+- spin_unlock(&d->d_lock);
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&d->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ d_delete(d);
+ simple_unlink(dentry->d_inode, d);
+ dput(d);
+- spin_lock(&dentry->d_lock);
++ seq_spin_lock(&dentry->d_lock);
+ } else
+- spin_unlock(&d->d_lock);
++ seq_spin_unlock(&d->d_lock);
+ node = dentry->d_subdirs.next;
+ }
+- spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
+ }
+
+ /*
+@@ -905,11 +905,11 @@ static void cgroup_d_remove_dir(struct d
+ cgroup_clear_directory(dentry);
+
+ parent = dentry->d_parent;
+- spin_lock(&parent->d_lock);
+- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock(&parent->d_lock);
++ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ list_del_init(&dentry->d_u.d_child);
+- spin_unlock(&dentry->d_lock);
+- spin_unlock(&parent->d_lock);
++ seq_spin_unlock(&dentry->d_lock);
++ seq_spin_unlock(&parent->d_lock);
+ remove_dir(dentry);
+ }
+
+Index: linux-3.2/net/sunrpc/rpc_pipe.c
+===================================================================
+--- linux-3.2.orig/net/sunrpc/rpc_pipe.c
++++ linux-3.2/net/sunrpc/rpc_pipe.c
+@@ -397,14 +397,14 @@ rpc_info_open(struct inode *inode, struc
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+
+- spin_lock(&file->f_path.dentry->d_lock);
++ seq_spin_lock(&file->f_path.dentry->d_lock);
+ if (!d_unhashed(file->f_path.dentry))
+ clnt = RPC_I(inode)->private;
+ if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
+- spin_unlock(&file->f_path.dentry->d_lock);
++ seq_spin_unlock(&file->f_path.dentry->d_lock);
+ m->private = clnt;
+ } else {
+- spin_unlock(&file->f_path.dentry->d_lock);
++ seq_spin_unlock(&file->f_path.dentry->d_lock);
+ single_release(inode, file);
+ ret = -EINVAL;
+ }
+Index: linux-3.2/security/selinux/selinuxfs.c
+===================================================================
+--- linux-3.2.orig/security/selinux/selinuxfs.c
++++ linux-3.2/security/selinux/selinuxfs.c
+@@ -1194,28 +1194,28 @@ static void sel_remove_entries(struct de
+ {
+ struct list_head *node;
+
+- spin_lock(&de->d_lock);
++ seq_spin_lock(&de->d_lock);
+ node = de->d_subdirs.next;
+ while (node != &de->d_subdirs) {
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+- spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
++ seq_spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+ list_del_init(node);
+
+ if (d->d_inode) {
+ dget_dlock(d);
+- spin_unlock(&de->d_lock);
+- spin_unlock(&d->d_lock);
++ seq_spin_unlock(&de->d_lock);
++ seq_spin_unlock(&d->d_lock);
+ d_delete(d);
+ simple_unlink(de->d_inode, d);
+ dput(d);
+- spin_lock(&de->d_lock);
++ seq_spin_lock(&de->d_lock);
+ } else
+- spin_unlock(&d->d_lock);
++ seq_spin_unlock(&d->d_lock);
+ node = de->d_subdirs.next;
+ }
+
+- spin_unlock(&de->d_lock);
++ seq_spin_unlock(&de->d_lock);
+ }
+
+ #define BOOL_DIR_NAME "booleans"
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/fs-protect-opencoded-isize-seqcount.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/fs-protect-opencoded-isize-seqcount.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,31 @@
+Subject: fs: Protect open coded isize seqcount
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 01 Mar 2012 16:12:47 +0100
+
+A writer might be preempted in the write side critical section on
+RT. Disable preemption to avoid endless spinning of a preempting
+reader.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+
+---
+ include/linux/fs.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-3.2/include/linux/fs.h
+===================================================================
+--- linux-3.2.orig/include/linux/fs.h
++++ linux-3.2/include/linux/fs.h
+@@ -903,9 +903,11 @@ static inline loff_t i_size_read(const s
+ static inline void i_size_write(struct inode *inode, loff_t i_size)
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++ preempt_disable_rt();
+ write_seqcount_begin(&inode->i_size_seqcount);
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
++ preempt_enable_rt();
+ #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ inode->i_size = i_size;
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/fs-struct-use-seqlock.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/fs-struct-use-seqlock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,339 @@
+Subject: fs: fs_struct use seqlock
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 27 Feb 2012 17:58:13 +0100
+
+Replace the open coded seqlock with a real one, so RT can handle it.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+
+---
+ fs/exec.c | 4 ++--
+ fs/fhandle.c | 4 ++--
+ fs/fs_struct.c | 46 ++++++++++++++++++----------------------------
+ fs/namei.c | 14 +++++++-------
+ include/linux/fs_struct.h | 16 +++++++---------
+ kernel/fork.c | 10 +++++-----
+ 6 files changed, 41 insertions(+), 53 deletions(-)
+
+Index: linux-3.2/fs/exec.c
+===================================================================
+--- linux-3.2.orig/fs/exec.c
++++ linux-3.2/fs/exec.c
+@@ -1239,7 +1239,7 @@ int check_unsafe_exec(struct linux_binpr
+ }
+
+ n_fs = 1;
+- spin_lock(&p->fs->lock);
++ seq_spin_lock(&p->fs->lock);
+ rcu_read_lock();
+ for (t = next_thread(p); t != p; t = next_thread(t)) {
+ if (t->fs == p->fs)
+@@ -1256,7 +1256,7 @@ int check_unsafe_exec(struct linux_binpr
+ res = 1;
+ }
+ }
+- spin_unlock(&p->fs->lock);
++ seq_spin_unlock(&p->fs->lock);
+
+ return res;
+ }
+Index: linux-3.2/fs/fhandle.c
+===================================================================
+--- linux-3.2.orig/fs/fhandle.c
++++ linux-3.2/fs/fhandle.c
+@@ -115,10 +115,10 @@ static struct vfsmount *get_vfsmount_fro
+
+ if (fd == AT_FDCWD) {
+ struct fs_struct *fs = current->fs;
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ path = fs->pwd;
+ mntget(path.mnt);
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ } else {
+ int fput_needed;
+ struct file *file = fget_light(fd, &fput_needed);
+Index: linux-3.2/fs/fs_struct.c
+===================================================================
+--- linux-3.2.orig/fs/fs_struct.c
++++ linux-3.2/fs/fs_struct.c
+@@ -26,13 +26,11 @@ void set_fs_root(struct fs_struct *fs, s
+ {
+ struct path old_root;
+
+- spin_lock(&fs->lock);
+- write_seqcount_begin(&fs->seq);
++ write_seqlock(&fs->lock);
+ old_root = fs->root;
+ fs->root = *path;
+ path_get_longterm(path);
+- write_seqcount_end(&fs->seq);
+- spin_unlock(&fs->lock);
++ write_sequnlock(&fs->lock);
+ if (old_root.dentry)
+ path_put_longterm(&old_root);
+ }
+@@ -45,13 +43,11 @@ void set_fs_pwd(struct fs_struct *fs, st
+ {
+ struct path old_pwd;
+
+- spin_lock(&fs->lock);
+- write_seqcount_begin(&fs->seq);
++ write_seqlock(&fs->lock);
+ old_pwd = fs->pwd;
+ fs->pwd = *path;
+ path_get_longterm(path);
+- write_seqcount_end(&fs->seq);
+- spin_unlock(&fs->lock);
++ write_sequnlock(&fs->lock);
+
+ if (old_pwd.dentry)
+ path_put_longterm(&old_pwd);
+@@ -68,8 +64,7 @@ void chroot_fs_refs(struct path *old_roo
+ task_lock(p);
+ fs = p->fs;
+ if (fs) {
+- spin_lock(&fs->lock);
+- write_seqcount_begin(&fs->seq);
++ write_seqlock(&fs->lock);
+ if (fs->root.dentry == old_root->dentry
+ && fs->root.mnt == old_root->mnt) {
+ path_get_longterm(new_root);
+@@ -82,8 +77,7 @@ void chroot_fs_refs(struct path *old_roo
+ fs->pwd = *new_root;
+ count++;
+ }
+- write_seqcount_end(&fs->seq);
+- spin_unlock(&fs->lock);
++ write_sequnlock(&fs->lock);
+ }
+ task_unlock(p);
+ } while_each_thread(g, p);
+@@ -106,12 +100,10 @@ void exit_fs(struct task_struct *tsk)
+ if (fs) {
+ int kill;
+ task_lock(tsk);
+- spin_lock(&fs->lock);
+- write_seqcount_begin(&fs->seq);
++ write_seqlock(&fs->lock);
+ tsk->fs = NULL;
+ kill = !--fs->users;
+- write_seqcount_end(&fs->seq);
+- spin_unlock(&fs->lock);
++ write_sequnlock(&fs->lock);
+ task_unlock(tsk);
+ if (kill)
+ free_fs_struct(fs);
+@@ -125,16 +117,15 @@ struct fs_struct *copy_fs_struct(struct
+ if (fs) {
+ fs->users = 1;
+ fs->in_exec = 0;
+- spin_lock_init(&fs->lock);
+- seqcount_init(&fs->seq);
++ seqlock_init(&fs->lock);
+ fs->umask = old->umask;
+
+- spin_lock(&old->lock);
++ seq_spin_lock(&old->lock);
+ fs->root = old->root;
+ path_get_longterm(&fs->root);
+ fs->pwd = old->pwd;
+ path_get_longterm(&fs->pwd);
+- spin_unlock(&old->lock);
++ seq_spin_unlock(&old->lock);
+ }
+ return fs;
+ }
+@@ -149,10 +140,10 @@ int unshare_fs_struct(void)
+ return -ENOMEM;
+
+ task_lock(current);
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ kill = !--fs->users;
+ current->fs = new_fs;
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ task_unlock(current);
+
+ if (kill)
+@@ -171,8 +162,7 @@ EXPORT_SYMBOL(current_umask);
+ /* to be mentioned only in INIT_TASK */
+ struct fs_struct init_fs = {
+ .users = 1,
+- .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
+- .seq = SEQCNT_ZERO,
++ .lock = __SEQLOCK_UNLOCKED(init_fs.lock),
+ .umask = 0022,
+ };
+
+@@ -185,14 +175,14 @@ void daemonize_fs_struct(void)
+
+ task_lock(current);
+
+- spin_lock(&init_fs.lock);
++ seq_spin_lock(&init_fs.lock);
+ init_fs.users++;
+- spin_unlock(&init_fs.lock);
++ seq_spin_unlock(&init_fs.lock);
+
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ current->fs = &init_fs;
+ kill = !--fs->users;
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+
+ task_unlock(current);
+ if (kill)
+Index: linux-3.2/fs/namei.c
+===================================================================
+--- linux-3.2.orig/fs/namei.c
++++ linux-3.2/fs/namei.c
+@@ -428,7 +428,7 @@ static int unlazy_walk(struct nameidata
+ BUG_ON(!(nd->flags & LOOKUP_RCU));
+ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+ want_root = 1;
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ if (nd->root.mnt != fs->root.mnt ||
+ nd->root.dentry != fs->root.dentry)
+ goto err_root;
+@@ -458,7 +458,7 @@ static int unlazy_walk(struct nameidata
+ spin_unlock(&parent->d_lock);
+ if (want_root) {
+ path_get(&nd->root);
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ }
+ mntget(nd->path.mnt);
+
+@@ -473,7 +473,7 @@ err_parent:
+ spin_unlock(&parent->d_lock);
+ err_root:
+ if (want_root)
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ return -ECHILD;
+ }
+
+@@ -567,10 +567,10 @@ static __always_inline void set_root_rcu
+ unsigned seq;
+
+ do {
+- seq = read_seqcount_begin(&fs->seq);
++ seq = read_seqbegin(&fs->lock);
+ nd->root = fs->root;
+ nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
+- } while (read_seqcount_retry(&fs->seq, seq));
++ } while (read_seqretry(&fs->lock, seq));
+ }
+ }
+
+@@ -1519,10 +1519,10 @@ static int path_init(int dfd, const char
+ rcu_read_lock();
+
+ do {
+- seq = read_seqcount_begin(&fs->seq);
++ seq = read_seqbegin(&fs->lock);
+ nd->path = fs->pwd;
+ nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+- } while (read_seqcount_retry(&fs->seq, seq));
++ } while (read_seqretry(&fs->lock, seq));
+ } else {
+ get_fs_pwd(current->fs, &nd->path);
+ }
+Index: linux-3.2/include/linux/fs_struct.h
+===================================================================
+--- linux-3.2.orig/include/linux/fs_struct.h
++++ linux-3.2/include/linux/fs_struct.h
+@@ -2,13 +2,11 @@
+ #define _LINUX_FS_STRUCT_H
+
+ #include <linux/path.h>
+-#include <linux/spinlock.h>
+ #include <linux/seqlock.h>
+
+ struct fs_struct {
+ int users;
+- spinlock_t lock;
+- seqcount_t seq;
++ seqlock_t lock;
+ int umask;
+ int in_exec;
+ struct path root, pwd;
+@@ -26,29 +24,29 @@ extern int unshare_fs_struct(void);
+
+ static inline void get_fs_root(struct fs_struct *fs, struct path *root)
+ {
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ *root = fs->root;
+ path_get(root);
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ }
+
+ static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
+ {
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ *pwd = fs->pwd;
+ path_get(pwd);
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ }
+
+ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
+ struct path *pwd)
+ {
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ *root = fs->root;
+ path_get(root);
+ *pwd = fs->pwd;
+ path_get(pwd);
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ }
+
+ #endif /* _LINUX_FS_STRUCT_H */
+Index: linux-3.2/kernel/fork.c
+===================================================================
+--- linux-3.2.orig/kernel/fork.c
++++ linux-3.2/kernel/fork.c
+@@ -825,13 +825,13 @@ static int copy_fs(unsigned long clone_f
+ struct fs_struct *fs = current->fs;
+ if (clone_flags & CLONE_FS) {
+ /* tsk->fs is already what we want */
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ if (fs->in_exec) {
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ return -EAGAIN;
+ }
+ fs->users++;
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ return 0;
+ }
+ tsk->fs = copy_fs_struct(fs);
+@@ -1717,13 +1717,13 @@ SYSCALL_DEFINE1(unshare, unsigned long,
+
+ if (new_fs) {
+ fs = current->fs;
+- spin_lock(&fs->lock);
++ seq_spin_lock(&fs->lock);
+ current->fs = new_fs;
+ if (--fs->users)
+ new_fs = NULL;
+ else
+ new_fs = fs;
+- spin_unlock(&fs->lock);
++ seq_spin_unlock(&fs->lock);
+ }
+
+ if (new_fd) {
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/futex-requeue-pi-fix.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/futex-requeue-pi-fix.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/futex-requeue-pi-fix.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -49,11 +49,16 @@
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
-diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
-index 991bc7f..9850dc0 100644
---- a/kernel/rtmutex.c
-+++ b/kernel/rtmutex.c
-@@ -75,7 +75,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+---
+ kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
+ kernel/rtmutex_common.h | 1 +
+ 2 files changed, 32 insertions(+), 1 deletion(-)
+
+Index: linux-3.2/kernel/rtmutex.c
+===================================================================
+--- linux-3.2.orig/kernel/rtmutex.c
++++ linux-3.2/kernel/rtmutex.c
+@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
@@ -63,7 +68,7 @@
}
/*
-@@ -1353,6 +1354,35 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+@@ -1010,6 +1011,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
@@ -99,11 +104,11 @@
ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
if (ret && !rt_mutex_owner(lock)) {
-diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
-index a688a29..6ec3dc1 100644
---- a/kernel/rtmutex_common.h
-+++ b/kernel/rtmutex_common.h
-@@ -105,6 +105,7 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+Index: linux-3.2/kernel/rtmutex_common.h
+===================================================================
+--- linux-3.2.orig/kernel/rtmutex_common.h
++++ linux-3.2/kernel/rtmutex_common.h
+@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
@@ -111,6 +116,3 @@
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-
-
-
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/ia64-vdso-use-seqcount.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/ia64-vdso-use-seqcount.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,91 @@
+Subject: ia64: vsyscall: Use seqcount instead of seqlock
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Feb 2012 18:33:08 +0100
+
+The update of the vdso data happens under xtime_lock, so adding a
+nested lock is pointless. Just use a seqcount to sync the readers.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: Tony Luck <tony.luck at intel.com>
+---
+ arch/ia64/kernel/asm-offsets.c | 4 ++--
+ arch/ia64/kernel/fsys.S | 2 +-
+ arch/ia64/kernel/fsyscall_gtod_data.h | 2 +-
+ arch/ia64/kernel/time.c | 10 +++-------
+ 4 files changed, 7 insertions(+), 11 deletions(-)
+
+Index: linux-2.6/arch/ia64/kernel/asm-offsets.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/asm-offsets.c
++++ linux-2.6/arch/ia64/kernel/asm-offsets.c
+@@ -269,8 +269,8 @@ void foo(void)
+ BLANK();
+
+ /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
+- DEFINE(IA64_GTOD_LOCK_OFFSET,
+- offsetof (struct fsyscall_gtod_data_t, lock));
++ DEFINE(IA64_GTOD_SEQ_OFFSET,
++	       offsetof (struct fsyscall_gtod_data_t, seq));
+ DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
+ offsetof (struct fsyscall_gtod_data_t, wall_time));
+ DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
+Index: linux-2.6/arch/ia64/kernel/fsys.S
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/fsys.S
++++ linux-2.6/arch/ia64/kernel/fsys.S
+@@ -174,7 +174,7 @@ ENTRY(fsys_set_tid_address)
+ FSYS_RETURN
+ END(fsys_set_tid_address)
+
+-#if IA64_GTOD_LOCK_OFFSET !=0
++#if IA64_GTOD_SEQ_OFFSET !=0
+ #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
+ #endif
+ #if IA64_ITC_JITTER_OFFSET !=0
+Index: linux-2.6/arch/ia64/kernel/fsyscall_gtod_data.h
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/fsyscall_gtod_data.h
++++ linux-2.6/arch/ia64/kernel/fsyscall_gtod_data.h
+@@ -6,7 +6,7 @@
+ */
+
+ struct fsyscall_gtod_data_t {
+- seqlock_t lock;
++ seqcount_t seq;
+ struct timespec wall_time;
+ struct timespec monotonic_time;
+ cycle_t clk_mask;
+Index: linux-2.6/arch/ia64/kernel/time.c
+===================================================================
+--- linux-2.6.orig/arch/ia64/kernel/time.c
++++ linux-2.6/arch/ia64/kernel/time.c
+@@ -35,9 +35,7 @@
+
+ static cycle_t itc_get_cycles(struct clocksource *cs);
+
+-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
+- .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
+-};
++struct fsyscall_gtod_data_t fsyscall_gtod_data;
+
+ struct itc_jitter_data_t itc_jitter_data;
+
+@@ -460,9 +458,7 @@ void update_vsyscall_tz(void)
+ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
+ struct clocksource *c, u32 mult)
+ {
+- unsigned long flags;
+-
+- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
++ write_seqcount_begin(&fsyscall_gtod_data.seq);
+
+ /* copy fsyscall clock data */
+ fsyscall_gtod_data.clk_mask = c->mask;
+@@ -485,6 +481,6 @@ void update_vsyscall(struct timespec *wa
+ fsyscall_gtod_data.monotonic_time.tv_sec++;
+ }
+
+- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
++ write_seqcount_end(&fsyscall_gtod_data.seq);
+ }
+
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -138,7 +138,7 @@
===================================================================
--- linux-3.2.orig/kernel/softirq.c
+++ linux-3.2/kernel/softirq.c
-@@ -431,6 +431,13 @@ void local_bh_enable_ip(unsigned long ip
+@@ -425,6 +425,13 @@ void local_bh_enable_ip(unsigned long ip
}
EXPORT_SYMBOL(local_bh_enable_ip);
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/latency-hist.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/latency-hist.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/latency-hist.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -219,7 +219,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -1566,6 +1566,12 @@ struct task_struct {
+@@ -1570,6 +1570,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/localversion.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/localversion.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/localversion.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -14,4 +14,4 @@
--- /dev/null
+++ linux-3.2/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt15
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/mm-prepare-pf-disable-discoupling.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -19,7 +19,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -1431,6 +1431,7 @@ struct task_struct {
+@@ -1435,6 +1435,7 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/net-u64-stat-protect-seqcount.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/net-u64-stat-protect-seqcount.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,34 @@
+Subject: net: u64_stat: Protect seqcount
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 01 Mar 2012 16:16:02 +0100
+
+On RT we must prevent that the writer gets preempted inside the write
+section. Otherwise a preempting reader might spin forever.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+
+---
+ include/linux/u64_stats_sync.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-3.2/include/linux/u64_stats_sync.h
+===================================================================
+--- linux-3.2.orig/include/linux/u64_stats_sync.h
++++ linux-3.2/include/linux/u64_stats_sync.h
+@@ -70,6 +70,7 @@ struct u64_stats_sync {
+ static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++ preempt_disable_rt();
+ write_seqcount_begin(&syncp->seq);
+ #endif
+ }
+@@ -78,6 +79,7 @@ static inline void u64_stats_update_end(
+ {
+ #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
++ preempt_enable_rt();
+ #endif
+ }
+
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/net-wireless-warn-nort.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/net-wireless-warn-nort.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/net-wireless-warn-nort.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -11,7 +11,7 @@
===================================================================
--- linux-3.2.orig/net/mac80211/rx.c
+++ linux-3.2/net/mac80211/rx.c
-@@ -2952,7 +2952,7 @@ void ieee80211_rx(struct ieee80211_hw *h
+@@ -2958,7 +2958,7 @@ void ieee80211_rx(struct ieee80211_hw *h
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/oleg-signal-rt-fix.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/oleg-signal-rt-fix.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/oleg-signal-rt-fix.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -29,15 +29,40 @@
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
- arch/x86/kernel/signal.c | 9 +++++++++
- include/linux/sched.h | 4 ++++
- kernel/signal.c | 31 +++++++++++++++++++++++++++++--
- 3 files changed, 42 insertions(+), 2 deletions(-)
+ arch/x86/include/asm/signal.h | 13 +++++++++++++
+ arch/x86/kernel/signal.c | 9 +++++++++
+ include/linux/sched.h | 4 ++++
+ kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++--
+ 4 files changed, 61 insertions(+), 2 deletions(-)
-Index: linux-rt.git/arch/x86/kernel/signal.c
+Index: linux-3.2/arch/x86/include/asm/signal.h
===================================================================
---- linux-rt.git.orig/arch/x86/kernel/signal.c
-+++ linux-rt.git/arch/x86/kernel/signal.c
+--- linux-3.2.orig/arch/x86/include/asm/signal.h
++++ linux-3.2/arch/x86/include/asm/signal.h
+@@ -31,6 +31,19 @@ typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+
++/*
++ * Because some traps use the IST stack, we must keep
++ * preemption disabled while calling do_trap(), but do_trap()
++ * may call force_sig_info() which will grab the signal spin_locks
++ * for the task, which in PREEMPT_RT_FULL are mutexes.
++ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
++ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
++ * of the trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #else
+ /* Here we must cater to libcs that poke about in kernel headers. */
+
+Index: linux-3.2/arch/x86/kernel/signal.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/signal.c
++++ linux-3.2/arch/x86/kernel/signal.c
@@ -820,6 +820,15 @@ do_notify_resume(struct pt_regs *regs, v
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
@@ -54,11 +79,11 @@
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
-Index: linux-rt.git/include/linux/sched.h
+Index: linux-3.2/include/linux/sched.h
===================================================================
---- linux-rt.git.orig/include/linux/sched.h
-+++ linux-rt.git/include/linux/sched.h
-@@ -1405,6 +1405,10 @@ struct task_struct {
+--- linux-3.2.orig/include/linux/sched.h
++++ linux-3.2/include/linux/sched.h
+@@ -1392,6 +1392,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
@@ -69,10 +94,10 @@
unsigned long sas_ss_sp;
size_t sas_ss_size;
-Index: linux-rt.git/kernel/signal.c
+Index: linux-3.2/kernel/signal.c
===================================================================
---- linux-rt.git.orig/kernel/signal.c
-+++ linux-rt.git/kernel/signal.c
+--- linux-3.2.orig/kernel/signal.c
++++ linux-3.2/kernel/signal.c
@@ -1273,8 +1273,8 @@ int do_send_sig_info(int sig, struct sig
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
@@ -124,27 +149,3 @@
/*
* Nuke all other threads in the group.
*/
-Index: linux-rt.git/arch/x86/include/asm/signal.h
-===================================================================
---- linux-rt.git.orig/arch/x86/include/asm/signal.h
-+++ linux-rt.git/arch/x86/include/asm/signal.h
-@@ -31,6 +31,19 @@ typedef struct {
- unsigned long sig[_NSIG_WORDS];
- } sigset_t;
-
-+/*
-+ * Because some traps use the IST stack, we must keep
-+ * preemption disabled while calling do_trap(), but do_trap()
-+ * may call force_sig_info() which will grab the signal spin_locks
-+ * for the task, which in PREEMPT_RT_FULL are mutexes.
-+ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
-+ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
-+ * of the trap.
-+ */
-+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
-+#define ARCH_RT_DELAYS_SIGNAL_SEND
-+#endif
-+
- #else
- /* Here we must cater to libcs that poke about in kernel headers. */
-
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/panic-disable-random-on-rt.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/panic-disable-random-on-rt.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/panic-disable-random-on-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -8,7 +8,7 @@
===================================================================
--- linux-3.2.orig/kernel/panic.c
+++ linux-3.2/kernel/panic.c
-@@ -334,9 +334,11 @@ static u64 oops_id;
+@@ -343,9 +343,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -137,7 +137,7 @@
void __init softirq_early_init(void)
{
-@@ -448,7 +454,7 @@ EXPORT_SYMBOL(in_serving_softirq);
+@@ -455,7 +461,7 @@ EXPORT_SYMBOL(in_serving_softirq);
* Called with bh and local interrupts disabled. For full RT cpu must
* be pinned.
*/
@@ -146,7 +146,7 @@
{
u32 pending = local_softirq_pending();
int cpu = smp_processor_id();
-@@ -462,7 +468,7 @@ static void __do_softirq(void)
+@@ -469,7 +475,7 @@ static void __do_softirq(void)
lockdep_softirq_enter();
@@ -155,7 +155,7 @@
pending = local_softirq_pending();
if (pending)
-@@ -501,7 +507,7 @@ static int __thread_do_softirq(int cpu)
+@@ -508,7 +514,7 @@ static int __thread_do_softirq(int cpu)
* schedule!
*/
if (local_softirq_pending())
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-migrate_disable-2.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -77,7 +77,7 @@
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
-@@ -2677,11 +2679,22 @@ static inline void set_task_cpu(struct t
+@@ -2681,11 +2683,22 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-frob-pagefault_disable.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -346,7 +346,7 @@
#include <asm/processor.h>
-@@ -1431,7 +1432,9 @@ struct task_struct {
+@@ -1435,7 +1436,9 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@@ -356,7 +356,7 @@
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
-@@ -1580,6 +1583,17 @@ struct task_struct {
+@@ -1584,6 +1587,17 @@ struct task_struct {
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-re-_3_0-rt4.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-re-_3_0-rt4.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/peter_zijlstra-re-_3_0-rt4.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -14,7 +14,6 @@
Signed-off-by: Peter Zijlstra <a.p.zijlstra at chello.nl>
[dvhart at linux.intel.com: build fix]
Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
-[bwh: Fix context in arch/x86/kernel/process_32.c after FPU changes in 3.2.8]
---
arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++
include/linux/sched.h | 5 +++++
@@ -33,7 +32,7 @@
#include <asm/pgtable.h>
#include <asm/system.h>
-@@ -350,6 +351,41 @@ __switch_to(struct task_struct *prev_p,
+@@ -339,6 +340,41 @@ __switch_to(struct task_struct *prev_p,
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
@@ -87,7 +86,7 @@
#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1599,6 +1600,10 @@ struct task_struct {
+@@ -1603,6 +1604,10 @@ struct task_struct {
struct rcu_head put_rcu;
int softirq_nestcnt;
#endif
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,53 @@
+Subject: futex/rt: Fix possible lockup when taking pi_lock in proxy handler
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:29 -0500
+
+When taking the pi_lock, we must disable interrupts because the
+pi_lock can also be taken in an interrupt handler.
+
+Use raw_spin_lock_irq() instead of raw_spin_lock().
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.165160680@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/rtmutex.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
+index 9850dc0..b525158 100644
+--- a/kernel/rtmutex.c
++++ b/kernel/rtmutex.c
+@@ -1373,14 +1373,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
+- raw_spin_lock(&task->pi_lock);
++ raw_spin_lock_irq(&task->pi_lock);
+ if (task->pi_blocked_on) {
+- raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&task->pi_lock);
+ raw_spin_unlock(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+- raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&task->pi_lock);
+ #endif
+
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+--
+1.7.3.4
+
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
+the body of a message to majordomo at vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+
+
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,118 @@
+Subject: lglock/rt: Use non-rt for_each_cpu() in -rt code
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:30 -0500
+
+Currently the RT version of the lglocks() does a for_each_online_cpu()
+in the name##_global_lock_online() functions. Non-rt uses its own
+mask for this, and for good reason.
+
+A task may grab a *_global_lock_online(), and in the mean time, one
+of the CPUs goes offline. Now when that task does a *_global_unlock_online()
+it releases all the locks *except* the one that went offline.
+
+Now if that CPU were to come back on line, its lock is now owned by a
+task that never released it when it should have.
+
+This causes all sorts of fun errors. Like owners of a lock no longer
+existing, or sleeping on IO, waiting to be woken up by a task that
+happens to be blocked on the lock it never released.
+
+Convert the RT versions to use the lglock specific cpumasks. As once
+a CPU comes on line, the mask is set, and never cleared even when the
+CPU goes offline. The locks for that CPU will still be taken and released.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.374756214@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ include/linux/lglock.h | 35 ++++++++++++++++++++++++++++++++---
+ 1 files changed, 32 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/lglock.h b/include/linux/lglock.h
+index 52b289f..cdfcef3 100644
+--- a/include/linux/lglock.h
++++ b/include/linux/lglock.h
+@@ -203,9 +203,31 @@
+ #else /* !PREEMPT_RT_FULL */
+ #define DEFINE_LGLOCK(name) \
+ \
+- DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
++ DEFINE_PER_CPU(struct rt_mutex, name##_lock); \
++ DEFINE_SPINLOCK(name##_cpu_lock); \
++ cpumask_t name##_cpus __read_mostly; \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ \
++ static int \
++ name##_lg_cpu_callback(struct notifier_block *nb, \
++ unsigned long action, void *hcpu) \
++ { \
++ switch (action & ~CPU_TASKS_FROZEN) { \
++ case CPU_UP_PREPARE: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_set((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ break; \
++ case CPU_UP_CANCELED: case CPU_DEAD: \
++ spin_lock(&name##_cpu_lock); \
++ cpu_clear((unsigned long)hcpu, name##_cpus); \
++ spin_unlock(&name##_cpu_lock); \
++ } \
++ return NOTIFY_OK; \
++ } \
++ static struct notifier_block name##_lg_cpu_notifier = { \
++ .notifier_call = name##_lg_cpu_callback, \
++ }; \
+ void name##_lock_init(void) { \
+ int i; \
+ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
+@@ -214,6 +236,11 @@
+ lock = &per_cpu(name##_lock, i); \
+ rt_mutex_init(lock); \
+ } \
++ register_hotcpu_notifier(&name##_lg_cpu_notifier); \
++ get_online_cpus(); \
++ for_each_online_cpu(i) \
++ cpu_set(i, name##_cpus); \
++ put_online_cpus(); \
+ } \
+ EXPORT_SYMBOL(name##_lock_init); \
+ \
+@@ -254,7 +281,8 @@
+ void name##_global_lock_online(void) { \
+ int i; \
+ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ spin_lock(&name##_cpu_lock); \
++ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_lock(lock); \
+@@ -265,11 +293,12 @@
+ void name##_global_unlock_online(void) { \
+ int i; \
+ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
+- for_each_online_cpu(i) { \
++ for_each_cpu(i, &name##_cpus) { \
+ struct rt_mutex *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ __rt_spin_unlock(lock); \
+ } \
++ spin_unlock(&name##_cpu_lock); \
+ } \
+ EXPORT_SYMBOL(name##_global_unlock_online); \
+ \
+--
+1.7.3.4
+
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
+the body of a message to majordomo at vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+
+
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,33 @@
+Subject: ring-buffer/rt: Check for irqs disabled before grabbing reader lock
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:32 -0500
+
+In RT the reader lock is a mutex and we can not grab it when preemption is
+disabled. The in_atomic() check that is there does not check if irqs are
+disabled. Add that check as well.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.786365803@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/trace/ring_buffer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-3.2/kernel/trace/ring_buffer.c
+===================================================================
+--- linux-3.2.orig/kernel/trace/ring_buffer.c
++++ linux-3.2/kernel/trace/ring_buffer.c
+@@ -1054,7 +1054,7 @@ static inline int ok_to_lock(void)
+ if (in_nmi())
+ return 0;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+- if (in_atomic())
++ if (in_atomic() || irqs_disabled())
+ return 0;
+ #endif
+ return 1;
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,58 @@
+Subject: sched/rt: Fix wait_task_inactive() to test rt_spin_lock state
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:33 -0500
+
+The wait_task_inactive() will have a task sleep waiting for another
+task to have a certain state. But it ignores the rt_spin_locks state
+and can return with an incorrect result if the task it is waiting
+for is blocked on a rt_spin_lock() and is waking up.
+
+The rt_spin_locks save the tasks state in the saved_state field
+and the wait_task_inactive() must also test that state.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.979435764@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/sched.c | 6 ++++--
+ 1 files changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched.c b/kernel/sched.c
+index 81b340d..1cc706d 100644
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -2450,7 +2450,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && unlikely(p->state != match_state)
++ && unlikely(p->saved_state != match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -2465,7 +2466,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ running = task_running(rq, p);
+ on_rq = p->on_rq;
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state
++ || p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &flags);
+
+--
+1.7.3.4
+
+
+--
+To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
+the body of a message to majordomo at vger.kernel.org
+More majordomo info at http://vger.kernel.org/majordomo-info.html
+
+
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-timer-fix-hotplug-for-rt.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rfc-timer-fix-hotplug-for-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,72 @@
+Subject: timer: Fix hotplug for -rt
+From: Steven Rostedt <rostedt at goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:28 -0500
+
+Revert the RT patch:
+ Author: Ingo Molnar <mingo at elte.hu>
+ Date: Fri Jul 3 08:30:32 2009 -0500
+ timers: fix timer hotplug on -rt
+
+ Here we are in the CPU_DEAD notifier, and we must not sleep nor
+ enable interrupts.
+
+There's no problem with sleeping in this notifier.
+
+But the get_cpu_var() had to be converted to a get_local_var().
+
+Replace the previous fix with the get_local_var() convert.
+
+Signed-off-by: Steven Rostedt <rostedt at goodmis.org>
+Cc: Carsten Emde <C.Emde at osadl.org>
+Cc: John Kacur <jkacur at redhat.com>
+Cc: Peter Zijlstra <peterz at infradead.org>
+Cc: Clark Williams <clark.williams at gmail.com>
+Cc: stable-rt at vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190344.948157137@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/timer.c | 16 +++++-----------
+ 1 file changed, 5 insertions(+), 11 deletions(-)
+
+Index: linux-3.2/kernel/timer.c
+===================================================================
+--- linux-3.2.orig/kernel/timer.c
++++ linux-3.2/kernel/timer.c
+@@ -1743,21 +1743,17 @@ static void __cpuinit migrate_timers(int
+ {
+ struct tvec_base *old_base;
+ struct tvec_base *new_base;
+- unsigned long flags;
+ int i;
+
+ BUG_ON(cpu_online(cpu));
+ old_base = per_cpu(tvec_bases, cpu);
+- new_base = get_cpu_var(tvec_bases);
++ new_base = get_local_var(tvec_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+ */
+- local_irq_save(flags);
+- while (!spin_trylock(&new_base->lock))
+- cpu_relax();
+- while (!spin_trylock(&old_base->lock))
+- cpu_relax();
++ spin_lock_irq(&new_base->lock);
++ spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+ BUG_ON(old_base->running_timer);
+
+@@ -1771,10 +1767,8 @@ static void __cpuinit migrate_timers(int
+ }
+
+ spin_unlock(&old_base->lock);
+- spin_unlock(&new_base->lock);
+- local_irq_restore(flags);
+-
+- put_cpu_var(tvec_bases);
++ spin_unlock_irq(&new_base->lock);
++ put_local_var(tvec_bases);
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/rt-mutex-add-sleeping-spinlocks-support.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -103,7 +103,7 @@
* See Documentation/rt-mutex-design.txt for details.
*/
#include <linux/spinlock.h>
-@@ -95,6 +101,12 @@ static inline void mark_rt_mutex_waiters
+@@ -96,6 +102,12 @@ static inline void mark_rt_mutex_waiters
}
#endif
@@ -116,7 +116,7 @@
/*
* Calculate task priority from the waiter list priority
*
-@@ -141,6 +153,14 @@ static void rt_mutex_adjust_prio(struct
+@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@@ -131,7 +131,7 @@
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -252,13 +272,15 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -253,13 +273,15 @@ static int rt_mutex_adjust_prio_chain(st
/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (!rt_mutex_owner(lock)) {
@@ -150,7 +150,7 @@
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
}
-@@ -303,6 +325,25 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -304,6 +326,25 @@ static int rt_mutex_adjust_prio_chain(st
return ret;
}
@@ -176,7 +176,7 @@
/*
* Try to take an rt-mutex
*
-@@ -312,8 +353,9 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -313,8 +354,9 @@ static int rt_mutex_adjust_prio_chain(st
* @task: the task which wants to acquire the lock
* @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
*/
@@ -188,7 +188,7 @@
{
/*
* We have to be careful here if the atomic speedups are
-@@ -346,12 +388,14 @@ static int try_to_take_rt_mutex(struct r
+@@ -347,12 +389,14 @@ static int try_to_take_rt_mutex(struct r
* 3) it is top waiter
*/
if (rt_mutex_has_waiters(lock)) {
@@ -207,7 +207,7 @@
if (waiter || rt_mutex_has_waiters(lock)) {
unsigned long flags;
struct rt_mutex_waiter *top;
-@@ -376,7 +420,6 @@ static int try_to_take_rt_mutex(struct r
+@@ -377,7 +421,6 @@ static int try_to_take_rt_mutex(struct r
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
@@ -215,7 +215,7 @@
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task);
-@@ -386,6 +429,13 @@ static int try_to_take_rt_mutex(struct r
+@@ -387,6 +430,13 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -229,7 +229,7 @@
/*
* Task blocks on lock.
*
-@@ -500,7 +550,7 @@ static void wakeup_next_waiter(struct rt
+@@ -501,7 +551,7 @@ static void wakeup_next_waiter(struct rt
raw_spin_unlock_irqrestore(¤t->pi_lock, flags);
@@ -238,7 +238,7 @@
}
/*
-@@ -579,18 +629,315 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -580,18 +630,315 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
@@ -557,7 +557,7 @@
* @timeout: the pre-initialized and started timer, or NULL for none
* @waiter: the pre-initialized rt_mutex_waiter
*
-@@ -654,9 +1001,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -655,9 +1002,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
struct rt_mutex_waiter waiter;
int ret = 0;
@@ -569,7 +569,7 @@
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
-@@ -709,6 +1057,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
+@@ -710,6 +1058,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
int ret = 0;
raw_spin_lock(&lock->wait_lock);
@@ -577,7 +577,7 @@
if (likely(rt_mutex_owner(lock) != current)) {
-@@ -941,7 +1290,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -942,7 +1291,6 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -585,7 +585,7 @@
plist_head_init(&lock->wait_list);
debug_rt_mutex_init(lock, name);
-@@ -961,7 +1309,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -962,7 +1310,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -606,7 +606,7 @@
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -125,4 +126,12 @@ extern int rt_mutex_finish_proxy_lock(st
+@@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(st
# include "rtmutex.h"
#endif
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/sched-delay-put-task.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/sched-delay-put-task.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/sched-delay-put-task.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -12,7 +12,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -1587,6 +1587,9 @@ struct task_struct {
+@@ -1591,6 +1591,9 @@ struct task_struct {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
atomic_t ptrace_bp_refcnt;
#endif
@@ -22,7 +22,7 @@
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1771,6 +1774,15 @@ extern struct pid *cad_pid;
+@@ -1775,6 +1778,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -38,7 +38,7 @@
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -1778,6 +1790,7 @@ static inline void put_task_struct(struc
+@@ -1782,6 +1794,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/sched-migrate-disable.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/sched-migrate-disable.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/sched-migrate-disable.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -42,7 +42,7 @@
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
-@@ -1594,9 +1595,6 @@ struct task_struct {
+@@ -1598,9 +1599,6 @@ struct task_struct {
#endif
};
@@ -52,7 +52,7 @@
#ifdef CONFIG_PREEMPT_RT_FULL
static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
#else
-@@ -2679,6 +2677,15 @@ static inline void set_task_cpu(struct t
+@@ -2683,6 +2681,15 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/sched-mmdrop-delayed.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/sched-mmdrop-delayed.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/sched-mmdrop-delayed.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -39,7 +39,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -2264,12 +2264,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2268,12 +2268,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/sched-no-work-when-pi-blocked.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/sched-no-work-when-pi-blocked.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/sched-no-work-when-pi-blocked.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -12,7 +12,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -2091,12 +2091,20 @@ extern unsigned int sysctl_sched_cfs_ban
+@@ -2095,12 +2095,20 @@ extern unsigned int sysctl_sched_cfs_ban
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/sched-rt-mutex-wakeup.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -28,7 +28,7 @@
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2174,6 +2176,7 @@ extern void xtime_update(unsigned long t
+@@ -2178,6 +2180,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-prevent-rt-starvation.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,56 @@
+Subject: seqlock: Prevent rt starvation
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Wed, 22 Feb 2012 12:03:30 +0100
+
+If a low prio writer gets preempted while holding the seqlock write
+locked, a high prio reader spins forever on RT.
+
+To prevent this let the reader grab the spinlock, so it blocks and
+eventually boosts the writer. This way the writer can proceed and
+endless spinning is prevented.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+
+---
+ include/linux/seqlock.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+Index: linux-3.2/include/linux/seqlock.h
+===================================================================
+--- linux-3.2.orig/include/linux/seqlock.h
++++ linux-3.2/include/linux/seqlock.h
+@@ -177,10 +177,33 @@ typedef struct {
+ /*
+ * Read side functions for starting and finalizing a read side section.
+ */
++#ifndef CONFIG_PREEMPT_RT
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++ unsigned ret;
++
++repeat:
++ ret = sl->seqcount.sequence;
++ if (unlikely(ret & 1)) {
++ /*
++ * Take the lock and let the writer proceed (i.e. evtl
++ * boost it), otherwise we could loop here forever.
++ */
++ spin_lock(&sl->lock);
++ spin_unlock(&sl->lock);
++ goto repeat;
++ }
++ return ret;
++}
++#endif
+
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-provide-seq-spin-lock.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-provide-seq-spin-lock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,94 @@
+Subject: seqlock: Provide seq_spin_* functions
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Mon, 27 Feb 2012 17:55:11 +0100
+
+In some cases it's desirable to lock the seqlock w/o changing the
+seqcount. Provide functions for this, so we can avoid open coded
+constructs.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+Cc: stable-rt at vger.kernel.org
+---
+ include/linux/seqlock.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 64 insertions(+)
+
+Index: rt/include/linux/seqlock.h
+===================================================================
+--- rt.orig/include/linux/seqlock.h
++++ rt/include/linux/seqlock.h
+@@ -188,6 +188,19 @@ static inline unsigned read_seqretry(con
+ }
+
+ /*
++ * Ditto w/o barriers
++ */
++static inline unsigned __read_seqbegin(const seqlock_t *sl)
++{
++ return __read_seqcount_begin(&sl->seqcount);
++}
++
++static inline unsigned __read_seqretry(const seqlock_t *sl, unsigned start)
++{
++ return __read_seqcount_retry(&sl->seqcount, start);
++}
++
++/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+@@ -247,4 +260,55 @@ write_sequnlock_irqrestore(seqlock_t *sl
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
++/*
++ * Instead of open coding a spinlock and a seqcount, the following
++ * functions allow to serialize on the seqlock w/o touching seqcount.
++ */
++static inline void seq_spin_lock(seqlock_t *sl)
++{
++ spin_lock(&sl->lock);
++}
++
++static inline int seq_spin_trylock(seqlock_t *sl)
++{
++ return spin_trylock(&sl->lock);
++}
++
++static inline void seq_spin_unlock(seqlock_t *sl)
++{
++ spin_unlock(&sl->lock);
++}
++
++static inline void assert_seq_spin_locked(seqlock_t *sl)
++{
++ assert_spin_locked(&sl->lock);
++}
++
++static inline void seq_spin_lock_nested(seqlock_t *sl, int subclass)
++{
++ spin_lock_nested(&sl->lock, subclass);
++}
++
++/*
++ * For writers which need to take/release the lock w/o updating seqcount for
++ * whatever reasons the following functions allow to update the count
++ * after the lock has been acquired or before it is released.
++ */
++static inline void write_seqlock_begin(seqlock_t *sl)
++{
++ assert_spin_locked(&sl->lock);
++ write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_seqlock_end(seqlock_t *sl)
++{
++ assert_spin_locked(&sl->lock);
++ write_seqcount_end(&sl->seqcount);
++}
++
++static inline void write_seqlock_barrier(seqlock_t *sl)
++{
++ write_seqcount_barrier(&sl->seqcount);
++}
++
+ #endif /* __LINUX_SEQLOCK_H */
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-remove-unused-functions.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -1,4 +1,4 @@
-Subject: seqlock-remove-unused-functions.patch
+Subject: seqlock: Remove unused functions
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 18:38:22 +0200
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-use-seqcount.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-use-seqcount.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/seqlock-use-seqcount.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -1,16 +1,18 @@
-Subject: seqlock-use-seqcount.patch
+Subject: seqlock: Use seqcount
From: Thomas Gleixner <tglx at linutronix.de>
Date: Sat, 16 Jul 2011 18:40:26 +0200
+No point in having different implementations for the same thing.
+
Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
---
- include/linux/seqlock.h | 166 ++++++++++++++++++++++++------------------------
- 1 file changed, 83 insertions(+), 83 deletions(-)
+ include/linux/seqlock.h | 176 +++++++++++++++++++++++++-----------------------
+ 1 file changed, 93 insertions(+), 83 deletions(-)
-Index: linux-3.2/include/linux/seqlock.h
+Index: rt/include/linux/seqlock.h
===================================================================
---- linux-3.2.orig/include/linux/seqlock.h
-+++ linux-3.2/include/linux/seqlock.h
+--- rt.orig/include/linux/seqlock.h
++++ rt/include/linux/seqlock.h
@@ -30,81 +30,12 @@
#include <linux/preempt.h>
#include <asm/processor.h>
@@ -101,7 +103,7 @@
return __read_seqcount_retry(s, start);
}
-@@ -220,21 +150,91 @@ static inline void write_seqcount_barrie
+@@ -220,21 +150,101 @@ static inline void write_seqcount_barrie
s->sequence+=2;
}
@@ -130,8 +132,18 @@
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
-+#define read_seqbegin(sl) read_seqcount_begin(&(sl)->seqcount)
-+#define read_seqretry(sl, start) read_seqcount_retry(&(sl)->seqcount, start)
++/*
++ * Read side functions for starting and finalizing a read side section.
++ */
++static inline unsigned read_seqbegin(const seqlock_t *sl)
++{
++ return read_seqcount_begin(&sl->seqcount);
++}
++
++static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
++{
++ return read_seqcount_retry(&sl->seqcount, start);
++}
+
+/*
+ * Lock out other writers and update the count.
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/series
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/series Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/series Sun Mar 4 13:31:34 2012 (r18782)
@@ -16,8 +16,6 @@
# rtc-deal-with-errors-correctly.patch -- 3.0.1
# rtc-fix-hrtimer-deadlock.patch -- 3.0.1
# rtc-limit-frequency.patch -- 3.0.1
-genirq-unmask-oneshot-irqs-when-thread-is-not-woken.patch
-genirq-handle-pending-irqs-in-irq-startup.patch
# Some variant of this is in 3.1
@@ -98,7 +96,21 @@
# Stuff which should go upstream ASAP
############################################################
+# Timekeeping / VDSO
+time-remove-bogus-comments.patch
+x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch
+x86-vdso-use-seqcount.patch
+ia64-vdso-use-seqcount.patch
+
+# SEQLOCK
+seqlock-remove-unused-functions.patch
+seqlock-use-seqcount.patch
+seqlock-provide-seq-spin-lock.patch
+fs-struct-use-seqlock.patch
+fs-dentry-use-seqlock.patch
+
# RAW SPINLOCKS
+timekeeping-split-xtime-lock.patch
intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch
# MM memcg
@@ -196,25 +208,15 @@
locking-various-init-fixes.patch
# rtc-tegra-lock-init.patch -- 3.0.1
-# SEQLOCK
-seqlock-remove-unused-functions.patch
-seqlock-use-seqcount.patch
-
# PCI
wait-provide-__wake_up_all_locked.patch
pci-access-use-__wake_up_all_locked.patch
-# ACPI
-acpi-make-gbl-hardware-lock-raw.patch
-acpi-make-ec-lock-raw-as-well.patch
-
#####################################################
# Stuff which should go mainline, but wants some care
#####################################################
# SEQLOCK
-seqlock-raw-seqlock.patch
-timekeeping-covert-xtimelock.patch
# ANON RW SEMAPHORES
@@ -559,17 +561,32 @@
# Device mapper
dm-make-rt-aware.patch
+# ACPI
+# Dropped those two as they cause a scheduling in atomic failure and
+# we have no clue why we made those locks raw in the first place.
+# acpi-make-gbl-hardware-lock-raw.patch
+# acpi-make-ec-lock-raw-as-well.patch
+
+# This one is just a follow up to the raw spin locks
# Simple raw spinlock based waitqueue
-wait-simple-version.patch
-acpi-gpe-use-wait-simple.patch
+# wait-simple-version.patch
+# acpi-gpe-use-wait-simple.patch
# CPUMASK OFFSTACK
cpumask-disable-offstack-on-rt.patch
+# Various fixes - fold them back
+seqlock-prevent-rt-starvation.patch
+fs-protect-opencoded-isize-seqcount.patch
+net-u64-stat-protect-seqcount.patch
+rfc-timer-fix-hotplug-for-rt.patch
+rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch
+rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
+rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
+rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch
+
# Enable full RT
+cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
kconfig-disable-a-few-options-rt.patch
kconfig-preempt-rt-full.patch
-# Needs some thought and testing
-#softirq-preempt-fix.patch
-
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/signal-fix-up-rcu-wreckage.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -11,7 +11,7 @@
===================================================================
--- linux-3.2.orig/kernel/signal.c
+++ linux-3.2/kernel/signal.c
-@@ -1329,12 +1329,12 @@ struct sighand_struct *__lock_task_sigha
+@@ -1362,12 +1362,12 @@ struct sighand_struct *__lock_task_sigha
struct sighand_struct *sighand;
for (;;) {
@@ -26,7 +26,7 @@
break;
}
-@@ -1345,7 +1345,7 @@ struct sighand_struct *__lock_task_sigha
+@@ -1378,7 +1378,7 @@ struct sighand_struct *__lock_task_sigha
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-disable-softirq-stacks-for-rt.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -19,7 +19,7 @@
===================================================================
--- linux-3.2.orig/arch/powerpc/kernel/irq.c
+++ linux-3.2/arch/powerpc/kernel/irq.c
-@@ -443,6 +443,7 @@ void irq_ctx_init(void)
+@@ -440,6 +440,7 @@ void irq_ctx_init(void)
}
}
@@ -27,7 +27,7 @@
static inline void do_softirq_onstack(void)
{
struct thread_info *curtp, *irqtp;
-@@ -479,7 +480,7 @@ void do_softirq(void)
+@@ -476,7 +477,7 @@ void do_softirq(void)
local_irq_restore(flags);
}
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-local-lock.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-local-lock.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/softirq-local-lock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -89,7 +89,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -1597,6 +1597,7 @@ struct task_struct {
+@@ -1601,6 +1601,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/stomp-machine-mark-stomper-thread.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -12,7 +12,7 @@
===================================================================
--- linux-3.2.orig/include/linux/sched.h
+++ linux-3.2/include/linux/sched.h
-@@ -1818,6 +1818,7 @@ extern void thread_group_times(struct ta
+@@ -1822,6 +1822,7 @@ extern void thread_group_times(struct ta
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/time-remove-bogus-comments.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/time-remove-bogus-comments.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,38 @@
+Subject: time: Remove bogus comments
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Feb 2012 19:06:50 +0100
+
+There is no global irq lock which makes a syscall magically SMP
+safe. Remove the outdated comment concerning do_settimeofday() as
+well.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/time.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+Index: linux-3.2/kernel/time.c
+===================================================================
+--- linux-3.2.orig/kernel/time.c
++++ linux-3.2/kernel/time.c
+@@ -163,7 +163,6 @@ int do_sys_settimeofday(const struct tim
+ return error;
+
+ if (tz) {
+- /* SMP safe, global irq locking makes it work. */
+ sys_tz = *tz;
+ update_vsyscall_tz();
+ if (firsttime) {
+@@ -173,12 +172,7 @@ int do_sys_settimeofday(const struct tim
+ }
+ }
+ if (tv)
+- {
+- /* SMP safe, again the code in arch/foo/time.c should
+- * globally block out interrupts when it runs.
+- */
+ return do_settimeofday(tv);
+- }
+ return 0;
+ }
+
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/timekeeping-split-xtime-lock.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,533 @@
+Subject: timekeeping: Split xtime_lock
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Thu, 01 Mar 2012 15:14:06 +0100
+
+xtime_lock is going to be split apart in mainline, so we can shorten
+the seqcount protected regions and avoid updating seqcount in some
+code pathes. This is a straight forward split, so we can avoid the
+whole mess with raw seqlocks for RT.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ kernel/time/jiffies.c | 4 -
+ kernel/time/ntp.c | 24 +++++++----
+ kernel/time/tick-common.c | 10 ++--
+ kernel/time/tick-internal.h | 3 -
+ kernel/time/tick-sched.c | 16 ++++---
+ kernel/time/timekeeping.c | 90 +++++++++++++++++++++++++-------------------
+ 6 files changed, 88 insertions(+), 59 deletions(-)
+
+Index: linux-3.2/kernel/time/jiffies.c
+===================================================================
+--- linux-3.2.orig/kernel/time/jiffies.c
++++ linux-3.2/kernel/time/jiffies.c
+@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
+ u64 ret;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ ret = jiffies_64;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+ return ret;
+ }
+ EXPORT_SYMBOL(get_jiffies_64);
+Index: linux-3.2/kernel/time/ntp.c
+===================================================================
+--- linux-3.2.orig/kernel/time/ntp.c
++++ linux-3.2/kernel/time/ntp.c
+@@ -358,7 +358,8 @@ static enum hrtimer_restart ntp_leap_sec
+ {
+ enum hrtimer_restart res = HRTIMER_NORESTART;
+
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ switch (time_state) {
+ case TIME_OK:
+@@ -388,7 +389,8 @@ static enum hrtimer_restart ntp_leap_sec
+ break;
+ }
+
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+
+ return res;
+ }
+@@ -663,7 +665,8 @@ int do_adjtimex(struct timex *txc)
+
+ getnstimeofday(&ts);
+
+- write_seqlock_irq(&xtime_lock);
++ raw_spin_lock_irq(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ if (txc->modes & ADJ_ADJTIME) {
+ long save_adjust = time_adjust;
+@@ -705,7 +708,8 @@ int do_adjtimex(struct timex *txc)
+ /* fill PPS status fields */
+ pps_fill_timex(txc);
+
+- write_sequnlock_irq(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irq(&xtime_lock);
+
+ txc->time.tv_sec = ts.tv_sec;
+ txc->time.tv_usec = ts.tv_nsec;
+@@ -903,7 +907,8 @@ void hardpps(const struct timespec *phas
+
+ pts_norm = pps_normalize_ts(*phase_ts);
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+
+ /* clear the error bits, they will be set again if needed */
+ time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+@@ -916,7 +921,8 @@ void hardpps(const struct timespec *phas
+ * just start the frequency interval */
+ if (unlikely(pps_fbase.tv_sec == 0)) {
+ pps_fbase = *raw_ts;
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+ return;
+ }
+
+@@ -931,7 +937,8 @@ void hardpps(const struct timespec *phas
+ time_status |= STA_PPSJITTER;
+ /* restart the frequency calibration interval */
+ pps_fbase = *raw_ts;
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+ pr_err("hardpps: PPSJITTER: bad pulse\n");
+ return;
+ }
+@@ -948,7 +955,8 @@ void hardpps(const struct timespec *phas
+
+ hardpps_update_phase(pts_norm.nsec);
+
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+ }
+ EXPORT_SYMBOL(hardpps);
+
+Index: linux-3.2/kernel/time/tick-common.c
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-common.c
++++ linux-3.2/kernel/time/tick-common.c
+@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
+ static void tick_periodic(int cpu)
+ {
+ if (tick_do_timer_cpu == cpu) {
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ /* Keep track of the next tick event */
+ tick_next_period = ktime_add(tick_next_period, tick_period);
+
+ do_timer(1);
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
+
+ update_process_times(user_mode(get_irq_regs()));
+@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev
+ ktime_t next;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ next = tick_next_period;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+
+Index: linux-3.2/kernel/time/tick-internal.h
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-internal.h
++++ linux-3.2/kernel/time/tick-internal.h
+@@ -141,4 +141,5 @@ static inline int tick_device_is_functio
+ #endif
+
+ extern void do_timer(unsigned long ticks);
+-extern seqlock_t xtime_lock;
++extern raw_spinlock_t xtime_lock;
++extern seqcount_t xtime_seq;
+Index: linux-3.2/kernel/time/tick-sched.c
+===================================================================
+--- linux-3.2.orig/kernel/time/tick-sched.c
++++ linux-3.2/kernel/time/tick-sched.c
+@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti
+ return;
+
+ /* Reevalute with xtime_lock held */
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ delta = ktime_sub(now, last_jiffies_update);
+ if (delta.tv64 >= tick_period.tv64) {
+@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(kti
+ /* Keep the tick_next_period variable up to date */
+ tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ }
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
+
+ /*
+@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo
+ {
+ ktime_t period;
+
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+ /* Did we start the jiffies update yet ? */
+ if (last_jiffies_update.tv64 == 0)
+ last_jiffies_update = tick_next_period;
+ period = last_jiffies_update;
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ return period;
+ }
+
+@@ -345,11 +349,11 @@ void tick_nohz_stop_sched_tick(int inidl
+ ts->idle_calls++;
+ /* Read jiffies and the time when jiffies were updated last */
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ last_update = last_jiffies_update;
+ last_jiffies = jiffies;
+ time_delta = timekeeping_max_deferment();
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
+ arch_needs_cpu(cpu)) {
+Index: linux-3.2/kernel/time/timekeeping.c
+===================================================================
+--- linux-3.2.orig/kernel/time/timekeeping.c
++++ linux-3.2/kernel/time/timekeeping.c
+@@ -139,8 +139,8 @@ static inline s64 timekeeping_get_ns_raw
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+-
++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
++seqcount_t xtime_seq;
+
+ /*
+ * The current time
+@@ -222,7 +222,7 @@ void getnstimeofday(struct timespec *ts)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ *ts = xtime;
+ nsecs = timekeeping_get_ns();
+@@ -230,7 +230,7 @@ void getnstimeofday(struct timespec *ts)
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ timespec_add_ns(ts, nsecs);
+ }
+@@ -245,14 +245,14 @@ ktime_t ktime_get(void)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
+ nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+ nsecs += timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+ /*
+ * Use ktime_set/ktime_add_ns to create a proper ktime on
+ * 32-bit architectures without CONFIG_KTIME_SCALAR.
+@@ -278,14 +278,14 @@ void ktime_get_ts(struct timespec *ts)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ *ts = xtime;
+ tomono = wall_to_monotonic;
+ nsecs = timekeeping_get_ns();
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
+ ts->tv_nsec + tomono.tv_nsec + nsecs);
+@@ -313,7 +313,7 @@ void getnstime_raw_and_real(struct times
+ do {
+ u32 arch_offset;
+
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ *ts_raw = raw_time;
+ *ts_real = xtime;
+@@ -326,7 +326,7 @@ void getnstime_raw_and_real(struct times
+ nsecs_raw += arch_offset;
+ nsecs_real += arch_offset;
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ timespec_add_ns(ts_raw, nsecs_raw);
+ timespec_add_ns(ts_real, nsecs_real);
+@@ -365,7 +365,8 @@ int do_settimeofday(const struct timespe
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+
+ timekeeping_forward_now();
+
+@@ -381,7 +382,8 @@ int do_settimeofday(const struct timespe
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
+
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -405,7 +407,8 @@ int timekeeping_inject_offset(struct tim
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+
+ timekeeping_forward_now();
+
+@@ -418,7 +421,8 @@ int timekeeping_inject_offset(struct tim
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
+
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -490,11 +494,11 @@ void getrawmonotonic(struct timespec *ts
+ s64 nsecs;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ nsecs = timekeeping_get_ns_raw();
+ *ts = raw_time;
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ timespec_add_ns(ts, nsecs);
+ }
+@@ -510,11 +514,11 @@ int timekeeping_valid_for_hres(void)
+ int ret;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ return ret;
+ }
+@@ -572,7 +576,8 @@ void __init timekeeping_init(void)
+ read_persistent_clock(&now);
+ read_boot_clock(&boot);
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+
+ ntp_init();
+
+@@ -593,7 +598,8 @@ void __init timekeeping_init(void)
+ -boot.tv_sec, -boot.tv_nsec);
+ total_sleep_time.tv_sec = 0;
+ total_sleep_time.tv_nsec = 0;
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+ }
+
+ /* time in seconds when suspend began */
+@@ -640,7 +646,8 @@ void timekeeping_inject_sleeptime(struct
+ if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
+ return;
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+ timekeeping_forward_now();
+
+ __timekeeping_inject_sleeptime(delta);
+@@ -650,7 +657,8 @@ void timekeeping_inject_sleeptime(struct
+ update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+ timekeeper.mult);
+
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -673,7 +681,8 @@ static void timekeeping_resume(void)
+
+ clocksource_resume();
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+
+ if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
+ ts = timespec_sub(ts, timekeeping_suspend_time);
+@@ -683,7 +692,8 @@ static void timekeeping_resume(void)
+ timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
+ timekeeper.ntp_error = 0;
+ timekeeping_suspended = 0;
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+
+ touch_softlockup_watchdog();
+
+@@ -701,7 +711,8 @@ static int timekeeping_suspend(void)
+
+ read_persistent_clock(&timekeeping_suspend_time);
+
+- write_seqlock_irqsave(&xtime_lock, flags);
++ raw_spin_lock_irqsave(&xtime_lock, flags);
++ write_seqcount_begin(&xtime_seq);
+ timekeeping_forward_now();
+ timekeeping_suspended = 1;
+
+@@ -724,7 +735,8 @@ static int timekeeping_suspend(void)
+ timekeeping_suspend_time =
+ timespec_add(timekeeping_suspend_time, delta_delta);
+ }
+- write_sequnlock_irqrestore(&xtime_lock, flags);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock_irqrestore(&xtime_lock, flags);
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+ clocksource_suspend();
+@@ -1101,13 +1113,13 @@ void get_monotonic_boottime(struct times
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ *ts = xtime;
+ tomono = wall_to_monotonic;
+ sleep = total_sleep_time;
+ nsecs = timekeeping_get_ns();
+
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
+ ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+@@ -1158,10 +1170,10 @@ struct timespec current_kernel_time(void
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ now = xtime;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ return now;
+ }
+@@ -1173,11 +1185,11 @@ struct timespec get_monotonic_coarse(voi
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+
+ now = xtime;
+ mono = wall_to_monotonic;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
+ now.tv_nsec + mono.tv_nsec);
+@@ -1209,11 +1221,11 @@ void get_xtime_and_monotonic_and_sleep_o
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ *xtim = xtime;
+ *wtom = wall_to_monotonic;
+ *sleep = total_sleep_time;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+ }
+
+ /**
+@@ -1225,9 +1237,9 @@ ktime_t ktime_get_monotonic_offset(void)
+ struct timespec wtom;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ wtom = wall_to_monotonic;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+ return timespec_to_ktime(wtom);
+ }
+
+@@ -1239,7 +1251,9 @@ ktime_t ktime_get_monotonic_offset(void)
+ */
+ void xtime_update(unsigned long ticks)
+ {
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+ do_timer(ticks);
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/timer-handle-idle-trylock-in-get-next-timer-irq.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -36,7 +36,7 @@
===================================================================
--- linux-3.2.orig/kernel/rtmutex.c
+++ linux-3.2/kernel/rtmutex.c
-@@ -860,15 +860,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
+@@ -861,15 +861,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
int __lockfunc rt_spin_trylock(spinlock_t *lock)
{
Modified: dists/sid/linux-2.6/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch
==============================================================================
--- dists/sid/linux-2.6/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/usb-hcd-use-local-irq-nort.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -16,7 +16,7 @@
===================================================================
--- linux-3.2.orig/drivers/usb/core/hcd.c
+++ linux-3.2/drivers/usb/core/hcd.c
-@@ -2146,7 +2146,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+@@ -2145,7 +2145,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
* when the first handler doesn't use it. So let's just
* assume it's never used.
*/
@@ -25,7 +25,7 @@
if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
rc = IRQ_NONE;
-@@ -2159,7 +2159,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+@@ -2158,7 +2158,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
rc = IRQ_HANDLED;
}
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,32 @@
+Subject: x86: vdso: Remove bogus locking in update_vsyscall_tz()
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Feb 2012 19:10:46 +0100
+
+Changing the sequence count in update_vsyscall_tz() is completely
+pointless.
+
+The vdso code copies the data unprotected. There is no point to change
+this as sys_tz is nowhere protected at all. See sys_gettimeofday().
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/kernel/vsyscall_64.c | 5 -----
+ 1 file changed, 5 deletions(-)
+
+Index: linux-2.6/arch/x86/kernel/vsyscall_64.c
+===================================================================
+--- linux-2.6.orig/arch/x86/kernel/vsyscall_64.c
++++ linux-2.6/arch/x86/kernel/vsyscall_64.c
+@@ -80,12 +80,7 @@ early_param("vsyscall", vsyscall_setup);
+
+ void update_vsyscall_tz(void)
+ {
+- unsigned long flags;
+-
+- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+- /* sys_tz has changed */
+ vsyscall_gtod_data.sys_tz = sys_tz;
+- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ }
+
+ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
Added: dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-use-seqcount.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/rt/x86-vdso-use-seqcount.patch Sun Mar 4 13:31:34 2012 (r18782)
@@ -0,0 +1,124 @@
+Subject: x86: vdso: Use seqcount instead of seqlock
+From: Thomas Gleixner <tglx at linutronix.de>
+Date: Tue, 28 Feb 2012 18:24:07 +0100
+
+The update of the vdso data happens under xtime_lock, so adding a
+nested lock is pointless. Just use a seqcount to sync the readers.
+
+Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
+---
+ arch/x86/include/asm/vgtod.h | 2 +-
+ arch/x86/kernel/vsyscall_64.c | 11 +++--------
+ arch/x86/vdso/vclock_gettime.c | 16 ++++++++--------
+ 3 files changed, 12 insertions(+), 17 deletions(-)
+
+Index: linux-3.2/arch/x86/include/asm/vgtod.h
+===================================================================
+--- linux-3.2.orig/arch/x86/include/asm/vgtod.h
++++ linux-3.2/arch/x86/include/asm/vgtod.h
+@@ -5,7 +5,7 @@
+ #include <linux/clocksource.h>
+
+ struct vsyscall_gtod_data {
+- seqlock_t lock;
++ seqcount_t seq;
+
+ /* open coded 'struct timespec' */
+ time_t wall_time_sec;
+Index: linux-3.2/arch/x86/kernel/vsyscall_64.c
+===================================================================
+--- linux-3.2.orig/arch/x86/kernel/vsyscall_64.c
++++ linux-3.2/arch/x86/kernel/vsyscall_64.c
+@@ -52,10 +52,7 @@
+ #include "vsyscall_trace.h"
+
+ DEFINE_VVAR(int, vgetcpu_mode);
+-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
+-{
+- .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
+-};
++DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
+
+ static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+
+@@ -86,9 +83,7 @@ void update_vsyscall_tz(void)
+ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
+ struct clocksource *clock, u32 mult)
+ {
+- unsigned long flags;
+-
+- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
++ write_seqcount_begin(&vsyscall_gtod_data.seq);
+
+ /* copy vsyscall data */
+ vsyscall_gtod_data.clock.vclock_mode = clock->archdata.vclock_mode;
+@@ -101,7 +96,7 @@ void update_vsyscall(struct timespec *wa
+ vsyscall_gtod_data.wall_to_monotonic = *wtm;
+ vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+
+- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
++ write_seqcount_end(&vsyscall_gtod_data.seq);
+ }
+
+ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+Index: linux-3.2/arch/x86/vdso/vclock_gettime.c
+===================================================================
+--- linux-3.2.orig/arch/x86/vdso/vclock_gettime.c
++++ linux-3.2/arch/x86/vdso/vclock_gettime.c
+@@ -86,11 +86,11 @@ notrace static noinline int do_realtime(
+ {
+ unsigned long seq, ns;
+ do {
+-	seq = read_seqbegin(&gtod->lock);
++	seq = read_seqcount_begin(&gtod->seq);
+ ts->tv_sec = gtod->wall_time_sec;
+ ts->tv_nsec = gtod->wall_time_nsec;
+ ns = vgetns();
+-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
++	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+ timespec_add_ns(ts, ns);
+ return 0;
+ }
+@@ -99,12 +99,12 @@ notrace static noinline int do_monotonic
+ {
+ unsigned long seq, ns, secs;
+ do {
+-	seq = read_seqbegin(&gtod->lock);
++	seq = read_seqcount_begin(&gtod->seq);
+ secs = gtod->wall_time_sec;
+ ns = gtod->wall_time_nsec + vgetns();
+ secs += gtod->wall_to_monotonic.tv_sec;
+ ns += gtod->wall_to_monotonic.tv_nsec;
+-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
++	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
+ /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
+ * are all guaranteed to be nonnegative.
+@@ -123,10 +123,10 @@ notrace static noinline int do_realtime_
+ {
+ unsigned long seq;
+ do {
+-	seq = read_seqbegin(&gtod->lock);
++	seq = read_seqcount_begin(&gtod->seq);
+ ts->tv_sec = gtod->wall_time_coarse.tv_sec;
+ ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
+-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
++	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+ return 0;
+ }
+
+@@ -134,12 +134,12 @@ notrace static noinline int do_monotonic
+ {
+ unsigned long seq, ns, secs;
+ do {
+-	seq = read_seqbegin(&gtod->lock);
++	seq = read_seqcount_begin(&gtod->seq);
+ secs = gtod->wall_time_coarse.tv_sec;
+ ns = gtod->wall_time_coarse.tv_nsec;
+ secs += gtod->wall_to_monotonic.tv_sec;
+ ns += gtod->wall_to_monotonic.tv_nsec;
+-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
++	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
+
+ /* wall_time_nsec and wall_to_monotonic.tv_nsec are
+ * guaranteed to be between 0 and NSEC_PER_SEC.
Modified: dists/sid/linux-2.6/debian/patches/series/base-extra
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/base-extra Sat Mar 3 21:05:17 2012 (r18781)
+++ dists/sid/linux-2.6/debian/patches/series/base-extra Sun Mar 4 13:31:34 2012 (r18782)
@@ -12,6 +12,16 @@
+ features/all/rt/power-allow-irq-threading.patch featureset=rt
+ features/all/rt/sched-keep-period-timer-alive-when-throttled.patch featureset=rt
+ features/all/rt/sched-prevent-boosting-from-throttling.patch featureset=rt
++ features/all/rt/time-remove-bogus-comments.patch featureset=rt
++ features/all/rt/x86-vdso-remove-bogus-locking-in-update_vsyscall_tz.patch featureset=rt
++ features/all/rt/x86-vdso-use-seqcount.patch featureset=rt
++ features/all/rt/ia64-vdso-use-seqcount.patch featureset=rt
++ features/all/rt/seqlock-remove-unused-functions.patch featureset=rt
++ features/all/rt/seqlock-use-seqcount.patch featureset=rt
++ features/all/rt/seqlock-provide-seq-spin-lock.patch featureset=rt
++ features/all/rt/fs-struct-use-seqlock.patch featureset=rt
++ features/all/rt/fs-dentry-use-seqlock.patch featureset=rt
++ features/all/rt/timekeeping-split-xtime-lock.patch featureset=rt
+ features/all/rt/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch featureset=rt
+ features/all/rt/mm-memcg-shorten-preempt-disabled-section-around-event-checks.patch featureset=rt
+ features/all/rt/tracing-account-for-preempt-off-in-preempt_schedule.patch featureset=rt
@@ -57,14 +67,8 @@
+ features/all/rt/fix-rt-int3-x86_32-3.2-rt.patch featureset=rt
+ features/all/rt/rcu-reduce-lock-section.patch featureset=rt
+ features/all/rt/locking-various-init-fixes.patch featureset=rt
-+ features/all/rt/seqlock-remove-unused-functions.patch featureset=rt
-+ features/all/rt/seqlock-use-seqcount.patch featureset=rt
+ features/all/rt/wait-provide-__wake_up_all_locked.patch featureset=rt
+ features/all/rt/pci-access-use-__wake_up_all_locked.patch featureset=rt
-+ features/all/rt/acpi-make-gbl-hardware-lock-raw.patch featureset=rt
-+ features/all/rt/acpi-make-ec-lock-raw-as-well.patch featureset=rt
-+ features/all/rt/seqlock-raw-seqlock.patch featureset=rt
-+ features/all/rt/timekeeping-covert-xtimelock.patch featureset=rt
+ features/all/rt/latency-hist.patch featureset=rt
+ features/all/rt/hwlatdetect.patch featureset=rt
+ features/all/rt/early-printk-consolidate.patch featureset=rt
@@ -246,8 +250,15 @@
+ features/all/rt/scsi-fcoe-rt-aware.patch featureset=rt
+ features/all/rt/x86-crypto-reduce-preempt-disabled-regions.patch featureset=rt
+ features/all/rt/dm-make-rt-aware.patch featureset=rt
-+ features/all/rt/wait-simple-version.patch featureset=rt
-+ features/all/rt/acpi-gpe-use-wait-simple.patch featureset=rt
+ features/all/rt/cpumask-disable-offstack-on-rt.patch featureset=rt
++ features/all/rt/seqlock-prevent-rt-starvation.patch featureset=rt
++ features/all/rt/fs-protect-opencoded-isize-seqcount.patch featureset=rt
++ features/all/rt/net-u64-stat-protect-seqcount.patch featureset=rt
++ features/all/rt/rfc-timer-fix-hotplug-for-rt.patch featureset=rt
++ features/all/rt/rfc-futex-rt-fix-possible-lockup-when-taking-pi_lock-in-proxy-handler.patch featureset=rt
++ features/all/rt/rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch featureset=rt
++ features/all/rt/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch featureset=rt
++ features/all/rt/rfc-lglock-rt-use-non-rt-for_each_cpu-in-rt-code.patch featureset=rt
++ features/all/rt/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch featureset=rt
+ features/all/rt/kconfig-disable-a-few-options-rt.patch featureset=rt
+ features/all/rt/kconfig-preempt-rt-full.patch featureset=rt
More information about the Kernel-svn-changes
mailing list