[kernel] r18474 - in dists/squeeze-security/linux-2.6: . debian debian/config debian/patches/bugfix/all debian/patches/bugfix/all/stable debian/patches/bugfix/x86 debian/patches/debian debian/patches/features/all debian/patches/features/all/e1000e debian/patches/features/all/igb debian/patches/features/all/openvz debian/patches/features/all/r8169 debian/patches/features/all/tg3 debian/patches/features/all/vserver debian/patches/features/all/xen debian/patches/series debian/templates/image.plain.bug

Dann Frazier dannf at alioth.debian.org
Sun Jan 8 10:47:14 UTC 2012


Author: dannf
Date: Sun Jan  8 10:47:12 2012
New Revision: 18474

Log:
merge 2.6.32-36 -> 2.6.32-39

Added:
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.42.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.42.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.43.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.43.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.44.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.44.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.45.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.45.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.46.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.46.patch
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch
   dists/squeeze-security/linux-2.6/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch
   dists/squeeze-security/linux-2.6/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch
   dists/squeeze-security/linux-2.6/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch
   dists/squeeze-security/linux-2.6/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/e1000e/
      - copied from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/e1000e/
   dists/squeeze-security/linux-2.6/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/igb/
      - copied from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/igb/
   dists/squeeze-security/linux-2.6/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/r8169/
      - copied from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/r8169/
   dists/squeeze-security/linux-2.6/debian/patches/features/all/tg3/
      - copied from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/tg3/
   dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch
   dists/squeeze-security/linux-2.6/debian/patches/series/36
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/36
   dists/squeeze-security/linux-2.6/debian/patches/series/37
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/37
   dists/squeeze-security/linux-2.6/debian/patches/series/38
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/38
   dists/squeeze-security/linux-2.6/debian/patches/series/39
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39
   dists/squeeze-security/linux-2.6/debian/patches/series/39-extra
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39-extra
   dists/squeeze-security/linux-2.6/debian/patches/series/39squeeze1
      - copied unchanged from r18473, dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze3
Replaced:
   dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze2
      - copied unchanged from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/35squeeze2
Deleted:
   dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vs2.3.0.36.29.6.patch
   dists/squeeze-security/linux-2.6/debian/patches/series/35-extra
   dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze3
Modified:
   dists/squeeze-security/linux-2.6/   (props changed)
   dists/squeeze-security/linux-2.6/debian/changelog
   dists/squeeze-security/linux-2.6/debian/config/defines
   dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/0002-venfs-Backport-some-patches-from-rhel6-branch.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/openvz.patch
   dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/pvops.patch
   dists/squeeze-security/linux-2.6/debian/templates/image.plain.bug/include-network

Modified: dists/squeeze-security/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/changelog	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/changelog	Sun Jan  8 10:47:12 2012	(r18474)
@@ -1,4 +1,4 @@
-linux-2.6 (2.6.32-35squeeze3) UNRELEASED; urgency=low
+linux-2.6 (2.6.32-39squeeze1) UNRELEASED; urgency=low
 
   * Restrict ioctl forwarding on partitions and logical volumes (CVE-2011-4127)
   * xfs: Fix possible memory corruption in xfs_readlink (CVE-2011-4077)
@@ -9,6 +9,147 @@
 
  -- dann frazier <dannf at debian.org>  Fri, 06 Jan 2012 18:46:21 -0700
 
+linux-2.6 (2.6.32-39) stable; urgency=high
+
+  [ Ian Campbell ]
+  * xen: Revert "xen: Use IRQF_FORCE_RESUME". Fixes live migration regression
+    in 2.6.32.42. (Closes: #644604)
+
+  [ Ben Hutchings ]
+  * Really fix bugs in IPv6 forwarding with GRO/GSO (Closes: #630730):
+    - e1000e,igb,igbvf,ixgbe: Fix IPv6 GSO type checks
+    - ipv6: Add GSO support on forwarding path
+  * [powerpc] vserver: Wire up syscall (Closes: #646132)
+
+ -- dann frazier <dannf at debian.org>  Thu, 27 Oct 2011 17:03:42 -0600
+
+linux-2.6 (2.6.32-38) stable; urgency=high
+
+  * Revert "ipv6: make fragment identifications less predictable"
+    (Closes: #643817).  This reopens CVE-2011-2699.
+
+ -- Ben Hutchings <ben at decadent.org.uk>  Sun, 02 Oct 2011 00:17:26 +0100
+
+linux-2.6 (2.6.32-37) stable; urgency=low
+
+  * pm: Fix definition of SET_SYSTEM_SLEEP_PM_OPS used in backported drivers
+    (fixes FTBFS on ia64)
+  * splice: Fix write position in output file (Closes: #641419)
+  * PCI: Add definition of pci_pcie_cap(), used in backported e1000e
+    (fixes FTBFS on armel, mips, mipsel, sparc)
+  * [openvz] cpt: Allow ext4 mounts (Closes: #642380)
+
+ -- Ben Hutchings <ben at decadent.org.uk>  Sat, 24 Sep 2011 04:59:12 +0100
+
+linux-2.6 (2.6.32-36) stable; urgency=low
+
+  [ maximilian attems ]
+  * Add drm change from 2.6.32.41+drm33.18:
+    - drm/radeon/kms: fix bad shift in atom iio table parser
+  * [openvz] ptrace: Don't allow to trace a process without memory map.
+  * Add drm change from 2.6.32.42+drm33.19, including:
+    - drm/i915: Add a no lvds quirk for the Asus EeeBox PC EB1007
+    - drm/radeon/kms: fix for radeon on systems >4GB without hardware iommu
+  * Add longterm release 2.6.32.43, including:
+    - ksm: fix NULL pointer dereference in scan_get_next_rmap_item()
+      (CVE-2011-2183)
+    - TTY: ldisc, do not close until there are readers
+    - uvcvideo: Remove buffers from the queues when freeing
+    - inet_diag: fix inet_diag_bc_audit() (CVE-2011-2213)
+    - net: filter: Use WARN_RATELIMIT
+    - af_packet: prevent information leak
+    - ipv6/udp: Use the correct variable to determine non-blocking condition
+    - mm: prevent concurrent unmap_mapping_range() on the same inode
+    For the complete list of changes, see:
+     http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.32/ChangeLog-2.6.32.43
+    and the bug report which this closes: #637848.
+
+  [ Ben Hutchings ]
+  * Add longterm release 2.6.32.42, including:
+    - ftrace: Only update the function code on write to filter files
+    - kmemleak: Do not return a pointer to an object that kmemleak did not get
+    - ext3: Fix fs corruption when make_indexed_dir() fails
+    - jbd: fix fsync() tid wraparound bug
+    - PCI: allow matching of prefetchable resources to non-prefetchable windows
+      (Closes: #637659)
+    - loop: handle on-demand devices correctly
+    - xhci: Fix full speed bInterval encoding; fix interval calculation for
+      FS isoc endpoints (regressions in 2.6.32-34)
+    - OHCI: fix regression caused by nVidia shutdown workaround
+      (regression in 2.6.32-31)
+    - brd: handle on-demand devices correctly
+    - xen mmu: fix a race window causing leave_mm BUG()
+    - SCSI: Fix oops caused by queue refcounting failure
+    - fat: Fix corrupt inode flags when remove ATTR_SYS flag
+    - pata_cmd64x: fix boot crash on parisc (Closes: #622745, #622997)
+    - Revert "iwlagn: Support new 5000 microcode." (Closes: #632778)
+    For the complete list of changes, see:
+     http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.32/ChangeLog-2.6.32.42
+    and the bug report which this closes: #631465.
+  * [vserver] Update patch to 2.6.32.41-vs2.3.0.36.29.7
+    - Apply sched changes deferred from 2.6.32.29
+  * e1000e: Backport changes up to Linux 2.6.38 (Closes: #627700)
+    - Add support for i82567V-4 and i82579
+    - Fix support for i82577, i82578 and i82583
+  * e1000e: Fix selection of alternate MAC address on device id 0x1060
+    (regression in 2.6.34)
+  * igb,igbvf: Backport changes up to Linux 3.0.4 (Closes: #627702)
+    - Add support for i82576-ET2, i82580, DH89xxCC, i340 and i350
+  * r8169: Backport changes up to Linux 3.0.2 (Closes: #627704)
+    - Fix support for RTL8102E and RTL8168DP
+    - Add support for RTL8105E, RTL8168E and another variant of RTL8168DP
+    - Add support for D-Link DGE-530T rev C1
+  * tg3,broadcom: Backport changes up to Linux 2.6.38 (Closes: #627705)
+    - Add support for BCM5717, BCM5719, BCM57765
+    - Add support for BCM50610M and BCM5241 PHYs
+    - Fix support for BCM5755
+  * Remove net device features from bug reports (Closes: #638956)
+  * Revert "net/ipv4: Check for mistakenly passed in non-IPv4 address"
+    included in 2.6.32.43, which might break some applications
+  * Add longterm release 2.6.32.44, including:
+    - NFSv4.1: update nfs4_fattr_bitmap_maxsz
+    - hwmon: (max1111) Fix race condition causing NULL pointer exception
+    - bridge: send proper message_age in config BPDU
+    - USB: OHCI: fix another regression for NVIDIA controllers
+      (Closes: #620848)
+    - ext3: Fix oops in ext3_try_to_allocate_with_rsv()
+    - svcrpc: fix list-corrupting race on nfsd shutdown
+    - alpha: fix several security issues (CVE-2011-2208, CVE-2011-2209,
+      CVE-2011-2210, CVE-2011-2211)
+    - ALSA: sound/core/pcm_compat.c: adjust array index
+    - atm: [br2684] allow routed mode operation again
+    For the complete list of changes, see:
+     http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.32/ChangeLog-2.6.32.44
+    and the bug report which this closes: #639425.
+  * Add longterm release 2.6.32.45, including:
+    - ALSA: timer - Fix Oops at closing slave timer
+    For the complete list of changes, see:
+     http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.32/ChangeLog-2.6.32.45
+    and the bug report which this closes: #639426.
+  * sched: Work around sched_group::cpu_power == 0 (Ameliorates: #636797)
+  * [x86] Revert "x86, hotplug: Use mwait to offline a processor, fix the
+    legacy case" (Closes: #622259)
+  * Fix bugs in IPv6 forwarding with GRO/GSO (Closes: #630730):
+    - e1000e,igb,igbvf,ixgbe: Fix IPv6 GSO type checks
+    - ipv6: Add GSO support on forwarding path
+  * devpts: correctly check d_alloc_name() return code (Closes: #640650)
+  * ipv6: make fragment identifications less predictable (CVE-2011-2699)
+  * Add longterm release 2.6.32.46, including:
+    - atm: br2684: sent packets truncated in VC routed mode (Closes: #638656)
+    - hwmon: (ibmaem) add missing kfree
+    - ALSA: snd-usb-caiaq: Correct offset fields of outbound iso_frame_desc
+    - ALSA: snd_usb_caiaq: track submitted output urbs
+    - futex: Fix regression with read only mappings
+    - x86-32, vdso: On system call restart after SYSENTER, use int $0x80
+    - fuse: check size of FUSE_NOTIFY_INVAL_ENTRY message
+    For the complete list of changes, see:
+     http://www.kernel.org/pub/linux/kernel/v2.6/longterm/v2.6.32/ChangeLog-2.6.32.46
+    and the bug report which this closes: #641232.
+  * drm/ttm: fix ttm_bo_add_ttm(user) failure path
+  * 3c503: fix broken IRQ autoprobing (Closes: #566522)
+
+ -- Ben Hutchings <ben at decadent.org.uk>  Mon, 12 Sep 2011 00:53:46 +0100
+
 linux-2.6 (2.6.32-35squeeze2) stable-security; urgency=high
 
   * Fix regression in /proc/<pid>/maps fixes for CVE-2011-1020

Modified: dists/squeeze-security/linux-2.6/debian/config/defines
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/config/defines	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/config/defines	Sun Jan  8 10:47:12 2012	(r18474)
@@ -7,6 +7,7 @@
  __scm_*
  scm_*
  hisax_init_pcmcia
+ clocksource_*
 
 [base]
 arches:
@@ -73,9 +74,15 @@
 drivers/scsi/3w-sas.h: 2.6.33
 drivers/staging/ramzswap/: 2.6.33
 drivers/media/dvb/mantis/: 2.6.34
+drivers/net/e1000e/: 2.6.38
+drivers/net/igb/: 3.0
+drivers/net/igbvf/: 3.0
 drivers/net/macvtap.c: 2.6.34
+drivers/net/r8169.c: 3.0
 drivers/net/sky2.c: 2.6.35
 drivers/net/sky2.h: 2.6.35
+drivers/net/tg3.c: 2.6.38
+drivers/net/tg3.h: 2.6.38
 drivers/net/usb/ipheth.c: 2.6.34
 drivers/staging/brcm80211/: 2.6.37
 drivers/staging/rt2860/: 2.6.34

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch)
@@ -0,0 +1,43 @@
+From: Ondrej Zary <linux at rainbow-software.org>
+Date: Sun, 12 Jun 2011 09:40:49 +0000
+Subject: [PATCH] 3c503: fix broken IRQ autoprobing
+
+commit 1ffde03d2aa112750468cff07efc9e0a504517dd upstream.
+
+Fix broken IRQ autoprobing in 3c503 driver:
+ - improper IRQ freeing (does not free IRQs causes WARN)
+ - missing break when an working IRQ is found
+
+The driver works with this patch.
+
+Signed-off-by: Ondrej Zary <linux at rainbow-software.org>
+Reviewed-by: Ben Hutchings <ben at decadent.org.uk>
+Signed-off-by: David S. Miller <davem at conan.davemloft.net>
+---
+ drivers/net/3c503.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
+index d84f6e8..5b73298 100644
+--- a/drivers/net/3c503.c
++++ b/drivers/net/3c503.c
+@@ -412,7 +412,7 @@ el2_open(struct net_device *dev)
+ 		outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
+ 		outb_p(0x00, E33G_IDCFR);
+ 		msleep(1);
+-		free_irq(*irqp, el2_probe_interrupt);
++		free_irq(*irqp, &seen);
+ 		if (!seen)
+ 			continue;
+ 
+@@ -422,6 +422,7 @@ el2_open(struct net_device *dev)
+ 			continue;
+ 		if (retval < 0)
+ 			goto err_disable;
++		break;
+ 	} while (*++irqp);
+ 
+ 	if (*irqp == 0) {
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch)
@@ -0,0 +1,43 @@
+From: Hans de Goede <hdegoede at redhat.com>
+Date: Sat, 4 Jun 2011 15:39:21 +0200
+Subject: [PATCH 09/10] drm/i915: Add a no lvds quirk for the Asus EeeBox PC EB1007
+
+commit b0088882c63a9bdec8f3671438928ac7ab4bbcd8 upstream.
+
+commit 6a574b5b9b186e28abd3e571dfd1700c5220b510 upstream.
+
+I found this while figuring out why gnome-shell would not run on my
+Asus EeeBox PC EB1007. As a standalone "pc" this device clearly does not have
+an internal panel, yet it claims it does. Add a quirk to fix this.
+
+Signed-off-by: Hans de Goede <hdegoede at redhat.com>
+Reviewed-by: Keith Packard <keithp at keithp.com>
+Signed-off-by: Keith Packard <keithp at keithp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/intel_lvds.c |    8 ++++++++
+ 1 files changed, 8 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index d34c09f..7cfc814 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -865,6 +865,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
+ 		},
+ 	},
++	{
++		.callback = intel_no_lvds_dmi_callback,
++		.ident = "Asus EeeBox PC EB1007",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
++		},
++	},
+ 
+ 	{ }	/* terminating entry */
+ };
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Implement-fair-lru-eviction-across-both-rin.patch)
@@ -0,0 +1,337 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Fri, 17 Jun 2011 10:04:21 -0500
+Subject: [PATCH 05/10] drm/i915: Implement fair lru eviction across both rings. (v2)
+
+commit f42384c96e7e53c42615b16396c47edf40667b72 upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+Based in a large part upon Daniel Vetter's implementation and adapted
+for handling multiple rings in a single pass.
+
+This should lead to better gtt usage and fixes the page-fault-of-doom
+triggered. The fairness is provided by scanning through the GTT space
+amalgamating space in rendering order. As soon as we have a contiguous
+space in the GTT large enough for the new object (and its alignment),
+evict any object which lies within that space. This should keep more
+objects resident in the GTT.
+
+Doing throughput testing on a PineView machine with cairo-perf-trace
+indicates that there is very little difference with the new LRU scan,
+perhaps a small improvement... Except oddly for the poppler trace.
+
+Reference:
+
+  Bug 15911 - Intermittent X crash (freeze)
+  https://bugzilla.kernel.org/show_bug.cgi?id=15911
+
+  Bug 20152 - cannot view JPG in firefox when running UXA
+  https://bugs.freedesktop.org/show_bug.cgi?id=20152
+
+  Bug 24369 - Hang when scrolling firefox page with window in front
+  https://bugs.freedesktop.org/show_bug.cgi?id=24369
+
+  Bug 28478 - Intermittent graphics lockups due to overflow/loop
+  https://bugs.freedesktop.org/show_bug.cgi?id=28478
+
+v2: Attempt to clarify the logic and order of eviction through the use
+of comments and macros.
+
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Reviewed-by: Daniel Vetter <daniel at ffwll.ch>
+Signed-off-by: Eric Anholt <eric at anholt.net>
+(backported from commit cd377ea93f34cbd6ec49c868b66a5a7ab184775c upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/i915_drv.h       |    2 +
+ drivers/gpu/drm/i915/i915_gem_evict.c |  240 +++++++++++++++++----------------
+ 2 files changed, 127 insertions(+), 115 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index f7e12ba..e0acd00 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -610,6 +610,8 @@ struct drm_i915_gem_object {
+ 	struct list_head list;
+ 	/** This object's place on GPU write list */
+ 	struct list_head gpu_write_list;
++	/** This object's place on eviction list */
++	struct list_head evict_list;
+ 
+ 	/** This object's place on the fenced object LRU */
+ 	struct list_head fence_list;
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index 127a28a..84ed1a7 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -31,140 +31,150 @@
+ #include "i915_drv.h"
+ #include "i915_drm.h"
+ 
+-static inline int
+-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+-{
+-	return obj_priv->madv == I915_MADV_DONTNEED;
+-}
+-
+-static int
+-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
+-				      unsigned alignment, int *found)
++static struct drm_i915_gem_object *
++i915_gem_next_active_object(struct drm_device *dev,
++			    struct list_head **iter)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_gem_object *obj;
+-	struct drm_i915_gem_object *obj_priv;
+-	struct drm_gem_object *best = NULL;
+-	struct drm_gem_object *first = NULL;
+-
+-	/* Try to find the smallest clean object */
+-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+-		struct drm_gem_object *obj = obj_priv->obj;
+-		if (obj->size >= min_size) {
+-			if ((!obj_priv->dirty ||
+-			     i915_gem_object_is_purgeable(obj_priv)) &&
+-			    (!best || obj->size < best->size)) {
+-				best = obj;
+-				if (best->size == min_size)
+-					break;
+-			}
+-			if (!first)
+-			    first = obj;
+-		}
+-	}
+-
+-	obj = best ? best : first;
+-
+-	if (!obj) {
+-		*found = 0;
+-		return 0;
+-	}
++	struct drm_i915_gem_object *obj = NULL;
+ 
+-	*found = 1;
++	if (*iter != &dev_priv->mm.active_list)
++		obj = list_entry(*iter,
++				 struct drm_i915_gem_object,
++				 list);
+ 
+-#if WATCH_LRU
+-	DRM_INFO("%s: evicting %p\n", __func__, obj);
+-#endif
+-	obj_priv = obj->driver_private;
+-	BUG_ON(obj_priv->pin_count != 0);
+-	BUG_ON(obj_priv->active);
++	*iter = (*iter)->next;
++	return obj;
++}
+ 
+-	/* Wait on the rendering and unbind the buffer. */
+-	return i915_gem_object_unbind(obj);
++static bool
++mark_free(struct drm_i915_gem_object *obj_priv,
++	   struct list_head *unwind)
++{
++	list_add(&obj_priv->evict_list, unwind);
++	return drm_mm_scan_add_block(obj_priv->gtt_space);
+ }
+ 
++#define i915_for_each_active_object(OBJ, I) \
++	*(I) = dev_priv->mm.active_list.next; \
++	while (((OBJ) = i915_gem_next_active_object(dev, (I))) != NULL)
++
+ int
+-i915_gem_evict_something(struct drm_device *dev,
+-			 int min_size, unsigned alignment)
++i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int ret, found;
+-
+-	for (;;) {
+-		i915_gem_retire_requests(dev);
+-
+-		/* If there's an inactive buffer available now, grab it
+-		 * and be done.
+-		 */
+-		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
+-							    alignment,
+-							    &found);
+-		if (found)
+-			return ret;
++	struct list_head eviction_list, unwind_list;
++	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
++	struct list_head *iter;
++	int ret = 0;
+ 
+-		/* If we didn't get anything, but the ring is still processing
+-		 * things, wait for the next to finish and hopefully leave us
+-		 * a buffer to evict.
+-		 */
+-		if (!list_empty(&dev_priv->mm.request_list)) {
+-			struct drm_i915_gem_request *request;
++	i915_gem_retire_requests(dev);
+ 
+-			request = list_first_entry(&dev_priv->mm.request_list,
+-						   struct drm_i915_gem_request,
+-						   list);
++	/* Re-check for free space after retiring requests */
++	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
++			       min_size, alignment, 0))
++		return 0;
+ 
+-			ret = i915_do_wait_request(dev, request->seqno, true);
+-			if (ret)
+-				return ret;
++	/*
++	 * The goal is to evict objects and amalgamate space in LRU order.
++	 * The oldest idle objects reside on the inactive list, which is in
++	 * retirement order. The next objects to retire are those on the
++	 * active list that do not have an outstanding flush. Once the
++	 * hardware reports completion (the seqno is updated after the
++	 * batchbuffer has been finished) the clean buffer objects would
++	 * be retired to the inactive list. Any dirty objects would be added
++	 * to the tail of the flushing list. So after processing the clean
++	 * active objects we need to emit a MI_FLUSH to retire the flushing
++	 * list, hence the retirement order of the flushing list is in
++	 * advance of the dirty objects on the active list.
++	 *
++	 * The retirement sequence is thus:
++	 *   1. Inactive objects (already retired)
++	 *   2. Clean active objects
++	 *   3. Flushing list
++	 *   4. Dirty active objects.
++	 *
++	 * On each list, the oldest objects lie at the HEAD with the freshest
++	 * object on the TAIL.
++	 */
++
++	INIT_LIST_HEAD(&unwind_list);
++	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
++
++	/* First see if there is a large enough contiguous idle region... */
++	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++		if (mark_free(obj_priv, &unwind_list))
++			goto found;
++	}
+ 
++	/* Now merge in the soon-to-be-expired objects... */
++	i915_for_each_active_object(obj_priv, &iter) {
++		/* Does the object require an outstanding flush? */
++		if (obj_priv->obj->write_domain || obj_priv->pin_count)
+ 			continue;
+-		}
+ 
+-		/* If we didn't have anything on the request list but there
+-		 * are buffers awaiting a flush, emit one and try again.
+-		 * When we wait on it, those buffers waiting for that flush
+-		 * will get moved to inactive.
+-		 */
+-		if (!list_empty(&dev_priv->mm.flushing_list)) {
+-			struct drm_gem_object *obj = NULL;
+-			struct drm_i915_gem_object *obj_priv;
+-
+-			/* Find an object that we can immediately reuse */
+-			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+-				obj = obj_priv->obj;
+-				if (obj->size >= min_size)
+-					break;
+-
+-				obj = NULL;
+-			}
+-
+-			if (obj != NULL) {
+-				uint32_t seqno;
+-
+-				i915_gem_flush(dev,
+-					       obj->write_domain,
+-					       obj->write_domain);
+-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+-				if (seqno == 0)
+-					return -ENOMEM;
+-
+-				ret = i915_do_wait_request(dev, seqno, true);
+-				if (ret)
+-					return ret;
+-
+-				continue;
+-			}
++		if (mark_free(obj_priv, &unwind_list))
++			goto found;
++	}
++
++	/* Finally add anything with a pending flush (in order of retirement) */
++	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++		if (obj_priv->pin_count)
++			continue;
++
++		if (mark_free(obj_priv, &unwind_list))
++			goto found;
++	}
++	i915_for_each_active_object(obj_priv, &iter) {
++		if (! obj_priv->obj->write_domain || obj_priv->pin_count)
++			continue;
++
++		if (mark_free(obj_priv, &unwind_list))
++			goto found;
++	}
++
++	/* Nothing found, clean up and bail out! */
++	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
++		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
++		BUG_ON(ret);
++	}
++
++	/* We expect the caller to unpin, evict all and try again, or give up.
++	 * So calling i915_gem_evict_everything() is unnecessary.
++	 */
++	return -ENOSPC;
++
++found:
++	INIT_LIST_HEAD(&eviction_list);
++	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
++				 &unwind_list, evict_list) {
++		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
++			/* drm_mm doesn't allow any other other operations while
++			 * scanning, therefore store to be evicted objects on a
++			 * temporary list. */
++			list_move(&obj_priv->evict_list, &eviction_list);
+ 		}
++	}
+ 
+-		/* If we didn't do any of the above, there's no single buffer
+-		 * large enough to swap out for the new one, so just evict
+-		 * everything and start again. (This should be rare.)
+-		 */
+-		if (!list_empty (&dev_priv->mm.inactive_list))
+-			return i915_gem_evict_inactive(dev);
+-		else
+-			return i915_gem_evict_everything(dev);
++	/* Unbinding will emit any required flushes */
++	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
++				 &eviction_list, evict_list) {
++#if WATCH_LRU
++		DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++		ret = i915_gem_object_unbind(obj_priv->obj);
++		if (ret)
++			return ret;
+ 	}
++
++	/* The just created free hole should be on the top of the free stack
++	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
++	 * Furthermore all accessed data has just recently been used, so it
++	 * should be really fast, too. */
++	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
++				   alignment, 0));
++
++	return 0;
+ }
+ 
+ int
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Maintain-LRU-order-of-inactive-objects-upon.patch)
@@ -0,0 +1,73 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Fri, 17 Jun 2011 10:04:22 -0500
+Subject: [PATCH 06/10] drm/i915: Maintain LRU order of inactive objects upon access by CPU (v2)
+
+commit f8fd3ab5b8bf8f99ea13ebbecabd5c8e42c82948 upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+In order to reduce the penalty of fallbacks under memory pressure and to
+avoid a potential immediate ping-pong of evicting a mmaped buffer, we
+move the object to the tail of the inactive list when a page is freshly
+faulted or the object is moved into the CPU domain.
+
+We choose not to protect the CPU objects from casual eviction,
+preferring to keep the GPU active for as long as possible.
+
+v2: Daniel Vetter found a bug where I forgot that pinned objects are
+kept off the inactive list.
+
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric at anholt.net>
+(backported from commit 7d1c4804ae98cdee572d7d10d8a5deaa2e686285 upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/i915_gem.c |   16 ++++++++++++++++
+ 1 files changed, 16 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 2e4ff69..b3c7bd1 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -55,6 +55,14 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
+ static LIST_HEAD(shrink_list);
+ static DEFINE_SPINLOCK(shrink_list_lock);
+ 
++static inline bool
++i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
++{
++	return obj_priv->gtt_space &&
++		!obj_priv->active &&
++		obj_priv->pin_count == 0;
++}
++
+ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ 		     unsigned long end)
+ {
+@@ -1068,6 +1076,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+ 	}
+ 
++	
++	/* Maintain LRU order of "inactive" objects */
++	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
++		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
+ 	drm_gem_object_unreference(obj);
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return ret;
+@@ -1203,6 +1216,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 			goto unlock;
+ 	}
+ 
++	if (i915_gem_object_is_inactive(obj_priv))
++		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
++
+ 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ 		page_offset;
+ 
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Move-the-eviction-logic-to-its-own-file.patch)
@@ -0,0 +1,576 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Fri, 17 Jun 2011 10:04:21 -0500
+Subject: [PATCH 04/10] drm/i915: Move the eviction logic to its own file.
+
+commit cf9ec16fcec6fcb0a0ae6d5bcd3f34ff348c683e upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+The eviction code is the gnarly underbelly of memory management, and is
+clearer if kept separated from the normal domain management in GEM.
+
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric at anholt.net>
+(backported from commit b47eb4a2b302f33adaed2a27d2b3bfc74fe35ac5 upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/Makefile         |    1 +
+ drivers/gpu/drm/i915/i915_drv.h       |   11 ++
+ drivers/gpu/drm/i915/i915_gem.c       |  206 +----------------------------
+ drivers/gpu/drm/i915/i915_gem_evict.c |  235 +++++++++++++++++++++++++++++++++
+ 4 files changed, 249 insertions(+), 204 deletions(-)
+ create mode 100644 drivers/gpu/drm/i915/i915_gem_evict.c
+
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index 9929f84..8a83bb7 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+           i915_suspend.o \
+ 	  i915_gem.o \
+ 	  i915_gem_debug.o \
++	  i915_gem_evict.o \
+ 	  i915_gem_tiling.o \
+ 	  i915_trace_points.o \
+ 	  intel_display.o \
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index ecc4fbe..f7e12ba 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -55,6 +55,8 @@ enum plane {
+ 
+ #define I915_NUM_PIPE	2
+ 
++#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
++
+ /* Interface history:
+  *
+  * 1.1: Original.
+@@ -858,6 +860,9 @@ int i915_gem_init_object(struct drm_gem_object *obj);
+ void i915_gem_free_object(struct drm_gem_object *obj);
+ int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+ void i915_gem_object_unpin(struct drm_gem_object *obj);
++void i915_gem_flush(struct drm_device *dev,
++		    uint32_t invalidate_domains,
++		    uint32_t flush_domains);
+ int i915_gem_object_unbind(struct drm_gem_object *obj);
+ void i915_gem_release_mmap(struct drm_gem_object *obj);
+ void i915_gem_lastclose(struct drm_device *dev);
+@@ -875,6 +880,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
+ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ 		     unsigned long end);
++int i915_gpu_idle(struct drm_device *dev);
+ int i915_gem_idle(struct drm_device *dev);
+ uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ 			  uint32_t flush_domains);
+@@ -896,6 +902,11 @@ void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+ void i915_gem_shrinker_init(void);
+ void i915_gem_shrinker_exit(void);
+ 
++/* i915_gem_evict.c */
++int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
++int i915_gem_evict_everything(struct drm_device *dev);
++int i915_gem_evict_inactive(struct drm_device *dev);
++
+ /* i915_gem_tiling.c */
+ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index e0afa05..2e4ff69 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -34,8 +34,6 @@
+ #include <linux/swap.h>
+ #include <linux/pci.h>
+ 
+-#define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+-
+ static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+@@ -50,9 +48,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+ 					   unsigned alignment);
+ static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+-static int i915_gem_evict_something(struct drm_device *dev, int min_size,
+-				    unsigned alignment);
+-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
+ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ 				struct drm_i915_gem_pwrite *args,
+ 				struct drm_file *file_priv);
+@@ -1927,7 +1922,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ 	return i915_do_wait_request(dev, seqno, 1);
+ }
+ 
+-static void
++void
+ i915_gem_flush(struct drm_device *dev,
+ 	       uint32_t invalidate_domains,
+ 	       uint32_t flush_domains)
+@@ -2105,179 +2100,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ 	return 0;
+ }
+ 
+-static int
+-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
+-				      unsigned alignment, int *found)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_gem_object *obj;
+-	struct drm_i915_gem_object *obj_priv;
+-	struct drm_gem_object *best = NULL;
+-	struct drm_gem_object *first = NULL;
+-
+-	/* Try to find the smallest clean object */
+-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+-		struct drm_gem_object *obj = obj_priv->obj;
+-		if (obj->size >= min_size) {
+-			if ((!obj_priv->dirty ||
+-			     i915_gem_object_is_purgeable(obj_priv)) &&
+-			    (!best || obj->size < best->size)) {
+-				best = obj;
+-				if (best->size == min_size)
+-					break;
+-			}
+-			if (!first)
+-			    first = obj;
+-		}
+-	}
+-
+-	obj = best ? best : first;
+-
+-	if (!obj) {
+-		*found = 0;
+-		return 0;
+-	}
+-
+-	*found = 1;
+-
+-#if WATCH_LRU
+-	DRM_INFO("%s: evicting %p\n", __func__, obj);
+-#endif
+-	obj_priv = obj->driver_private;
+-	BUG_ON(obj_priv->pin_count != 0);
+-	BUG_ON(obj_priv->active);
+-
+-	/* Wait on the rendering and unbind the buffer. */
+-	return i915_gem_object_unbind(obj);
+-}
+-
+-static int
+-i915_gem_evict_everything(struct drm_device *dev)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int ret;
+-	uint32_t seqno;
+-	bool lists_empty;
+-
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+-		       list_empty(&dev_priv->mm.flushing_list) &&
+-		       list_empty(&dev_priv->mm.active_list));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-
+-	if (lists_empty)
+-		return -ENOSPC;
+-
+-	/* Flush everything (on to the inactive lists) and evict */
+-	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+-	if (seqno == 0)
+-		return -ENOMEM;
+-
+-	ret = i915_wait_request(dev, seqno);
+-	if (ret)
+-		return ret;
+-
+-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+-
+-	ret = i915_gem_evict_from_inactive_list(dev);
+-	if (ret)
+-		return ret;
+-
+-	spin_lock(&dev_priv->mm.active_list_lock);
+-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+-		       list_empty(&dev_priv->mm.flushing_list) &&
+-		       list_empty(&dev_priv->mm.active_list));
+-	spin_unlock(&dev_priv->mm.active_list_lock);
+-	BUG_ON(!lists_empty);
+-
+-	return 0;
+-}
+-
+-static int
+-i915_gem_evict_something(struct drm_device *dev,
+-			 int min_size, unsigned alignment)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	int ret, found;
+-
+-	for (;;) {
+-		i915_gem_retire_requests(dev);
+-
+-		/* If there's an inactive buffer available now, grab it
+-		 * and be done.
+-		 */
+-		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
+-							    alignment,
+-							    &found);
+-		if (found)
+-			return ret;
+-
+-		/* If we didn't get anything, but the ring is still processing
+-		 * things, wait for the next to finish and hopefully leave us
+-		 * a buffer to evict.
+-		 */
+-		if (!list_empty(&dev_priv->mm.request_list)) {
+-			struct drm_i915_gem_request *request;
+-
+-			request = list_first_entry(&dev_priv->mm.request_list,
+-						   struct drm_i915_gem_request,
+-						   list);
+-
+-			ret = i915_wait_request(dev, request->seqno);
+-			if (ret)
+-				return ret;
+-
+-			continue;
+-		}
+-
+-		/* If we didn't have anything on the request list but there
+-		 * are buffers awaiting a flush, emit one and try again.
+-		 * When we wait on it, those buffers waiting for that flush
+-		 * will get moved to inactive.
+-		 */
+-		if (!list_empty(&dev_priv->mm.flushing_list)) {
+-			struct drm_gem_object *obj = NULL;
+-			struct drm_i915_gem_object *obj_priv;
+-
+-			/* Find an object that we can immediately reuse */
+-			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+-				obj = obj_priv->obj;
+-				if (obj->size >= min_size)
+-					break;
+-
+-				obj = NULL;
+-			}
+-
+-			if (obj != NULL) {
+-				uint32_t seqno;
+-
+-				i915_gem_flush(dev,
+-					       obj->write_domain,
+-					       obj->write_domain);
+-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+-				if (seqno == 0)
+-					return -ENOMEM;
+-
+-				ret = i915_wait_request(dev, seqno);
+-				if (ret)
+-					return ret;
+-
+-				continue;
+-			}
+-		}
+-
+-		/* If we didn't do any of the above, there's no single buffer
+-		 * large enough to swap out for the new one, so just evict
+-		 * everything and start again. (This should be rare.)
+-		 */
+-		if (!list_empty (&dev_priv->mm.inactive_list))
+-			return i915_gem_evict_from_inactive_list(dev);
+-		else
+-			return i915_gem_evict_everything(dev);
+-	}
+-}
+-
+ int
+ i915_gem_object_get_pages(struct drm_gem_object *obj,
+ 			  gfp_t gfpmask)
+@@ -4510,30 +4332,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
+ 	kfree(obj->driver_private);
+ }
+ 
+-/** Unbinds all inactive objects. */
+-static int
+-i915_gem_evict_from_inactive_list(struct drm_device *dev)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-
+-	while (!list_empty(&dev_priv->mm.inactive_list)) {
+-		struct drm_gem_object *obj;
+-		int ret;
+-
+-		obj = list_first_entry(&dev_priv->mm.inactive_list,
+-				       struct drm_i915_gem_object,
+-				       list)->obj;
+-
+-		ret = i915_gem_object_unbind(obj);
+-		if (ret != 0) {
+-			DRM_ERROR("Error unbinding object: %d\n", ret);
+-			return ret;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+ int
+ i915_gem_idle(struct drm_device *dev)
+ {
+@@ -4647,7 +4445,7 @@ i915_gem_idle(struct drm_device *dev)
+ 
+ 
+ 	/* Move all inactive buffers out of the GTT. */
+-	ret = i915_gem_evict_from_inactive_list(dev);
++	ret = i915_gem_evict_inactive(dev);
+ 	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
+ 	if (ret) {
+ 		mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+new file mode 100644
+index 0000000..127a28a
+--- /dev/null
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -0,0 +1,235 @@
++/*
++ * Copyright © 2008-2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Eric Anholt <eric at anholt.net>
++ *    Chris Wilson <chris at chris-wilson.co.uuk>
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drv.h"
++#include "i915_drm.h"
++
++static inline int
++i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
++{
++	return obj_priv->madv == I915_MADV_DONTNEED;
++}
++
++static int
++i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
++				      unsigned alignment, int *found)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_gem_object *obj;
++	struct drm_i915_gem_object *obj_priv;
++	struct drm_gem_object *best = NULL;
++	struct drm_gem_object *first = NULL;
++
++	/* Try to find the smallest clean object */
++	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
++		struct drm_gem_object *obj = obj_priv->obj;
++		if (obj->size >= min_size) {
++			if ((!obj_priv->dirty ||
++			     i915_gem_object_is_purgeable(obj_priv)) &&
++			    (!best || obj->size < best->size)) {
++				best = obj;
++				if (best->size == min_size)
++					break;
++			}
++			if (!first)
++			    first = obj;
++		}
++	}
++
++	obj = best ? best : first;
++
++	if (!obj) {
++		*found = 0;
++		return 0;
++	}
++
++	*found = 1;
++
++#if WATCH_LRU
++	DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++	obj_priv = obj->driver_private;
++	BUG_ON(obj_priv->pin_count != 0);
++	BUG_ON(obj_priv->active);
++
++	/* Wait on the rendering and unbind the buffer. */
++	return i915_gem_object_unbind(obj);
++}
++
++int
++i915_gem_evict_something(struct drm_device *dev,
++			 int min_size, unsigned alignment)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret, found;
++
++	for (;;) {
++		i915_gem_retire_requests(dev);
++
++		/* If there's an inactive buffer available now, grab it
++		 * and be done.
++		 */
++		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
++							    alignment,
++							    &found);
++		if (found)
++			return ret;
++
++		/* If we didn't get anything, but the ring is still processing
++		 * things, wait for the next to finish and hopefully leave us
++		 * a buffer to evict.
++		 */
++		if (!list_empty(&dev_priv->mm.request_list)) {
++			struct drm_i915_gem_request *request;
++
++			request = list_first_entry(&dev_priv->mm.request_list,
++						   struct drm_i915_gem_request,
++						   list);
++
++			ret = i915_do_wait_request(dev, request->seqno, true);
++			if (ret)
++				return ret;
++
++			continue;
++		}
++
++		/* If we didn't have anything on the request list but there
++		 * are buffers awaiting a flush, emit one and try again.
++		 * When we wait on it, those buffers waiting for that flush
++		 * will get moved to inactive.
++		 */
++		if (!list_empty(&dev_priv->mm.flushing_list)) {
++			struct drm_gem_object *obj = NULL;
++			struct drm_i915_gem_object *obj_priv;
++
++			/* Find an object that we can immediately reuse */
++			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
++				obj = obj_priv->obj;
++				if (obj->size >= min_size)
++					break;
++
++				obj = NULL;
++			}
++
++			if (obj != NULL) {
++				uint32_t seqno;
++
++				i915_gem_flush(dev,
++					       obj->write_domain,
++					       obj->write_domain);
++				seqno = i915_add_request(dev, NULL, obj->write_domain);
++				if (seqno == 0)
++					return -ENOMEM;
++
++				ret = i915_do_wait_request(dev, seqno, true);
++				if (ret)
++					return ret;
++
++				continue;
++			}
++		}
++
++		/* If we didn't do any of the above, there's no single buffer
++		 * large enough to swap out for the new one, so just evict
++		 * everything and start again. (This should be rare.)
++		 */
++		if (!list_empty (&dev_priv->mm.inactive_list))
++			return i915_gem_evict_inactive(dev);
++		else
++			return i915_gem_evict_everything(dev);
++	}
++}
++
++int
++i915_gem_evict_everything(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++	uint32_t seqno;
++	bool lists_empty;
++
++	spin_lock(&dev_priv->mm.active_list_lock);
++	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
++		       list_empty(&dev_priv->mm.flushing_list) &&
++		       list_empty(&dev_priv->mm.active_list));
++	spin_unlock(&dev_priv->mm.active_list_lock);
++
++	if (lists_empty)
++		return -ENOSPC;
++
++	/* Flush everything (on to the inactive lists) and evict */
++	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
++	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
++	if (seqno == 0)
++		return -ENOMEM;
++
++	ret = i915_do_wait_request(dev, seqno, true);
++	if (ret)
++		return ret;
++
++	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
++
++	ret = i915_gem_evict_inactive(dev);
++	if (ret)
++		return ret;
++
++	spin_lock(&dev_priv->mm.active_list_lock);
++	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
++		       list_empty(&dev_priv->mm.flushing_list) &&
++		       list_empty(&dev_priv->mm.active_list));
++	spin_unlock(&dev_priv->mm.active_list_lock);
++	BUG_ON(!lists_empty);
++
++	return 0;
++}
++
++/** Unbinds all inactive objects. */
++int
++i915_gem_evict_inactive(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	while (!list_empty(&dev_priv->mm.inactive_list)) {
++		struct drm_gem_object *obj;
++		int ret;
++
++		obj = list_first_entry(&dev_priv->mm.inactive_list,
++				       struct drm_i915_gem_object,
++				       list)->obj;
++
++		ret = i915_gem_object_unbind(obj);
++		if (ret != 0) {
++			DRM_ERROR("Error unbinding object: %d\n", ret);
++			return ret;
++		}
++	}
++
++	return 0;
++}
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-Periodically-flush-the-active-lists-and-req.patch)
@@ -0,0 +1,48 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Fri, 17 Jun 2011 10:04:22 -0500
+Subject: [PATCH 08/10] drm/i915: Periodically flush the active lists and requests
+
+commit 41516474bc14ea128b05bf65c9cbdb04739582ac upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+In order to retire active buffers whilst no client is active, we need to
+insert our own flush requests onto the ring.
+
+This is useful for servers that queue up some rendering and then go to
+sleep as it allows us to the complete processing of those requests,
+potentially making that memory available again much earlier.
+
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+(backported from commit 0a58705b2fc3fa29525cf2fdae3d4276a5771280 upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/i915_gem.c |    7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index b3c7bd1..0314f7f 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1862,9 +1862,16 @@ i915_gem_retire_work_handler(struct work_struct *work)
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 	i915_gem_retire_requests(dev);
++
++	if (!list_empty(&dev_priv->mm.gpu_write_list)) {
++		i915_gem_flush(dev, 0, I915_GEM_GPU_DOMAINS);
++		i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
++	}
++
+ 	if (!dev_priv->mm.suspended &&
+ 	    !list_empty(&dev_priv->mm.request_list))
+ 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
++
+ 	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-evict-Ensure-we-completely-cleanup-on-failu.patch)
@@ -0,0 +1,86 @@
+From: Chris Wilson <chris at chris-wilson.co.uk>
+Date: Fri, 17 Jun 2011 10:04:22 -0500
+Subject: [PATCH 07/10] drm/i915/evict: Ensure we completely cleanup on failure
+
+commit 2f4f8bc3da84232a25e0ced165d4bb5643d3aaad upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+... and not leave the objects in a inconsistent state.
+
+[seth.forshee at canonical.com: Also backported similar cleanups in success
+ path from commit e39a01501b228e1be2037d5bddccae2a820af902]
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Cc: stable at kernel.org
+(backported from commit 092de6f225638ec300936bfcbdc67805733cc78c upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/i915_gem_evict.c |   32 ++++++++++++++++++++------------
+ 1 files changed, 20 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index 84ed1a7..9c1ec78 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -134,9 +134,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ 	}
+ 
+ 	/* Nothing found, clean up and bail out! */
+-	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
++	while (!list_empty(&unwind_list)) {
++		obj_priv = list_first_entry(&unwind_list,
++					    struct drm_i915_gem_object,
++					    evict_list);
++
+ 		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ 		BUG_ON(ret);
++
++		list_del_init(&obj_priv->evict_list);
+ 	}
+ 
+ 	/* We expect the caller to unpin, evict all and try again, or give up.
+@@ -145,26 +151,28 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
+ 	return -ENOSPC;
+ 
+ found:
++	/* drm_mm doesn't allow any other other operations while
++	 * scanning, therefore store to be evicted objects on a
++	 * temporary list. */
+ 	INIT_LIST_HEAD(&eviction_list);
+ 	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+ 				 &unwind_list, evict_list) {
+ 		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+-			/* drm_mm doesn't allow any other other operations while
+-			 * scanning, therefore store to be evicted objects on a
+-			 * temporary list. */
+ 			list_move(&obj_priv->evict_list, &eviction_list);
++			continue;
+ 		}
++		list_del_init(&obj_priv->evict_list);
+ 	}
+ 
+ 	/* Unbinding will emit any required flushes */
+-	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+-				 &eviction_list, evict_list) {
+-#if WATCH_LRU
+-		DRM_INFO("%s: evicting %p\n", __func__, obj);
+-#endif
+-		ret = i915_gem_object_unbind(obj_priv->obj);
+-		if (ret)
+-			return ret;
++	while (!list_empty(&eviction_list)) {
++		obj_priv = list_first_entry(&eviction_list,
++					    struct drm_i915_gem_object,
++					    evict_list);
++		if (ret == 0)
++			ret = i915_gem_object_unbind(obj_priv->obj);
++
++		list_del_init(&obj_priv->evict_list);
+ 	}
+ 
+ 	/* The just created free hole should be on the top of the free stack
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-i915-prepare-for-fair-lru-eviction.patch)
@@ -0,0 +1,192 @@
+From: Daniel Vetter <daniel.vetter at ffwll.ch>
+Date: Fri, 17 Jun 2011 10:04:20 -0500
+Subject: [PATCH 03/10] drm/i915: prepare for fair lru eviction
+
+commit f07147fcefea6d203882c570d61bdf73dd25ae66 upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+This does two little changes:
+
+- Add an alignment parameter for evict_something. It's not really great to
+  whack a carefully sized hole into the gtt with the wrong alignment.
+  Especially since the fallback path is a full evict.
+
+- With the inactive scan stuff we need to evict more that one object, so
+  move the unbind call into the helper function that scans for the object
+  to be evicted, too.  And adjust its name.
+
+No functional changes in this patch, just preparation.
+
+Signed-Off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric at anholt.net>
+(backported from commit 0108a3edd5c2e3b150a550d565b6aa1a67c0edbe upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/i915/i915_gem.c |   67 ++++++++++++++++++++++++---------------
+ 1 files changed, 41 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index a34fd44..e0afa05 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -36,6 +36,7 @@
+ 
+ #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+ 
++static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
+ static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
+@@ -49,7 +50,8 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+ 					   unsigned alignment);
+ static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
+-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
++static int i915_gem_evict_something(struct drm_device *dev, int min_size,
++				    unsigned alignment);
+ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
+ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ 				struct drm_i915_gem_pwrite *args,
+@@ -334,7 +336,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+ 	if (ret == -ENOMEM) {
+ 		struct drm_device *dev = obj->dev;
+ 
+-		ret = i915_gem_evict_something(dev, obj->size);
++		ret = i915_gem_evict_something(dev, obj->size,
++					       i915_gem_get_gtt_alignment(obj));
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2102,10 +2105,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
+ 	return 0;
+ }
+ 
+-static struct drm_gem_object *
+-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
++static int
++i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
++				      unsigned alignment, int *found)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_gem_object *obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	struct drm_gem_object *best = NULL;
+ 	struct drm_gem_object *first = NULL;
+@@ -2119,14 +2124,31 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+ 			    (!best || obj->size < best->size)) {
+ 				best = obj;
+ 				if (best->size == min_size)
+-					return best;
++					break;
+ 			}
+ 			if (!first)
+ 			    first = obj;
+ 		}
+ 	}
+ 
+-	return best ? best : first;
++	obj = best ? best : first;
++
++	if (!obj) {
++		*found = 0;
++		return 0;
++	}
++
++	*found = 1;
++
++#if WATCH_LRU
++	DRM_INFO("%s: evicting %p\n", __func__, obj);
++#endif
++	obj_priv = obj->driver_private;
++	BUG_ON(obj_priv->pin_count != 0);
++	BUG_ON(obj_priv->active);
++
++	/* Wait on the rendering and unbind the buffer. */
++	return i915_gem_object_unbind(obj);
+ }
+ 
+ static int
+@@ -2173,11 +2195,11 @@ i915_gem_evict_everything(struct drm_device *dev)
+ }
+ 
+ static int
+-i915_gem_evict_something(struct drm_device *dev, int min_size)
++i915_gem_evict_something(struct drm_device *dev,
++			 int min_size, unsigned alignment)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_gem_object *obj;
+-	int ret;
++	int ret, found;
+ 
+ 	for (;;) {
+ 		i915_gem_retire_requests(dev);
+@@ -2185,20 +2207,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
+ 		/* If there's an inactive buffer available now, grab it
+ 		 * and be done.
+ 		 */
+-		obj = i915_gem_find_inactive_object(dev, min_size);
+-		if (obj) {
+-			struct drm_i915_gem_object *obj_priv;
+-
+-#if WATCH_LRU
+-			DRM_INFO("%s: evicting %p\n", __func__, obj);
+-#endif
+-			obj_priv = obj->driver_private;
+-			BUG_ON(obj_priv->pin_count != 0);
+-			BUG_ON(obj_priv->active);
+-
+-			/* Wait on the rendering and unbind the buffer. */
+-			return i915_gem_object_unbind(obj);
+-		}
++		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
++							    alignment,
++							    &found);
++		if (found)
++			return ret;
+ 
+ 		/* If we didn't get anything, but the ring is still processing
+ 		 * things, wait for the next to finish and hopefully leave us
+@@ -2224,6 +2237,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
+ 		 * will get moved to inactive.
+ 		 */
+ 		if (!list_empty(&dev_priv->mm.flushing_list)) {
++			struct drm_gem_object *obj = NULL;
+ 			struct drm_i915_gem_object *obj_priv;
+ 
+ 			/* Find an object that we can immediately reuse */
+@@ -2672,7 +2686,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ #if WATCH_LRU
+ 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
+ #endif
+-		ret = i915_gem_evict_something(dev, obj->size);
++		ret = i915_gem_evict_something(dev, obj->size, alignment);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2690,7 +2704,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 
+ 		if (ret == -ENOMEM) {
+ 			/* first try to clear up some space from the GTT */
+-			ret = i915_gem_evict_something(dev, obj->size);
++			ret = i915_gem_evict_something(dev, obj->size,
++						       alignment);
+ 			if (ret) {
+ 				/* now try to shrink everyone else */
+ 				if (gfpmask) {
+@@ -2720,7 +2735,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+ 		drm_mm_put_block(obj_priv->gtt_space);
+ 		obj_priv->gtt_space = NULL;
+ 
+-		ret = i915_gem_evict_something(dev, obj->size);
++		ret = i915_gem_evict_something(dev, obj->size, alignment);
+ 		if (ret)
+ 			return ret;
+ 
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-implement-helper-functions-for-scanning-lru-list.patch)
@@ -0,0 +1,309 @@
+From: Daniel Vetter <daniel.vetter at ffwll.ch>
+Date: Fri, 17 Jun 2011 10:04:20 -0500
+Subject: [PATCH 02/10] drm: implement helper functions for scanning lru list
+
+commit be54bbcaee8559cc412b5e4abc8eb33388b083e0 upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+These helper functions can be used to efficiently scan lru list
+for eviction. Eviction becomes a three stage process:
+1. Scanning through the lru list until a suitable hole has been found.
+2. Scan backwards to restore drm_mm consistency and find out which
+   objects fall into the hole.
+3. Evict the objects that fall into the hole.
+
+These helper functions don't allocate any memory (at the price of
+not allowing any other concurrent operations). Hence this can also be
+used for ttm (which does lru scanning under a spinlock).
+
+Evicting objects in this fashion should be more fair than the current
+approach by i915 (scan the lru for a object large enough to contain
+the new object). It's also more efficient than the current approach used
+by ttm (uncoditionally evict objects from the lru until there's enough
+free space).
+
+Signed-Off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+Acked-by: Thomas Hellstrom <thellstrom at vmwgfx.com>
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+(backported from commit 709ea97145c125b3811ff70429e90ebdb0e832e5 upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/drm_mm.c |  167 ++++++++++++++++++++++++++++++++++++++++++++-
+ include/drm/drm_mm.h     |   15 ++++-
+ 2 files changed, 177 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+index 4935e91..f1d3314 100644
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -83,9 +83,9 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+ 	struct drm_mm_node *child;
+ 
+ 	if (atomic)
+-		child = kmalloc(sizeof(*child), GFP_ATOMIC);
++		child = kzalloc(sizeof(*child), GFP_ATOMIC);
+ 	else
+-		child = kmalloc(sizeof(*child), GFP_KERNEL);
++		child = kzalloc(sizeof(*child), GFP_KERNEL);
+ 
+ 	if (unlikely(child == NULL)) {
+ 		spin_lock(&mm->unused_lock);
+@@ -115,7 +115,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
+ 	spin_lock(&mm->unused_lock);
+ 	while (mm->num_unused < MM_UNUSED_TARGET) {
+ 		spin_unlock(&mm->unused_lock);
+-		node = kmalloc(sizeof(*node), GFP_KERNEL);
++		node = kzalloc(sizeof(*node), GFP_KERNEL);
+ 		spin_lock(&mm->unused_lock);
+ 
+ 		if (unlikely(node == NULL)) {
+@@ -179,7 +179,6 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+ 
+ 	INIT_LIST_HEAD(&child->fl_entry);
+ 
+-	child->free = 0;
+ 	child->size = size;
+ 	child->start = parent->start;
+ 	child->mm = parent->mm;
+@@ -280,6 +279,9 @@ void drm_mm_put_block(struct drm_mm_node *cur)
+ 
+ 	int merged = 0;
+ 
++	BUG_ON(cur->scanned_block || cur->scanned_prev_free
++				  || cur->scanned_next_free);
++
+ 	if (cur_head->prev != root_head) {
+ 		prev_node =
+ 		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+@@ -359,6 +361,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 	struct drm_mm_node *best;
+ 	unsigned long best_size;
+ 
++	BUG_ON(mm->scanned_blocks);
++
+ 	best = NULL;
+ 	best_size = ~0UL;
+ 
+@@ -394,6 +398,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ 	struct drm_mm_node *best;
+ 	unsigned long best_size;
+ 
++	BUG_ON(mm->scanned_blocks);
++
+ 	best = NULL;
+ 	best_size = ~0UL;
+ 
+@@ -419,6 +425,158 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ }
+ EXPORT_SYMBOL(drm_mm_search_free_in_range);
+ 
++/**
++ * Initializa lru scanning.
++ *
++ * This simply sets up the scanning routines with the parameters for the desired
++ * hole.
++ *
++ * Warning: As long as the scan list is non-empty, no other operations than
++ * adding/removing nodes to/from the scan list are allowed.
++ */
++void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
++		      unsigned alignment)
++{
++	mm->scan_alignment = alignment;
++	mm->scan_size = size;
++	mm->scanned_blocks = 0;
++	mm->scan_hit_start = 0;
++	mm->scan_hit_size = 0;
++}
++EXPORT_SYMBOL(drm_mm_init_scan);
++
++/**
++ * Add a node to the scan list that might be freed to make space for the desired
++ * hole.
++ *
++ * Returns non-zero, if a hole has been found, zero otherwise.
++ */
++int drm_mm_scan_add_block(struct drm_mm_node *node)
++{
++	struct drm_mm *mm = node->mm;
++	struct list_head *prev_free, *next_free;
++	struct drm_mm_node *prev_node, *next_node;
++
++	mm->scanned_blocks++;
++
++	prev_free = next_free = NULL;
++
++	BUG_ON(node->free);
++	node->scanned_block = 1;
++	node->free = 1;
++
++	if (node->ml_entry.prev != &mm->ml_entry) {
++		prev_node = list_entry(node->ml_entry.prev, struct drm_mm_node,
++				       ml_entry);
++
++		if (prev_node->free) {
++			list_del(&prev_node->ml_entry);
++
++			node->start = prev_node->start;
++			node->size += prev_node->size;
++
++			prev_node->scanned_prev_free = 1;
++
++			prev_free = &prev_node->fl_entry;
++		}
++	}
++
++	if (node->ml_entry.next != &mm->ml_entry) {
++		next_node = list_entry(node->ml_entry.next, struct drm_mm_node,
++				       ml_entry);
++
++		if (next_node->free) {
++			list_del(&next_node->ml_entry);
++
++			node->size += next_node->size;
++
++			next_node->scanned_next_free = 1;
++
++			next_free = &next_node->fl_entry;
++		}
++	}
++
++	/* The fl_entry list is not used for allocated objects, so these two
++	 * pointers can be abused (as long as no allocations in this memory
++	 * manager happens). */
++	node->fl_entry.prev = prev_free;
++	node->fl_entry.next = next_free;
++
++	if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
++		mm->scan_hit_start = node->start;
++		mm->scan_hit_size = node->size;
++
++		return 1;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(drm_mm_scan_add_block);
++
++/**
++ * Remove a node from the scan list.
++ *
++ * Nodes _must_ be removed in the exact same order from the scan list as they
++ * have been added, otherwise the internal state of the memory manager will be
++ * corrupted.
++ *
++ * When the scan list is empty, the selected memory nodes can be freed. An
++ * immediatly following drm_mm_search_free with best_match = 0 will then return
++ * the just freed block (because its at the top of the fl_entry list).
++ *
++ * Returns one if this block should be evicted, zero otherwise. Will always
++ * return zero when no hole has been found.
++ */
++int drm_mm_scan_remove_block(struct drm_mm_node *node)
++{
++	struct drm_mm *mm = node->mm;
++	struct drm_mm_node *prev_node, *next_node;
++
++	mm->scanned_blocks--;
++
++	BUG_ON(!node->scanned_block);
++	node->scanned_block = 0;
++	node->free = 0;
++
++	prev_node = list_entry(node->fl_entry.prev, struct drm_mm_node,
++			       fl_entry);
++	next_node = list_entry(node->fl_entry.next, struct drm_mm_node,
++			       fl_entry);
++
++	if (prev_node) {
++		BUG_ON(!prev_node->scanned_prev_free);
++		prev_node->scanned_prev_free = 0;
++
++		list_add_tail(&prev_node->ml_entry, &node->ml_entry);
++
++		node->start = prev_node->start + prev_node->size;
++		node->size -= prev_node->size;
++	}
++
++	if (next_node) {
++		BUG_ON(!next_node->scanned_next_free);
++		next_node->scanned_next_free = 0;
++
++		list_add(&next_node->ml_entry, &node->ml_entry);
++
++		node->size -= next_node->size;
++	}
++
++	INIT_LIST_HEAD(&node->fl_entry);
++
++	/* Only need to check for containement because start&size for the
++	 * complete resulting free block (not just the desired part) is
++	 * stored. */
++	if (node->start >= mm->scan_hit_start &&
++	    node->start + node->size
++	    		<= mm->scan_hit_start + mm->scan_hit_size) {
++		return 1;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(drm_mm_scan_remove_block);
++
+ int drm_mm_clean(struct drm_mm * mm)
+ {
+ 	struct list_head *head = &mm->ml_entry;
+@@ -433,6 +591,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+ 	INIT_LIST_HEAD(&mm->fl_entry);
+ 	INIT_LIST_HEAD(&mm->unused_nodes);
+ 	mm->num_unused = 0;
++	mm->scanned_blocks = 0;
+ 	spin_lock_init(&mm->unused_lock);
+ 
+ 	return drm_mm_create_tail_node(mm, start, size, 0);
+diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
+index 4c10be3..83a7495 100644
+--- a/include/drm/drm_mm.h
++++ b/include/drm/drm_mm.h
+@@ -44,7 +44,10 @@
+ struct drm_mm_node {
+ 	struct list_head fl_entry;
+ 	struct list_head ml_entry;
+-	int free;
++	unsigned free : 1;
++	unsigned scanned_block : 1;
++	unsigned scanned_prev_free : 1;
++	unsigned scanned_next_free : 1;
+ 	unsigned long start;
+ 	unsigned long size;
+ 	struct drm_mm *mm;
+@@ -57,6 +60,11 @@ struct drm_mm {
+ 	struct list_head unused_nodes;
+ 	int num_unused;
+ 	spinlock_t unused_lock;
++	unsigned scan_alignment;
++	unsigned long scan_size;
++	unsigned long scan_hit_start;
++	unsigned scan_hit_size;
++	unsigned scanned_blocks;
+ };
+ 
+ /*
+@@ -133,6 +141,11 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+ 	return block->mm;
+ }
+ 
++void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
++		      unsigned alignment);
++int drm_mm_scan_add_block(struct drm_mm_node *node);
++int drm_mm_scan_remove_block(struct drm_mm_node *node);
++
+ extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+ #ifdef CONFIG_DEBUG_FS
+ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch)
@@ -0,0 +1,51 @@
+From 0eef43558179bd918796d7008ff905b1b142cd5b Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexdeucher at gmail.com>
+Date: Thu, 14 Apr 2011 11:19:50 -0400
+Subject: [PATCH] drm/radeon/kms: fix bad shift in atom iio table parser
+
+commit 8e461123f28e6b17456225e70eb834b3b30d28bb upstream.
+
+Noticed by Patrick Lowry.
+
+Signed-off-by: Alex Deucher <alexdeucher at gmail.com>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/radeon/atom.c |    6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index b1f929d..052312f 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -128,7 +128,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+ 		case ATOM_IIO_MOVE_INDEX:
+ 			temp &=
+ 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+-			      CU8(base + 2));
++			      CU8(base + 3));
+ 			temp |=
+ 			    ((index >> CU8(base + 2)) &
+ 			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
+@@ -138,7 +138,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+ 		case ATOM_IIO_MOVE_DATA:
+ 			temp &=
+ 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+-			      CU8(base + 2));
++			      CU8(base + 3));
+ 			temp |=
+ 			    ((data >> CU8(base + 2)) &
+ 			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
+@@ -148,7 +148,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
+ 		case ATOM_IIO_MOVE_ATTR:
+ 			temp &=
+ 			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
+-			      CU8(base + 2));
++			      CU8(base + 3));
+ 			temp |=
+ 			    ((ctx->
+ 			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
+-- 
+1.7.4.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch)
@@ -0,0 +1,46 @@
+From: Daniel Haid <d.haid at gogi.tv>
+Date: Wed, 8 Jun 2011 20:04:45 +1000
+Subject: [PATCH 10/10] drm/radeon/kms: fix for radeon on systems >4GB without hardware iommu
+
+commit 2e49607f2fdfe966ea6caae27c1e7547b917ccb7 upstream.
+
+commit 62fff811d73095bd95579d72f558f03c78f7914a upstream.
+
+On my x86_64 system with >4GB of ram and swiotlb instead of
+a hardware iommu (because I have a VIA chipset), the call
+to pci_set_dma_mask (see below) with 40bits returns an error.
+
+But it seems that the radeon driver is designed to have
+need_dma32 = true exactly if pci_set_dma_mask is called
+with 32 bits and false if it is called with 40 bits.
+
+I have read somewhere that the default are 32 bits. So if the
+call fails I suppose that need_dma32 should be set to true.
+
+And indeed the patch fixes the problem I have had before
+and which I had described here:
+http://choon.net/forum/read.php?21,106131,115940
+
+Acked-by: Alex Deucher <alexdeucher at gmail.com>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/radeon/radeon_device.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index ac47fd0..6a78b34 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -682,6 +682,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	dma_bits = rdev->need_dma32 ? 32 : 40;
+ 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ 	if (r) {
++		rdev->need_dma32 = true;
+ 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+ 	}
+ 
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch)
@@ -0,0 +1,36 @@
+From: Marcin Slusarz <marcin.slusarz at gmail.com>
+Date: Mon, 22 Aug 2011 21:17:57 +0000
+Subject: [PATCH] drm/ttm: fix ttm_bo_add_ttm(user) failure path
+
+commit 7c4c3960dff109bc5db4c35da481c212dadb5eb5 upstream.
+
+ttm_tt_destroy kfrees passed object, so we need to nullify
+a reference to it.
+
+Signed-off-by: Marcin Slusarz <marcin.slusarz at gmail.com>
+Reviewed-by: Thomas Hellstrom <thellstrom at vmware.com>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+---
+ drivers/gpu/drm/ttm/ttm_bo.c |    4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index acbfa27..94706ef 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -343,8 +343,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+ 
+ 		ret = ttm_tt_set_user(bo->ttm, current,
+ 				      bo->buffer_start, bo->num_pages);
+-		if (unlikely(ret != 0))
++		if (unlikely(ret != 0)) {
+ 			ttm_tt_destroy(bo->ttm);
++			bo->ttm = NULL;
++		}
+ 		break;
+ 	default:
+ 		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/drm_mm-extract-check_free_mm_node.patch)
@@ -0,0 +1,143 @@
+From: Daniel Vetter <daniel.vetter at ffwll.ch>
+Date: Fri, 17 Jun 2011 10:04:19 -0500
+Subject: [PATCH 01/10] drm_mm: extract check_free_mm_node
+
+commit d4a82251610c863bab7f457cb7a76a4bf01abb21 upstream.
+
+BugLink: http://bugs.launchpad.net/bugs/599017
+
+There are already two copies of this logic. And the new scanning
+stuff will add some more. So extract it into a small helper
+function.
+
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+Acked-by: Thomas Hellstrom <thellstrom at vmwgfx.com>
+Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+(backported from commit 7a6b2896f261894dde287d3faefa4b432cddca53 upstream)
+
+Signed-off-by: Seth Forshee <seth.forshee at canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
+---
+ drivers/gpu/drm/drm_mm.c |   69 ++++++++++++++++++++++-----------------------
+ 1 files changed, 34 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+index 2ac074c8..4935e91 100644
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -328,6 +328,27 @@ void drm_mm_put_block(struct drm_mm_node *cur)
+ 
+ EXPORT_SYMBOL(drm_mm_put_block);
+ 
++static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
++			      unsigned alignment)
++{
++	unsigned wasted = 0;
++
++	if (entry->size < size)
++		return 0;
++
++	if (alignment) {
++		register unsigned tmp = entry->start % alignment;
++		if (tmp)
++			wasted = alignment - tmp;
++	}
++
++	if (entry->size >= size + wasted) {
++		return 1;
++	}
++
++	return 0;
++}
++
+ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 				       unsigned long size,
+ 				       unsigned alignment, int best_match)
+@@ -337,31 +358,22 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 	struct drm_mm_node *entry;
+ 	struct drm_mm_node *best;
+ 	unsigned long best_size;
+-	unsigned wasted;
+ 
+ 	best = NULL;
+ 	best_size = ~0UL;
+ 
+ 	list_for_each(list, free_stack) {
+ 		entry = list_entry(list, struct drm_mm_node, fl_entry);
+-		wasted = 0;
+ 
+-		if (entry->size < size)
++		if (!check_free_mm_node(entry, size, alignment))
+ 			continue;
+ 
+-		if (alignment) {
+-			register unsigned tmp = entry->start % alignment;
+-			if (tmp)
+-				wasted += alignment - tmp;
+-		}
++		if (!best_match)
++			return entry;
+ 
+-		if (entry->size >= size + wasted) {
+-			if (!best_match)
+-				return entry;
+-			if (entry->size < best_size) {
+-				best = entry;
+-				best_size = entry->size;
+-			}
++		if (entry->size < best_size) {
++			best = entry;
++			best_size = entry->size;
+ 		}
+ 	}
+ 
+@@ -381,38 +393,25 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+ 	struct drm_mm_node *entry;
+ 	struct drm_mm_node *best;
+ 	unsigned long best_size;
+-	unsigned wasted;
+ 
+ 	best = NULL;
+ 	best_size = ~0UL;
+ 
+ 	list_for_each(list, free_stack) {
+ 		entry = list_entry(list, struct drm_mm_node, fl_entry);
+-		wasted = 0;
+-
+-		if (entry->size < size)
+-			continue;
+ 
+ 		if (entry->start > end || (entry->start+entry->size) < start)
+ 			continue;
+ 
+-		if (entry->start < start)
+-			wasted += start - entry->start;
++		if (!check_free_mm_node(entry, size, alignment))
++			continue;
+ 
+-		if (alignment) {
+-			register unsigned tmp = (entry->start + wasted) % alignment;
+-			if (tmp)
+-				wasted += alignment - tmp;
+-		}
++		if (!best_match)
++			return entry;
+ 
+-		if (entry->size >= size + wasted &&
+-		    (entry->start + wasted + size) <= end) {
+-			if (!best_match)
+-				return entry;
+-			if (entry->size < best_size) {
+-				best = entry;
+-				best_size = entry->size;
+-			}
++		if (entry->size < best_size) {
++			best = entry;
++			best_size = entry->size;
+ 		}
+ 	}
+ 
+-- 
+1.7.2.5
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch)
@@ -0,0 +1,35 @@
+From: Andrey Vagin <avagin at openvz.org>
+Date: Tue, 22 Mar 2011 16:35:11 -0700
+Subject: [PATCH] fs/devpts/inode.c: correctly check d_alloc_name() return
+ code in devpts_pty_new()
+
+commit b12d12596992f608f5506a8dabe4d1299594bd1e upstream.
+
+d_alloc_name return NULL in case error, but we expect errno in
+devpts_pty_new.
+
+Addresses http://bugzilla.openvz.org/show_bug.cgi?id=1758
+
+Signed-off-by: Andrey Vagin <avagin at openvz.org>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ fs/devpts/inode.c |    2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index c6bd815..2f27e57 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -502,7 +502,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty)
+ 	mutex_lock(&root->d_inode->i_mutex);
+ 
+ 	dentry = d_alloc_name(root, s);
+-	if (!IS_ERR(dentry)) {
++	if (dentry) {
+ 		d_add(dentry, inode);
+ 		fsnotify_create(root->d_inode, dentry);
+ 	} else {
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch)
@@ -0,0 +1,35 @@
+From: Carolyn Wyborny <carolyn.wyborny at intel.com>
+Date: Sat, 25 Jun 2011 13:18:12 +0000
+Subject: [PATCH] igb: Fix lack of flush after register write and before delay
+
+commit 064b43304ed8ede8e13ff7b4338d09fd37bcffb1 upstream.
+
+Register writes followed by a delay are required to have a flush
+before the delay in order to commit the values to the register.  Without
+the flush, the code following the delay may not function correctly.
+
+Reported-by: Tong Ho <tong.ho at ericsson.com>
+Reported-by: Guenter Roeck <guenter.roeck at ericsson.com>
+Signed-off-by: Carolyn Wyborny <carolyn.wyborny at intel.com>
+Tested-by:  Aaron Brown <aaron.f.brown at intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher at intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+---
+ drivers/net/igb/e1000_82575.c |    1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
+index 0f563c8..493e331 100644
+--- a/drivers/net/igb/e1000_82575.c
++++ b/drivers/net/igb/e1000_82575.c
+@@ -1735,6 +1735,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+ 		ctrl |= E1000_CTRL_RST;
+ 
+ 	wr32(E1000_CTRL, ctrl);
++	wrfl();
+ 
+ 	/* Add delay to insure DEV_RST has time to complete */
+ 	if (global_device_reset)
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch)
@@ -0,0 +1,41 @@
+From: Herbert Xu <herbert at gondor.apana.org.au>
+Date: Thu, 27 May 2010 16:14:30 -0700
+Subject: ipv6: Add GSO support on forwarding path
+
+From: Herbert Xu <herbert at gondor.apana.org.au>
+
+commit 0aa68271510ae2b221d4b60892103837be63afe4 upstream.
+
+Currently we disallow GSO packets on the IPv6 forward path.
+This patch fixes this.
+
+Note that I discovered that our existing GSO MTU checks (e.g.,
+IPv4 forwarding) are buggy in that they skip the check altogether,
+when they really should be checking gso_size + header instead.
+
+I have also been lazy here in that I haven't bothered to segment
+the GSO packet by hand before generating an ICMP message.  Someone
+should add that to be 100% correct.
+
+Reported-by: Ralf Baechle <ralf at linux-mips.org>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Apollon Oikonomopoulos <apoikos at gmail.com>
+Signed-off-by: Faidon Liambotis <paravoid at debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+---
+ net/ipv6/ip6_output.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -510,7 +510,7 @@ int ip6_forward(struct sk_buff *skb)
+ 		}
+ 	}
+ 
+-	if (skb->len > dst_mtu(dst)) {
++	if (skb->len > dst_mtu(dst) && !skb_is_gso(skb)) {
+ 		/* Again, force OUTPUT device used as source address */
+ 		skb->dev = dst->dev;
+ 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch)
@@ -0,0 +1,184 @@
+From f25dd717c713e1132ebf010ca4893f81281bb65c Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet at gmail.com>
+Date: Mon, 8 Aug 2011 23:44:00 -0700
+Subject: [PATCH] ipv6: make fragment identifications less predictable
+
+[ Backport of upstream commit 87c48fa3b4630905f98268dde838ee43626a060c ]
+
+Fernando Gont reported current IPv6 fragment identification generation
+was not secure, because using a very predictable system-wide generator,
+allowing various attacks.
+
+IPv4 uses inetpeer cache to address this problem and to get good
+performance. We'll use this mechanism when IPv6 inetpeer is stable
+enough in linux-3.1
+
+For the time being, we use jhash on destination address to provide less
+predictable identifications. Also remove a spinlock and use cmpxchg() to
+get better SMP performance.
+
+Reported-by: Fernando Gont <fernando at gont.com.ar>
+Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+[bwh: Backport further to 2.6.32]
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ include/net/ipv6.h      |   12 +-----------
+ include/net/transp_v6.h |    2 ++
+ net/ipv6/af_inet6.c     |    2 ++
+ net/ipv6/ip6_output.c   |   40 +++++++++++++++++++++++++++++++++++-----
+ net/ipv6/udp.c          |    2 +-
+ 5 files changed, 41 insertions(+), 17 deletions(-)
+
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 639bbf0..52d86da 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -449,17 +449,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
+ 	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+ 
+-static __inline__ void ipv6_select_ident(struct frag_hdr *fhdr)
+-{
+-	static u32 ipv6_fragmentation_id = 1;
+-	static DEFINE_SPINLOCK(ip6_id_lock);
+-
+-	spin_lock_bh(&ip6_id_lock);
+-	fhdr->identification = htonl(ipv6_fragmentation_id);
+-	if (++ipv6_fragmentation_id == 0)
+-		ipv6_fragmentation_id = 1;
+-	spin_unlock_bh(&ip6_id_lock);
+-}
++extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+ 
+ /*
+  *	Prototypes exported by ipv6
+diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
+index d65381c..8beefe1 100644
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -16,6 +16,8 @@ extern struct proto tcpv6_prot;
+ 
+ struct flowi;
+ 
++extern void initialize_hashidentrnd(void);
++
+ /* extention headers */
+ extern int				ipv6_exthdrs_init(void);
+ extern void				ipv6_exthdrs_exit(void);
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index e127a32..835590d 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -1073,6 +1073,8 @@ static int __init inet6_init(void)
+ 		goto out;
+ 	}
+ 
++	initialize_hashidentrnd();
++
+ 	err = proto_register(&tcpv6_prot, 1);
+ 	if (err)
+ 		goto out;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index eca3ef7..43c31f9 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -604,6 +604,35 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 	return offset;
+ }
+ 
++static u32 hashidentrnd __read_mostly;
++#define FID_HASH_SZ 16
++static u32 ipv6_fragmentation_id[FID_HASH_SZ];
++
++void __init initialize_hashidentrnd(void)
++{
++	get_random_bytes(&hashidentrnd, sizeof(hashidentrnd));
++}
++
++static u32 __ipv6_select_ident(const struct in6_addr *addr)
++{
++	u32 newid, oldid, hash = jhash2((u32 *)addr, 4, hashidentrnd);
++	u32 *pid = &ipv6_fragmentation_id[hash % FID_HASH_SZ];
++
++	do {
++		oldid = *pid;
++		newid = oldid + 1;
++		if (!(hash + newid))
++			newid++;
++	} while (cmpxchg(pid, oldid, newid) != oldid);
++
++	return hash + newid;
++}
++
++void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
++{
++	fhdr->identification = htonl(__ipv6_select_ident(&rt->rt6i_dst.addr));
++}
++
+ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ {
+ 	struct sk_buff *frag;
+@@ -689,7 +718,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
+ 		skb_reset_network_header(skb);
+ 		memcpy(skb_network_header(skb), tmp_hdr, hlen);
+ 
+-		ipv6_select_ident(fh);
++		ipv6_select_ident(fh, rt);
+ 		fh->nexthdr = nexthdr;
+ 		fh->reserved = 0;
+ 		fh->frag_off = htons(IP6_MF);
+@@ -835,7 +864,7 @@ slow_path:
+ 		fh->nexthdr = nexthdr;
+ 		fh->reserved = 0;
+ 		if (!frag_id) {
+-			ipv6_select_ident(fh);
++			ipv6_select_ident(fh, rt);
+ 			frag_id = fh->identification;
+ 		} else
+ 			fh->identification = frag_id;
+@@ -1039,7 +1068,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 			int getfrag(void *from, char *to, int offset, int len,
+ 			int odd, struct sk_buff *skb),
+ 			void *from, int length, int hh_len, int fragheaderlen,
+-			int transhdrlen, int mtu,unsigned int flags)
++			int transhdrlen, int mtu,unsigned int flags,
++			struct rt6_info *rt)
+ 
+ {
+ 	struct sk_buff *skb;
+@@ -1084,7 +1114,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
+ 		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+ 					     sizeof(struct frag_hdr)) & ~7;
+ 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+-		ipv6_select_ident(&fhdr);
++		ipv6_select_ident(&fhdr, rt);
+ 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+ 		__skb_queue_tail(&sk->sk_write_queue, skb);
+ 
+@@ -1233,7 +1263,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
+ 
+ 		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
+ 					  fragheaderlen, transhdrlen, mtu,
+-					  flags);
++					  flags, rt);
+ 		if (err)
+ 			goto error;
+ 		return 0;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 154dd6b..f5ff5d3 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1167,7 +1167,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+ 	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ 	fptr->nexthdr = nexthdr;
+ 	fptr->reserved = 0;
+-	ipv6_select_ident(fptr);
++	ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+ 
+ 	/* Fragment the skb. ipv6 header and the remaining fields of the
+ 	 * fragment header are updated in ipv6_gso_segment()
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch)
@@ -0,0 +1,72 @@
+From: Sridhar Samudrala <sri at us.ibm.com>
+Date: Sat, 23 Jan 2010 02:02:21 -0800
+Subject: net: Fix IPv6 GSO type checks in Intel ethernet drivers
+
+From: Sridhar Samudrala <sri at us.ibm.com>
+
+commit 8e1e8a4779cb23c1d9f51e9223795e07ec54d77a upstream.
+
+Found this problem when testing IPv6 from a KVM guest to a remote
+host via e1000e device on the host.
+The following patch fixes the check for IPv6 GSO packet in Intel
+ethernet drivers to use skb_is_gso_v6(). SKB_GSO_DODGY is also set
+when packets are forwarded from a guest.
+
+Signed-off-by: Sridhar Samudrala <sri at us.ibm.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher at intel.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+Signed-off-by: Faidon Liambotis <paravoid at debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
+
+
+---
+ drivers/net/e1000e/netdev.c    |    2 +-
+ drivers/net/igb/igb_main.c     |    2 +-
+ drivers/net/igbvf/netdev.c     |    2 +-
+ drivers/net/ixgbe/ixgbe_main.c |    2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/e1000e/netdev.c
++++ b/drivers/net/e1000e/netdev.c
+@@ -3807,7 +3807,7 @@ static int e1000_tso(struct e1000_adapte
+ 								 0);
+ 		cmd_length = E1000_TXD_CMD_IP;
+ 		ipcse = skb_transport_offset(skb) - 1;
+-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
++	} else if (skb_is_gso_v6(skb)) {
+ 		ipv6_hdr(skb)->payload_len = 0;
+ 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ 		                                       &ipv6_hdr(skb)->daddr,
+--- a/drivers/net/igb/igb_main.c
++++ b/drivers/net/igb/igb_main.c
+@@ -3032,7 +3032,7 @@ static inline int igb_tso_adv(struct igb
+ 							 iph->daddr, 0,
+ 							 IPPROTO_TCP,
+ 							 0);
+-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
++	} else if (skb_is_gso_v6(skb)) {
+ 		ipv6_hdr(skb)->payload_len = 0;
+ 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ 						       &ipv6_hdr(skb)->daddr,
+--- a/drivers/net/igbvf/netdev.c
++++ b/drivers/net/igbvf/netdev.c
+@@ -1953,7 +1953,7 @@ static int igbvf_tso(struct igbvf_adapte
+ 		                                         iph->daddr, 0,
+ 		                                         IPPROTO_TCP,
+ 		                                         0);
+-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
++	} else if (skb_is_gso_v6(skb)) {
+ 		ipv6_hdr(skb)->payload_len = 0;
+ 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ 		                                       &ipv6_hdr(skb)->daddr,
+--- a/drivers/net/ixgbe/ixgbe_main.c
++++ b/drivers/net/ixgbe/ixgbe_main.c
+@@ -4881,7 +4881,7 @@ static int ixgbe_tso(struct ixgbe_adapte
+ 			                                         IPPROTO_TCP,
+ 			                                         0);
+ 			adapter->hw_tso_ctxt++;
+-		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
++		} else if (skb_is_gso_v6(skb)) {
+ 			ipv6_hdr(skb)->payload_len = 0;
+ 			tcp_hdr(skb)->check =
+ 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch)
@@ -0,0 +1,53 @@
+commit 7df4b4890d3e8253d007b5e7ae4ed452d5f39cac
+Author: Ian Campbell <ijc at hellion.org.uk>
+Date:   Sat Oct 15 07:03:41 2011 +0100
+
+    Revert "xen: Use IRQF_FORCE_RESUME"
+    
+    This reverts commit 652c98bac315a2253628885f05cfd5f30b553ae5.
+
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 009ca4e..e4820ce 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -536,7 +536,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
++	irqflags |= IRQF_NO_SUSPEND;
+ 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ 	if (retval != 0) {
+ 		unbind_from_irq(irq);
+@@ -891,6 +891,7 @@ void xen_poll_irq(int irq)
+ void xen_irq_resume(void)
+ {
+ 	unsigned int cpu, irq, evtchn;
++	struct irq_desc *desc;
+ 
+ 	init_evtchn_cpu_bindings();
+ 
+@@ -909,6 +910,23 @@ void xen_irq_resume(void)
+ 		restore_cpu_virqs(cpu);
+ 		restore_cpu_ipis(cpu);
+ 	}
++
++	/*
++	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
++	 * are not handled by the IRQ core.
++	 */
++	for_each_irq_desc(irq, desc) {
++		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
++			continue;
++		if (desc->status & IRQ_DISABLED)
++			continue;
++
++		evtchn = evtchn_from_irq(irq);
++		if (evtchn == -1)
++			continue;
++
++		unmask_evtchn(evtchn);
++	}
+ }
+ 
+ static struct irq_chip xen_dynamic_chip __read_mostly = {

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/sched-work-around-sched_group-cpu_power-0.patch)
@@ -0,0 +1,116 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: sched: Work around sched_group::cpu_power = 0
+
+#636797 and others report a division by zero in the scheduler due
+to sched_group::cpu_power.  Try to work out why this is happening,
+and fix it up to something sane it does.
+
+Thanks to Bjoern Boschman <bjoern.boschman at nfon.net> for part of this.
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index a5387d5..7d10fbc 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -919,6 +919,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+ 	return to_cpumask(sg->cpumask);
+ }
+ 
++extern unsigned int sched_warn_zero_power(struct sched_group *group);
++
++static inline unsigned int sched_group_power(struct sched_group *group)
++{
++	unsigned int power = ACCESS_ONCE(group->cpu_power);
++
++	return likely(power > 0) ? power : sched_warn_zero_power(group);
++}
++
+ enum sched_domain_level {
+ 	SD_LV_NONE = 0,
+ 	SD_LV_SIBLING,
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -3804,6 +3804,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
+ 	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ 	unsigned long power = SCHED_LOAD_SCALE;
+ 	struct sched_group *sdg = sd->groups;
++	unsigned long scale_rt;
+ 
+ 	if (sched_feat(ARCH_POWER))
+ 		power *= arch_scale_freq_power(sd, cpu);
+@@ -3821,12 +3822,16 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
+ 		power >>= SCHED_LOAD_SHIFT;
+ 	}
+ 
+-	power *= scale_rt_power(cpu);
++	scale_rt = scale_rt_power(cpu);
++	power *= scale_rt;
+ 	power >>= SCHED_LOAD_SHIFT;
+ 
+ 	if (!power)
+ 		power = 1;
+ 
++	WARN_ONCE((long)power <= 0, "group %p cpu_power = %ld; scale_rt = %ld",
++		  sdg, power, scale_rt);
++
+ 	cpu_rq(cpu)->cpu_power = power;
+ 	sdg->cpu_power = power;
+ }
+@@ -3850,6 +3855,8 @@ static void update_group_power(struct sched_domain *sd, int cpu)
+ 		group = group->next;
+ 	} while (group != child->groups);
+ 
++	WARN_ONCE((long)power <= 0, "group %p cpu_power = %ld", sdg, power);
++
+ 	sdg->cpu_power = power;
+ }
+ 
+@@ -3932,7 +3940,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
+ 	}
+ 
+ 	/* Adjust by relative CPU power of the group */
+-	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
++	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) /
++		sched_group_power(group);
+ 
+ 	/*
+ 	 * Consider the group unbalanced when the imbalance is larger
+@@ -8104,7 +8113,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+ 
+ 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
+ 
+-		printk(KERN_CONT " %s", str);
++		printk(KERN_CONT " group %p cpus %s", group, str);
+ 		if (group->cpu_power != SCHED_LOAD_SCALE) {
+ 			printk(KERN_CONT " (cpu_power = %d)",
+ 				group->cpu_power);
+@@ -11190,3 +11199,16 @@ void synchronize_sched_expedited(void)
+ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+ 
+ #endif /* #else #ifndef CONFIG_SMP */
++
++#ifdef CONFIG_SMP
++/* Fix up and warn about group with cpu_power = 0 */
++unsigned int sched_warn_zero_power(struct sched_group *group)
++{
++	static char str[256];
++
++	cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
++	WARN_ONCE(1, "group %p cpus %s cpu_power = 0", group, str);
++
++	return SCHED_LOAD_SCALE;
++}
++#endif
+diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
+index d53c9c7..7119d8d 100644
+--- a/kernel/sched_fair.c
++++ b/kernel/sched_fair.c
+@@ -1354,7 +1354,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
+ 		}
+ 
+ 		/* Adjust by relative CPU power of the group */
+-		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
++		avg_load = (avg_load * SCHED_LOAD_SCALE) /
++			sched_group_power(group);
+ 
+ 		if (local_group) {
+ 			this_load = avg_load;

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch)
@@ -0,0 +1,36 @@
+From: Changli Gao <xiaosuo at gmail.com>
+Date: Tue, 29 Jun 2010 13:09:18 +0200
+Subject: [PATCH] splice: direct_splice_actor() should not use pos in sd
+
+commit 2cb4b05e7647891b46b91c07c9a60304803d1688 upstream.
+
+direct_splice_actor() shouldn't use sd->pos, as sd->pos is for file reading,
+file->f_pos should be used instead.
+
+Signed-off-by: Changli Gao <xiaosuo at gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi at suse.cz>
+----
+ fs/splice.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+Signed-off-by: Jens Axboe <jaxboe at fusionio.com>
+---
+ fs/splice.c |    3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/fs/splice.c b/fs/splice.c
+index 740e6b9..4190049 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1282,7 +1282,8 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
+ {
+ 	struct file *file = sd->u.file;
+ 
+-	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
++	return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
++			      sd->flags);
+ }
+ 
+ /**
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.42.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.42.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.42.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.42.patch)
@@ -0,0 +1,2564 @@
+diff --git a/Makefile b/Makefile
+index 46e401f..fbd73d1 100644
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 499a133..9161338 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -463,7 +463,8 @@ _GLOBAL(disable_kernel_fp)
+  * wait for the flag to change, indicating this kernel is going away but
+  * the slave code for the next one is at addresses 0 to 100.
+  *
+- * This is used by all slaves.
++ * This is used by all slaves, even those that did not find a matching
++ * paca in the secondary startup code.
+  *
+  * Physical (hardware) cpu id should be in r3.
+  */
+@@ -472,10 +473,6 @@ _GLOBAL(kexec_wait)
+ 1:	mflr	r5
+ 	addi	r5,r5,kexec_flag-1b
+ 
+-	li      r4,KEXEC_STATE_REAL_MODE
+-	stb     r4,PACAKEXECSTATE(r13)
+-	SYNC
+-
+ 99:	HMT_LOW
+ #ifdef CONFIG_KEXEC		/* use no memory without kexec */
+ 	lwz	r4,0(r5)
+@@ -500,11 +497,17 @@ kexec_flag:
+  *
+  * get phys id from paca
+  * switch to real mode
++ * mark the paca as no longer used
+  * join other cpus in kexec_wait(phys_id)
+  */
+ _GLOBAL(kexec_smp_wait)
+ 	lhz	r3,PACAHWCPUID(r13)
+ 	bl	real_mode
++
++	li	r4,KEXEC_STATE_REAL_MODE
++	stb	r4,PACAKEXECSTATE(r13)
++	SYNC
++
+ 	b	.kexec_wait
+ 
+ /*
+diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
+index 8077409..93636ca 100644
+--- a/arch/powerpc/oprofile/op_model_power4.c
++++ b/arch/powerpc/oprofile/op_model_power4.c
+@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
+ 	return is_kernel;
+ }
+ 
++static bool pmc_overflow(unsigned long val)
++{
++	if ((int)val < 0)
++		return true;
++
++	/*
++	 * Events on POWER7 can roll back if a speculative event doesn't
++	 * eventually complete. Unfortunately in some rare cases they will
++	 * raise a performance monitor exception. We need to catch this to
++	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
++	 * cycles from overflow.
++	 *
++	 * We only do this if the first pass fails to find any overflowing
++	 * PMCs because a user might set a period of less than 256 and we
++	 * don't want to mistakenly reset them.
++	 */
++	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
++		return true;
++
++	return false;
++}
++
+ static void power4_handle_interrupt(struct pt_regs *regs,
+ 				    struct op_counter_config *ctr)
+ {
+@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
+ 
+ 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
+ 		val = classic_ctr_read(i);
+-		if (val < 0) {
++		if (pmc_overflow(val)) {
+ 			if (oprofile_running && ctr[i].enabled) {
+ 				oprofile_add_ext_sample(pc, regs, i, is_kernel);
+ 				classic_ctr_write(i, reset_value[i]);
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index d2c6c93..61c5874 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -42,7 +42,7 @@
+  * Returns 0 if the range is valid, nonzero otherwise.
+  *
+  * This is equivalent to the following test:
+- * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
++ * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
+  *
+  * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+  */
+diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
+index 400be99..fc5d470 100644
+--- a/arch/x86/kernel/amd_iommu_init.c
++++ b/arch/x86/kernel/amd_iommu_init.c
+@@ -640,8 +640,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ {
+ 	u8 *p = (u8 *)h;
+ 	u8 *end = p, flags = 0;
+-	u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
+-	u32 ext_flags = 0;
++	u16 devid = 0, devid_start = 0, devid_to = 0;
++	u32 dev_i, ext_flags = 0;
+ 	bool alias = false;
+ 	struct ivhd_entry *e;
+ 
+@@ -796,7 +796,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ /* Initializes the device->iommu mapping for the driver */
+ static int __init init_iommu_devices(struct amd_iommu *iommu)
+ {
+-	u16 i;
++	u32 i;
+ 
+ 	for (i = iommu->first_device; i <= iommu->last_device; ++i)
+ 		set_iommu_for_device(iommu, i);
+@@ -1068,7 +1068,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
+  */
+ static void init_device_table(void)
+ {
+-	u16 devid;
++	u32 devid;
+ 
+ 	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
+ 		set_dev_entry_bit(devid, DEV_ENTRY_VALID);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 396c693..6e082dc 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -567,8 +567,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 	}
+ #endif
+ 
+-	/* As a rule processors have APIC timer running in deep C states */
+-	if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
++	/*
++	 * Family 0x12 and above processors have APIC timer
++	 * running in deep C states.
++	 */
++	if (c->x86 > 0x11)
+ 		set_cpu_cap(c, X86_FEATURE_ARAT);
+ 
+ 	/*
+@@ -584,10 +587,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+ 		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+ 		 */
+ 		u64 mask;
++		int err;
+ 
+-		rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+-		mask |= (1 << 10);
+-		wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
++		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
++		if (err == 0) {
++			mask |= (1 << 10);
++			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
++		}
+ 	}
+ }
+ 
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 4cf7956..c40c432 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -298,7 +298,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ {
+ 	set_user_gs(regs, 0);
+ 	regs->fs		= 0;
+-	set_fs(USER_DS);
+ 	regs->ds		= __USER_DS;
+ 	regs->es		= __USER_DS;
+ 	regs->ss		= __USER_DS;
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 868fdb4..39493bc 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -356,7 +356,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+ 	regs->cs		= __USER_CS;
+ 	regs->ss		= __USER_DS;
+ 	regs->flags		= 0x200;
+-	set_fs(USER_DS);
+ 	/*
+ 	 * Free the old FP and other extended state
+ 	 */
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index 6ba0f7b..af8debd 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -72,7 +72,7 @@ ENTRY(copy_to_user)
+ 	addq %rdx,%rcx
+ 	jc bad_to_user
+ 	cmpq TI_addr_limit(%rax),%rcx
+-	jae bad_to_user
++	ja bad_to_user
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+ ENDPROC(copy_to_user)
+@@ -85,7 +85,7 @@ ENTRY(copy_from_user)
+ 	addq %rdx,%rcx
+ 	jc bad_from_user
+ 	cmpq TI_addr_limit(%rax),%rcx
+-	jae bad_from_user
++	ja bad_from_user
+ 	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+ 	CFI_ENDPROC
+ ENDPROC(copy_from_user)
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 6ec047d..356a799 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1141,7 +1141,7 @@ static void drop_other_mm_ref(void *info)
+ 
+ 	active_mm = percpu_read(cpu_tlbstate.active_mm);
+ 
+-	if (active_mm == mm)
++	if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+ 		leave_mm(smp_processor_id());
+ 
+ 	/* If this cpu still has a stale cr3 reference, then make sure
+diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
+index 8bff7e7..1b2b73f 100644
+--- a/arch/x86/xen/multicalls.c
++++ b/arch/x86/xen/multicalls.c
+@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
+ 	unsigned argidx = roundup(b->argidx, sizeof(u64));
+ 
+ 	BUG_ON(preemptible());
+-	BUG_ON(b->argidx > MC_ARGS);
++	BUG_ON(b->argidx >= MC_ARGS);
+ 
+ 	if (b->mcidx == MC_BATCH ||
+-	    (argidx + args) > MC_ARGS) {
++	    (argidx + args) >= MC_ARGS) {
+ 		mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
+ 		xen_mc_flush();
+ 		argidx = roundup(b->argidx, sizeof(u64));
+@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
+ 	ret.args = &b->args[argidx];
+ 	b->argidx = argidx + args;
+ 
+-	BUG_ON(b->argidx > MC_ARGS);
++	BUG_ON(b->argidx >= MC_ARGS);
+ 	return ret;
+ }
+ 
+@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
+ 	struct multicall_space ret = { NULL, NULL };
+ 
+ 	BUG_ON(preemptible());
+-	BUG_ON(b->argidx > MC_ARGS);
++	BUG_ON(b->argidx >= MC_ARGS);
+ 
+ 	if (b->mcidx == 0)
+ 		return ret;
+@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
+ 	if (b->entries[b->mcidx - 1].op != op)
+ 		return ret;
+ 
+-	if ((b->argidx + size) > MC_ARGS)
++	if ((b->argidx + size) >= MC_ARGS)
+ 		return ret;
+ 
+ 	ret.mc = &b->entries[b->mcidx - 1];
+ 	ret.args = &b->args[b->argidx];
+ 	b->argidx += size;
+ 
+-	BUG_ON(b->argidx > MC_ARGS);
++	BUG_ON(b->argidx >= MC_ARGS);
+ 	return ret;
+ }
+ 
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 71da511..cffd737 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -310,6 +310,7 @@ void blk_unplug_timeout(unsigned long data)
+ 	trace_block_unplug_timer(q);
+ 	kblockd_schedule_work(q, &q->unplug_work);
+ }
++EXPORT_SYMBOL(blk_put_queue);
+ 
+ void blk_unplug(struct request_queue *q)
+ {
+@@ -612,6 +613,7 @@ int blk_get_queue(struct request_queue *q)
+ 
+ 	return 1;
+ }
++EXPORT_SYMBOL(blk_get_queue);
+ 
+ static inline void blk_free_request(struct request_queue *q, struct request *rq)
+ {
+diff --git a/block/blk.h b/block/blk.h
+index 5ee3d7e..1414836 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -62,7 +62,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
+ 				return rq;
+ 		}
+ 
+-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
++		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
++		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
+ 			return NULL;
+ 	}
+ }
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 0b09703..d71d2c0 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1026,12 +1026,6 @@ static void acpi_device_set_id(struct acpi_device *device)
+ 		if (ACPI_IS_ROOT_DEVICE(device)) {
+ 			acpi_add_id(device, ACPI_SYSTEM_HID);
+ 			break;
+-		} else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
+-			/* \_SB_, the only root-level namespace device */
+-			acpi_add_id(device, ACPI_BUS_HID);
+-			strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
+-			strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+-			break;
+ 		}
+ 
+ 		status = acpi_get_object_info(device->handle, &info);
+@@ -1064,6 +1058,12 @@ static void acpi_device_set_id(struct acpi_device *device)
+ 			acpi_add_id(device, ACPI_BAY_HID);
+ 		else if (ACPI_SUCCESS(acpi_dock_match(device)))
+ 			acpi_add_id(device, ACPI_DOCK_HID);
++		else if (!acpi_device_hid(device) &&
++			 ACPI_IS_ROOT_DEVICE(device->parent)) {
++			acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
++			strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
++			strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
++		}
+ 
+ 		break;
+ 	case ACPI_BUS_TYPE_POWER:
+diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
+index f0bad9b..ccd2694 100644
+--- a/drivers/ata/pata_cmd64x.c
++++ b/drivers/ata/pata_cmd64x.c
+@@ -2,6 +2,7 @@
+  * pata_cmd64x.c 	- CMD64x PATA for new ATA layer
+  *			  (C) 2005 Red Hat Inc
+  *			  Alan Cox <alan at lxorguk.ukuu.org.uk>
++ *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
+  *
+  * Based upon
+  * linux/drivers/ide/pci/cmd64x.c		Version 1.30	Sept 10, 2002
+@@ -39,11 +40,10 @@
+ 
+ enum {
+ 	CFR 		= 0x50,
+-		CFR_INTR_CH0  = 0x02,
+-	CNTRL 		= 0x51,
+-		CNTRL_DIS_RA0 = 0x40,
+-		CNTRL_DIS_RA1 = 0x80,
+-		CNTRL_ENA_2ND = 0x08,
++		CFR_INTR_CH0  = 0x04,
++	CNTRL		= 0x51,
++		CNTRL_CH0     = 0x04,
++		CNTRL_CH1     = 0x08,
+ 	CMDTIM 		= 0x52,
+ 	ARTTIM0 	= 0x53,
+ 	DRWTIM0 	= 0x54,
+@@ -53,9 +53,6 @@ enum {
+ 		ARTTIM23_DIS_RA2  = 0x04,
+ 		ARTTIM23_DIS_RA3  = 0x08,
+ 		ARTTIM23_INTR_CH1 = 0x10,
+-	ARTTIM2 	= 0x57,
+-	ARTTIM3 	= 0x57,
+-	DRWTIM23	= 0x58,
+ 	DRWTIM2 	= 0x58,
+ 	BRST 		= 0x59,
+ 	DRWTIM3 	= 0x5b,
+@@ -63,14 +60,11 @@ enum {
+ 	MRDMODE		= 0x71,
+ 		MRDMODE_INTR_CH0 = 0x04,
+ 		MRDMODE_INTR_CH1 = 0x08,
+-		MRDMODE_BLK_CH0  = 0x10,
+-		MRDMODE_BLK_CH1	 = 0x20,
+ 	BMIDESR0	= 0x72,
+ 	UDIDETCR0	= 0x73,
+ 	DTPR0		= 0x74,
+ 	BMIDECR1	= 0x78,
+ 	BMIDECSR	= 0x79,
+-	BMIDESR1	= 0x7A,
+ 	UDIDETCR1	= 0x7B,
+ 	DTPR1		= 0x7C
+ };
+@@ -147,7 +141,9 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
+ 	/* Now convert the clocks into values we can actually stuff into
+ 	   the chip */
+ 
+-	if (t.recover > 1)
++	if (t.recover == 16)
++		t.recover = 0;
++	else if (t.recover > 1)
+ 		t.recover--;
+ 	else
+ 		t.recover = 15;
+@@ -245,7 +241,7 @@ static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
+ 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ 	u8 dma_intr;
+ 	int dma_mask = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
+-	int dma_reg = ap->port_no ? ARTTIM2 : CFR;
++	int dma_reg = ap->port_no ? ARTTIM23 : CFR;
+ 
+ 	ata_bmdma_stop(qc);
+ 
+@@ -294,8 +290,6 @@ static struct ata_port_operations cmd648_port_ops = {
+ 
+ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+-	u32 class_rev;
+-
+ 	static const struct ata_port_info cmd_info[6] = {
+ 		{	/* CMD 643 - no UDMA */
+ 			.flags = ATA_FLAG_SLAVE_POSS,
+@@ -337,27 +331,41 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 			.port_ops = &cmd648_port_ops
+ 		}
+ 	};
+-	const struct ata_port_info *ppi[] = { &cmd_info[id->driver_data], NULL };
+-	u8 mrdmode;
++	const struct ata_port_info *ppi[] = {
++		&cmd_info[id->driver_data],
++		&cmd_info[id->driver_data],
++		NULL
++	};
++	u8 mrdmode, reg;
+ 	int rc;
++	struct pci_dev *bridge = pdev->bus->self;
++	/* mobility split bridges don't report enabled ports correctly */
++	int port_ok = !(bridge && bridge->vendor ==
++			PCI_VENDOR_ID_MOBILITY_ELECTRONICS);
++	/* all (with exceptions below) apart from 643 have CNTRL_CH0 bit */
++	int cntrl_ch0_ok = (id->driver_data != 0);
+ 
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc)
+ 		return rc;
+ 
+-	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+-	class_rev &= 0xFF;
+-
+ 	if (id->driver_data == 0)	/* 643 */
+ 		ata_pci_bmdma_clear_simplex(pdev);
+ 
+ 	if (pdev->device == PCI_DEVICE_ID_CMD_646) {
+ 		/* Does UDMA work ? */
+-		if (class_rev > 4)
++		if (pdev->revision > 4) {
+ 			ppi[0] = &cmd_info[2];
++			ppi[1] = &cmd_info[2];
++		}
+ 		/* Early rev with other problems ? */
+-		else if (class_rev == 1)
++		else if (pdev->revision == 1) {
+ 			ppi[0] = &cmd_info[3];
++			ppi[1] = &cmd_info[3];
++		}
++		/* revs 1,2 have no CNTRL_CH0 */
++		if (pdev->revision < 3)
++			cntrl_ch0_ok = 0;
+ 	}
+ 
+ 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+@@ -366,6 +374,20 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	mrdmode |= 0x02;	/* Memory read line enable */
+ 	pci_write_config_byte(pdev, MRDMODE, mrdmode);
+ 
++	/* check for enabled ports */
++	pci_read_config_byte(pdev, CNTRL, &reg);
++	if (!port_ok)
++		dev_printk(KERN_NOTICE, &pdev->dev, "Mobility Bridge detected, ignoring CNTRL port enable/disable\n");
++	if (port_ok && cntrl_ch0_ok && !(reg & CNTRL_CH0)) {
++		dev_printk(KERN_NOTICE, &pdev->dev, "Primary port is disabled\n");
++		ppi[0] = &ata_dummy_port_info;
++
++	}
++	if (port_ok && !(reg & CNTRL_CH1)) {
++		dev_printk(KERN_NOTICE, &pdev->dev, "Secondary port is disabled\n");
++		ppi[1] = &ata_dummy_port_info;
++	}
++
+ 	/* Force PIO 0 here.. */
+ 
+ 	/* PPC specific fixup copied from old driver */
+diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
+index d7f2da1..b9d8836 100644
+--- a/drivers/ata/pata_hpt366.c
++++ b/drivers/ata/pata_hpt366.c
+@@ -344,7 +344,6 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	const struct ata_port_info *ppi[] = { &info_hpt366, NULL };
+ 
+ 	void *hpriv = NULL;
+-	u32 class_rev;
+ 	u32 reg1;
+ 	int rc;
+ 
+@@ -352,13 +351,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (rc)
+ 		return rc;
+ 
+-	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+-	class_rev &= 0xFF;
+-
+ 	/* May be a later chip in disguise. Check */
+ 	/* Newer chips are not in the HPT36x driver. Ignore them */
+-	if (class_rev > 2)
+-			return -ENODEV;
++	if (dev->revision > 2)
++		return -ENODEV;
+ 
+ 	hpt36x_init_chipset(dev);
+ 
+diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
+index ec07c53..5af7f19 100644
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -789,9 +789,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	static const int MHz[4] = { 33, 40, 50, 66 };
+ 	void *private_data = NULL;
+ 	const struct ata_port_info *ppi[] = { NULL, NULL };
+-
++	u8 rev = dev->revision;
+ 	u8 irqmask;
+-	u32 class_rev;
+ 	u8 mcr1;
+ 	u32 freq;
+ 	int prefer_dpll = 1;
+@@ -806,19 +805,16 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (rc)
+ 		return rc;
+ 
+-	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+-	class_rev &= 0xFF;
+-
+ 	if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+ 		/* May be a later chip in disguise. Check */
+ 		/* Older chips are in the HPT366 driver. Ignore them */
+-		if (class_rev < 3)
++		if (rev < 3)
+ 			return -ENODEV;
+ 		/* N series chips have their own driver. Ignore */
+-		if (class_rev == 6)
++		if (rev == 6)
+ 			return -ENODEV;
+ 
+-		switch(class_rev) {
++		switch(rev) {
+ 			case 3:
+ 				ppi[0] = &info_hpt370;
+ 				chip_table = &hpt370;
+@@ -834,28 +830,29 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 				chip_table = &hpt372;
+ 				break;
+ 			default:
+-				printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype please report (%d).\n", class_rev);
++				printk(KERN_ERR "pata_hpt37x: Unknown HPT366 "
++				       "subtype, please report (%d).\n", rev);
+ 				return -ENODEV;
+ 		}
+ 	} else {
+ 		switch(dev->device) {
+ 			case PCI_DEVICE_ID_TTI_HPT372:
+ 				/* 372N if rev >= 2*/
+-				if (class_rev >= 2)
++				if (rev >= 2)
+ 					return -ENODEV;
+ 				ppi[0] = &info_hpt372;
+ 				chip_table = &hpt372a;
+ 				break;
+ 			case PCI_DEVICE_ID_TTI_HPT302:
+ 				/* 302N if rev > 1 */
+-				if (class_rev > 1)
++				if (rev > 1)
+ 					return -ENODEV;
+ 				ppi[0] = &info_hpt372;
+ 				/* Check this */
+ 				chip_table = &hpt302;
+ 				break;
+ 			case PCI_DEVICE_ID_TTI_HPT371:
+-				if (class_rev > 1)
++				if (rev > 1)
+ 					return -ENODEV;
+ 				ppi[0] = &info_hpt372;
+ 				chip_table = &hpt371;
+diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
+index d9f2913..100f227 100644
+--- a/drivers/ata/pata_hpt3x2n.c
++++ b/drivers/ata/pata_hpt3x2n.c
+@@ -452,10 +452,8 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 		.port_ops = &hpt3x2n_port_ops
+ 	};
+ 	const struct ata_port_info *ppi[] = { &info, NULL };
+-
++	u8 rev = dev->revision;
+ 	u8 irqmask;
+-	u32 class_rev;
+-
+ 	unsigned int pci_mhz;
+ 	unsigned int f_low, f_high;
+ 	int adjust;
+@@ -467,26 +465,23 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 	if (rc)
+ 		return rc;
+ 
+-	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
+-	class_rev &= 0xFF;
+-
+ 	switch(dev->device) {
+ 		case PCI_DEVICE_ID_TTI_HPT366:
+-			if (class_rev < 6)
++			if (rev < 6)
+ 				return -ENODEV;
+ 			break;
+ 		case PCI_DEVICE_ID_TTI_HPT371:
+-			if (class_rev < 2)
++			if (rev < 2)
+ 				return -ENODEV;
+ 			/* 371N if rev > 1 */
+ 			break;
+ 		case PCI_DEVICE_ID_TTI_HPT372:
+ 			/* 372N if rev >= 2*/
+-			if (class_rev < 2)
++			if (rev < 2)
+ 				return -ENODEV;
+ 			break;
+ 		case PCI_DEVICE_ID_TTI_HPT302:
+-			if (class_rev < 2)
++			if (rev < 2)
+ 				return -ENODEV;
+ 			break;
+ 		case PCI_DEVICE_ID_TTI_HPT372N:
+diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
+index 4cb649d..a2ace48 100644
+--- a/drivers/ata/pata_sil680.c
++++ b/drivers/ata/pata_sil680.c
+@@ -212,13 +212,11 @@ static struct ata_port_operations sil680_port_ops = {
+ 
+ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
+ {
+-	u32 class_rev	= 0;
+ 	u8 tmpbyte	= 0;
+ 
+-        pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
+-        class_rev &= 0xff;
+         /* FIXME: double check */
+-	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
++	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
++			      pdev->revision ? 1 : 255);
+ 
+ 	pci_write_config_byte(pdev, 0x80, 0x00);
+ 	pci_write_config_byte(pdev, 0x84, 0x00);
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 4f68843..2ef55c4 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -498,7 +498,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
+ 	struct kobject *kobj;
+ 
+ 	mutex_lock(&brd_devices_mutex);
+-	brd = brd_init_one(dev & MINORMASK);
++	brd = brd_init_one(MINOR(dev) >> part_shift);
+ 	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
+ 	mutex_unlock(&brd_devices_mutex);
+ 
+@@ -531,15 +531,18 @@ static int __init brd_init(void)
+ 	if (max_part > 0)
+ 		part_shift = fls(max_part);
+ 
++	if ((1UL << part_shift) > DISK_MAX_PARTS)
++		return -EINVAL;
++
+ 	if (rd_nr > 1UL << (MINORBITS - part_shift))
+ 		return -EINVAL;
+ 
+ 	if (rd_nr) {
+ 		nr = rd_nr;
+-		range = rd_nr;
++		range = rd_nr << part_shift;
+ 	} else {
+ 		nr = CONFIG_BLK_DEV_RAM_COUNT;
+-		range = 1UL << (MINORBITS - part_shift);
++		range = 1UL << MINORBITS;
+ 	}
+ 
+ 	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
+@@ -578,7 +581,7 @@ static void __exit brd_exit(void)
+ 	unsigned long range;
+ 	struct brd_device *brd, *next;
+ 
+-	range = rd_nr ? rd_nr :  1UL << (MINORBITS - part_shift);
++	range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
+ 
+ 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
+ 		brd_del_one(brd);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1c21a3f..8ec2d70 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1572,7 +1572,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
+ 	struct kobject *kobj;
+ 
+ 	mutex_lock(&loop_devices_mutex);
+-	lo = loop_init_one(dev & MINORMASK);
++	lo = loop_init_one(MINOR(dev) >> part_shift);
+ 	kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM);
+ 	mutex_unlock(&loop_devices_mutex);
+ 
+@@ -1605,15 +1605,18 @@ static int __init loop_init(void)
+ 	if (max_part > 0)
+ 		part_shift = fls(max_part);
+ 
++	if ((1UL << part_shift) > DISK_MAX_PARTS)
++		return -EINVAL;
++
+ 	if (max_loop > 1UL << (MINORBITS - part_shift))
+ 		return -EINVAL;
+ 
+ 	if (max_loop) {
+ 		nr = max_loop;
+-		range = max_loop;
++		range = max_loop << part_shift;
+ 	} else {
+ 		nr = 8;
+-		range = 1UL << (MINORBITS - part_shift);
++		range = 1UL << MINORBITS;
+ 	}
+ 
+ 	if (register_blkdev(LOOP_MAJOR, "loop"))
+@@ -1652,7 +1655,7 @@ static void __exit loop_exit(void)
+ 	unsigned long range;
+ 	struct loop_device *lo, *next;
+ 
+-	range = max_loop ? max_loop :  1UL << (MINORBITS - part_shift);
++	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;
+ 
+ 	list_for_each_entry_safe(lo, next, &loop_devices, lo_list)
+ 		loop_del_one(lo);
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index cc923a5..26ada47 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -754,6 +754,12 @@ static int __init nbd_init(void)
+ 	if (max_part > 0)
+ 		part_shift = fls(max_part);
+ 
++	if ((1UL << part_shift) > DISK_MAX_PARTS)
++		return -EINVAL;
++
++	if (nbds_max > 1UL << (MINORBITS - part_shift))
++		return -EINVAL;
++
+ 	for (i = 0; i < nbds_max; i++) {
+ 		struct gendisk *disk = alloc_disk(1 << part_shift);
+ 		if (!disk)
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index 4365717..1e116ac 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -138,8 +138,8 @@ static int i8k_smm(struct smm_regs *regs)
+ 		"movl %%edi,20(%%rax)\n\t"
+ 		"popq %%rdx\n\t"
+ 		"movl %%edx,0(%%rax)\n\t"
+-		"lahf\n\t"
+-		"shrl $8,%%eax\n\t"
++		"pushfq\n\t"
++		"popq %%rax\n\t"
+ 		"andl $1,%%eax\n"
+ 		:"=a"(rc)
+ 		:    "a"(regs)
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index c18e65e..c7ae026 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1183,12 +1183,28 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+ 
+ 	unlock_policy_rwsem_write(cpu);
+ 
++	cpufreq_debug_enable_ratelimit();
++
++#ifdef CONFIG_HOTPLUG_CPU
++	/* when the CPU which is the parent of the kobj is hotplugged
++	 * offline, check for siblings, and create cpufreq sysfs interface
++	 * and symlinks
++	 */
++	if (unlikely(cpumask_weight(data->cpus) > 1)) {
++		/* first sibling now owns the new sysfs dir */
++		cpumask_clear_cpu(cpu, data->cpus);
++		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
++
++		/* finally remove our own symlink */
++		lock_policy_rwsem_write(cpu);
++		__cpufreq_remove_dev(sys_dev);
++	}
++#endif
++
+ 	free_cpumask_var(data->related_cpus);
+ 	free_cpumask_var(data->cpus);
+ 	kfree(data);
+-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ 
+-	cpufreq_debug_enable_ratelimit();
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
+index 5a62d67..6bbc164 100644
+--- a/drivers/cpufreq/cpufreq_stats.c
++++ b/drivers/cpufreq/cpufreq_stats.c
+@@ -164,17 +164,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+ 	return -1;
+ }
+ 
++/* should be called late in the CPU removal sequence so that the stats
++ * memory is still available in case someone tries to use it.
++ */
+ static void cpufreq_stats_free_table(unsigned int cpu)
+ {
+ 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+-	if (policy && policy->cpu == cpu)
+-		sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ 	if (stat) {
+ 		kfree(stat->time_in_state);
+ 		kfree(stat);
+ 	}
+ 	per_cpu(cpufreq_stats_table, cpu) = NULL;
++}
++
++/* must be called early in the CPU removal sequence (before
++ * cpufreq_remove_dev) so that policy is still valid.
++ */
++static void cpufreq_stats_free_sysfs(unsigned int cpu)
++{
++	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++	if (policy && policy->cpu == cpu)
++		sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ 	if (policy)
+ 		cpufreq_cpu_put(policy);
+ }
+@@ -315,6 +325,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ 	case CPU_ONLINE_FROZEN:
+ 		cpufreq_update_policy(cpu);
+ 		break;
++	case CPU_DOWN_PREPARE:
++		cpufreq_stats_free_sysfs(cpu);
++		break;
+ 	case CPU_DEAD:
+ 	case CPU_DEAD_FROZEN:
+ 		cpufreq_stats_free_table(cpu);
+@@ -323,9 +336,11 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+ 	return NOTIFY_OK;
+ }
+ 
++/* priority=1 so this will get called before cpufreq_remove_dev */
+ static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
+ {
+ 	.notifier_call = cpufreq_stat_cpu_callback,
++	.priority = 1,
+ };
+ 
+ static struct notifier_block notifier_policy_block = {
+@@ -372,6 +387,7 @@ static void __exit cpufreq_stats_exit(void)
+ 	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
+ 	for_each_online_cpu(cpu) {
+ 		cpufreq_stats_free_table(cpu);
++		cpufreq_stats_free_sysfs(cpu);
+ 	}
+ }
+ 
+diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
+index f8e57c6..0537437 100644
+--- a/drivers/cpuidle/governors/menu.c
++++ b/drivers/cpuidle/governors/menu.c
+@@ -185,6 +185,7 @@ static int menu_select(struct cpuidle_device *dev)
+ 	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ 	int i;
+ 	int multiplier;
++	struct timespec t;
+ 
+ 	if (data->needs_update) {
+ 		menu_update(dev);
+@@ -199,8 +200,9 @@ static int menu_select(struct cpuidle_device *dev)
+ 		return 0;
+ 
+ 	/* determine the expected residency time, round up */
++	t = ktime_to_timespec(tick_nohz_get_sleep_length());
+ 	data->expected_us =
+-	    DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000);
++		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
+ 
+ 
+ 	data->bucket = which_bucket(data->expected_us);
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 44626bc..96b39fc 100644
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 41bb76f..3db54e9 100644
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 57f32f0..03345bb 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -348,6 +348,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
+ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ 				  sector_t start, sector_t len, void *data)
+ {
++	struct request_queue *q;
+ 	struct queue_limits *limits = data;
+ 	struct block_device *bdev = dev->bdev;
+ 	sector_t dev_size =
+@@ -356,6 +357,22 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+ 		limits->logical_block_size >> SECTOR_SHIFT;
+ 	char b[BDEVNAME_SIZE];
+ 
++	/*
++	 * Some devices exist without request functions,
++	 * such as loop devices not yet bound to backing files.
++	 * Forbid the use of such devices.
++	 */
++	q = bdev_get_queue(bdev);
++	if (!q || !q->make_request_fn) {
++		DMWARN("%s: %s is not yet initialised: "
++		       "start=%llu, len=%llu, dev_size=%llu",
++		       dm_device_name(ti->table->md), bdevname(bdev, b),
++		       (unsigned long long)start,
++		       (unsigned long long)len,
++		       (unsigned long long)dev_size);
++		return 1;
++	}
++
+ 	if (!dev_size)
+ 		return 0;
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 697a2c7..7c5129f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -2262,7 +2262,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
+ 		if (rdev->raid_disk == -1)
+ 			return -EEXIST;
+ 		/* personality does all needed checks */
+-		if (rdev->mddev->pers->hot_add_disk == NULL)
++		if (rdev->mddev->pers->hot_remove_disk == NULL)
+ 			return -EINVAL;
+ 		err = rdev->mddev->pers->
+ 			hot_remove_disk(rdev->mddev, rdev->raid_disk);
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 2394973..cf59d64 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -127,7 +127,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+ 
+ static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+ {
+-	bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16);
++	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+ }
+ 
+ /* Find first data disk in a raid6 stripe */
+@@ -446,7 +446,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ 		bi = &sh->dev[i].req;
+ 
+ 		bi->bi_rw = rw;
+-		if (rw == WRITE)
++		if (rw & WRITE)
+ 			bi->bi_end_io = raid5_end_write_request;
+ 		else
+ 			bi->bi_end_io = raid5_end_read_request;
+@@ -480,13 +480,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
+ 			bi->bi_io_vec[0].bv_offset = 0;
+ 			bi->bi_size = STRIPE_SIZE;
+ 			bi->bi_next = NULL;
+-			if (rw == WRITE &&
++			if ((rw & WRITE) &&
+ 			    test_bit(R5_ReWrite, &sh->dev[i].flags))
+ 				atomic_add(STRIPE_SECTORS,
+ 					&rdev->corrected_errors);
+ 			generic_make_request(bi);
+ 		} else {
+-			if (rw == WRITE)
++			if (rw & WRITE)
+ 				set_bit(STRIPE_DEGRADED, &sh->state);
+ 			pr_debug("skip op %ld on disc %d for sector %llu\n",
+ 				bi->bi_rw, i, (unsigned long long)sh->sector);
+diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
+index db6de74..b140257 100644
+--- a/drivers/mtd/mtdconcat.c
++++ b/drivers/mtd/mtdconcat.c
+@@ -306,7 +306,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+ 	if (!(mtd->flags & MTD_WRITEABLE))
+ 		return -EROFS;
+ 
+-	ops->retlen = 0;
++	ops->retlen = ops->oobretlen = 0;
+ 
+ 	for (i = 0; i < concat->num_subdev; i++) {
+ 		struct mtd_info *subdev = concat->subdev[i];
+@@ -321,7 +321,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+ 			devops.len = subdev->size - to;
+ 
+ 		err = subdev->write_oob(subdev, to, &devops);
+-		ops->retlen += devops.retlen;
++		ops->retlen += devops.oobretlen;
+ 		if (err)
+ 			return err;
+ 
+diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
+index dbceac7..1f423f2 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
+@@ -47,7 +47,7 @@
+ #include "iwl-6000-hw.h"
+ 
+ /* Highest firmware API version supported */
+-#define IWL5000_UCODE_API_MAX 5
++#define IWL5000_UCODE_API_MAX 2
+ #define IWL5150_UCODE_API_MAX 2
+ 
+ /* Lowest firmware API version supported */
+diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
+index 54c3a9d..cb74ac6 100644
+--- a/drivers/net/wireless/p54/p54usb.c
++++ b/drivers/net/wireless/p54/p54usb.c
+@@ -80,6 +80,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
+ 	{USB_DEVICE(0x06b9, 0x0121)},	/* Thomson SpeedTouch 121g */
+ 	{USB_DEVICE(0x0707, 0xee13)},   /* SMC 2862W-G version 2 */
+ 	{USB_DEVICE(0x083a, 0x4521)},   /* Siemens Gigaset USB Adapter 54 version 2 */
++	{USB_DEVICE(0x083a, 0xc501)},	/* Zoom Wireless-G 4410 */
+ 	{USB_DEVICE(0x083a, 0xf503)},	/* Accton FD7050E ver 1010ec  */
+ 	{USB_DEVICE(0x0846, 0x4240)},	/* Netgear WG111 (v2) */
+ 	{USB_DEVICE(0x0915, 0x2000)},	/* Cohiba Proto board */
+diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
+index cc8ec3a..1fd6c5f 100644
+--- a/drivers/pci/hotplug/pcihp_slot.c
++++ b/drivers/pci/hotplug/pcihp_slot.c
+@@ -158,6 +158,47 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ 	 */
+ }
+ 
++/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
++static int pci_set_payload(struct pci_dev *dev)
++{
++       int pos, ppos;
++       u16 pctl, psz;
++       u16 dctl, dsz, dcap, dmax;
++       struct pci_dev *parent;
++
++       parent = dev->bus->self;
++       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
++       if (!pos)
++               return 0;
++
++       /* Read Device MaxPayload capability and setting */
++       pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
++       pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
++       dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
++       dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
++
++       /* Read Parent MaxPayload setting */
++       ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
++       if (!ppos)
++               return 0;
++       pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
++       psz = (pctl &  PCI_EXP_DEVCTL_PAYLOAD) >> 5;
++
++       /* If parent payload > device max payload -> error
++        * If parent payload > device payload -> set speed
++        * If parent payload <= device payload -> do nothing
++        */
++       if (psz > dmax)
++               return -1;
++       else if (psz > dsz) {
++               dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
++               pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
++                                     (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
++                                     (psz << 5));
++       }
++       return 0;
++}
++
+ void pci_configure_slot(struct pci_dev *dev)
+ {
+ 	struct pci_dev *cdev;
+@@ -169,6 +210,10 @@ void pci_configure_slot(struct pci_dev *dev)
+ 			(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
+ 		return;
+ 
++       ret = pci_set_payload(dev);
++       if (ret)
++               dev_warn(&dev->dev, "could not set device max payload\n");
++
+ 	memset(&hpp, 0, sizeof(hpp));
+ 	ret = pci_get_hp_params(dev, &hpp);
+ 	if (ret)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 812d4ac..0d3326d 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -373,8 +373,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
+ 			continue;	/* Wrong type */
+ 		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
+ 			return r;	/* Exact match */
+-		if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
+-			best = r;	/* Approximating prefetchable by non-prefetchable */
++		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
++		if (r->flags & IORESOURCE_PREFETCH)
++			continue;
++		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
++		if (!best)
++			best = r;
+ 	}
+ 	return best;
+ }
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c8ece44..1e42381 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2540,6 +2540,16 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
+ 
+ #endif /* CONFIG_PCI_MSI */
+ 
++static void __devinit fixup_ti816x_class(struct pci_dev* dev)
++{
++	/* TI 816x devices do not have class code set when in PCIe boot mode */
++	if (dev->class == PCI_CLASS_NOT_DEFINED) {
++		dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
++		dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
++	}
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
++
+ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ 			  struct pci_fixup *end)
+ {
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 47291bc..04830e8 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -293,7 +293,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
+ 		kfree(sdev);
+ 		goto out;
+ 	}
+-
++	blk_get_queue(sdev->request_queue);
+ 	sdev->request_queue->queuedata = sdev;
+ 	scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+ 
+@@ -1336,8 +1336,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+ 		sdev = scsi_alloc_sdev(starget, 0, NULL);
+ 		if (!sdev)
+ 			return 0;
+-		if (scsi_device_get(sdev))
++		if (scsi_device_get(sdev)) {
++			__scsi_remove_device(sdev);
+ 			return 0;
++		}
+ 	}
+ 
+ 	sprintf(devname, "host %d channel %d id %d",
+@@ -1904,10 +1906,9 @@ struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
+ 		goto out;
+ 
+ 	sdev = scsi_alloc_sdev(starget, 0, NULL);
+-	if (sdev) {
+-		sdev->sdev_gendev.parent = get_device(&starget->dev);
++	if (sdev)
+ 		sdev->borken = 0;
+-	} else
++	else
+ 		scsi_target_reap(starget);
+ 	put_device(&starget->dev);
+  out:
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index ab6ac95..91a93e0 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -318,6 +318,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
+ 		kfree(evt);
+ 	}
+ 
++	blk_put_queue(sdev->request_queue);
+ 	/* NULL queue means the device can't be used */
+ 	sdev->request_queue = NULL;
+ 
+@@ -838,7 +839,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	struct request_queue *rq = sdev->request_queue;
+ 	struct scsi_target *starget = sdev->sdev_target;
+ 
+-	if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
++	error = scsi_device_set_state(sdev, SDEV_RUNNING);
++	if (error)
+ 		return error;
+ 
+ 	error = scsi_target_add(starget);
+@@ -849,13 +851,13 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	error = device_add(&sdev->sdev_gendev);
+ 	if (error) {
+ 		printk(KERN_INFO "error 1\n");
+-		goto out_remove;
++		return error;
+ 	}
+ 	error = device_add(&sdev->sdev_dev);
+ 	if (error) {
+ 		printk(KERN_INFO "error 2\n");
+ 		device_del(&sdev->sdev_gendev);
+-		goto out_remove;
++		return error;
+ 	}
+ 	transport_add_device(&sdev->sdev_gendev);
+ 	sdev->is_visible = 1;
+@@ -866,14 +868,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	else
+ 		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
+ 	if (error)
+-		goto out_remove;
++		return error;
+ 
+ 	if (sdev->host->hostt->change_queue_type)
+ 		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
+ 	else
+ 		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
+ 	if (error)
+-		goto out_remove;
++		return error;
+ 
+ 	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
+ 
+@@ -889,16 +891,11 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 			error = device_create_file(&sdev->sdev_gendev,
+ 					sdev->host->hostt->sdev_attrs[i]);
+ 			if (error)
+-				goto out_remove;
++				return error;
+ 		}
+ 	}
+ 
+-	return 0;
+-
+- out_remove:
+-	__scsi_remove_device(sdev);
+ 	return error;
+-
+ }
+ 
+ void __scsi_remove_device(struct scsi_device *sdev)
+diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
+index 27aa40f..7f0eda2 100644
+--- a/drivers/scsi/ultrastor.c
++++ b/drivers/scsi/ultrastor.c
+@@ -306,7 +306,7 @@ static inline int find_and_clear_bit_16(unsigned long *field)
+ 	"0: bsfw %1,%w0\n\t"
+ 	"btr %0,%1\n\t"
+ 	"jnc 0b"
+-	: "=&r" (rv), "=m" (*field) :);
++	: "=&r" (rv), "+m" (*field) :);
+ 
+   return rv;
+ }
+diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
+index 719e0c1..3c5c62de 100644
+--- a/drivers/staging/usbip/usbip_common.c
++++ b/drivers/staging/usbip/usbip_common.c
+@@ -770,7 +770,7 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu,
+ 		be32_to_cpus(&pdu->status);
+ 		be32_to_cpus(&pdu->actual_length);
+ 		be32_to_cpus(&pdu->start_frame);
+-		cpu_to_be32s(&pdu->number_of_packets);
++		be32_to_cpus(&pdu->number_of_packets);
+ 		be32_to_cpus(&pdu->error_count);
+ 	}
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 4e32da6..2fc5dd3 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1602,6 +1602,8 @@ static struct usb_device_id acm_ids[] = {
+ 	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ 	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ 	{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
++	{ NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
++	{ NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
+ 	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
+ 
+ 	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 283f019..03eed28 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -326,7 +326,8 @@ static int get_hub_status(struct usb_device *hdev,
+ {
+ 	int i, status = -ETIMEDOUT;
+ 
+-	for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
++	for (i = 0; i < USB_STS_RETRIES &&
++			(status == -ETIMEDOUT || status == -EPIPE); i++) {
+ 		status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
+ 			USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
+ 			data, sizeof(*data), USB_STS_TIMEOUT);
+@@ -342,7 +343,8 @@ static int get_port_status(struct usb_device *hdev, int port1,
+ {
+ 	int i, status = -ETIMEDOUT;
+ 
+-	for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) {
++	for (i = 0; i < USB_STS_RETRIES &&
++			(status == -ETIMEDOUT || status == -EPIPE); i++) {
+ 		status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
+ 			USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
+ 			data, sizeof(*data), USB_STS_TIMEOUT);
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 66450a1..e3170ca 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1687,7 +1687,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* newer chips have more FIFO memory than rm9200 */
+-	if (cpu_is_at91sam9260()) {
++	if (cpu_is_at91sam9260() || cpu_is_at91sam9g20()) {
+ 		udc->ep[0].maxpacket = 64;
+ 		udc->ep[3].maxpacket = 64;
+ 		udc->ep[4].maxpacket = 512;
+diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
+index c9966cc..5201374 100644
+--- a/drivers/usb/gadget/f_rndis.c
++++ b/drivers/usb/gadget/f_rndis.c
+@@ -400,8 +400,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+ 	 */
+ 	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ 			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+-		if (w_length > req->length || w_value
+-				|| w_index != rndis->ctrl_id)
++		if (w_value || w_index != rndis->ctrl_id)
+ 			goto invalid;
+ 		/* read the request; process it later */
+ 		value = w_length;
+diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
+index 948a353..67f83e5 100644
+--- a/drivers/usb/host/ohci-pci.c
++++ b/drivers/usb/host/ohci-pci.c
+@@ -207,10 +207,18 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
+  */
+ static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd)
+ {
++	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ 	struct ohci_hcd	*ohci = hcd_to_ohci(hcd);
+ 
+-	ohci->flags |= OHCI_QUIRK_SHUTDOWN;
+-	ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
++	/* Evidently nVidia fixed their later hardware; this is a guess at
++	 * the changeover point.
++	 */
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB		0x026d
++
++	if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) {
++		ohci->flags |= OHCI_QUIRK_SHUTDOWN;
++		ohci_dbg(ohci, "enabled nVidia shutdown quirk\n");
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 64cb409..fce7b5e 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -452,9 +452,19 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+ 	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+ 	if (interval != ep->desc.bInterval - 1)
+ 		dev_warn(&udev->dev,
+-			 "ep %#x - rounding interval to %d microframes\n",
++			 "ep %#x - rounding interval to %d %sframes\n",
+ 			 ep->desc.bEndpointAddress,
+-			 1 << interval);
++			 1 << interval,
++			 udev->speed == USB_SPEED_FULL ? "" : "micro");
++
++	if (udev->speed == USB_SPEED_FULL) {
++		/*
++		 * Full speed isoc endpoints specify interval in frames,
++		 * not microframes. We are using microframes everywhere,
++		 * so adjust accordingly.
++		 */
++		interval += 3;	/* 1 frame = 2^3 uframes */
++	}
+ 
+ 	return interval;
+ }
+@@ -511,12 +521,12 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+ 		break;
+ 
+ 	case USB_SPEED_FULL:
+-		if (usb_endpoint_xfer_int(&ep->desc)) {
++		if (usb_endpoint_xfer_isoc(&ep->desc)) {
+ 			interval = xhci_parse_exponent_interval(udev, ep);
+ 			break;
+ 		}
+ 		/*
+-		 * Fall through for isochronous endpoint interval decoding
++		 * Fall through for interrupt endpoint interval decoding
+ 		 * since it uses the same rules as low speed interrupt
+ 		 * endpoints.
+ 		 */
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 05afb5c..f77908b 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -114,6 +114,10 @@ static struct usb_device_id id_table [] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
++	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
++	{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
++	{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 5171f22..afc4bd3 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -570,6 +570,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_IBS_APP70_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_TAVIR_STK500_PID) },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+@@ -650,6 +651,7 @@ static struct usb_device_id id_table_combined [] = {
+ 	{ USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
+ 	{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index eca754b..40ac7c7 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -351,6 +351,7 @@
+  */
+ #define FTDI_4N_GALAXY_DE_1_PID	0xF3C0
+ #define FTDI_4N_GALAXY_DE_2_PID	0xF3C1
++#define FTDI_4N_GALAXY_DE_3_PID	0xF3C2
+ 
+ /*
+  * Linx Technologies product ids
+@@ -491,6 +492,11 @@
+ /* www.canusb.com Lawicel CANUSB device (FTDI_VID) */
+ #define FTDI_CANUSB_PID 0xFFA8 /* Product Id */
+ 
++/*
++ * TavIR AVR product ids (FTDI_VID)
++ */
++#define FTDI_TAVIR_STK500_PID	0xFA33	/* STK500 AVR programmer */
++
+ 
+ 
+ /********************************/
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 5ac900e..867d97b 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1,7 +1,7 @@
+ /*
+  * Garmin GPS driver
+  *
+- * Copyright (C) 2006-2009 Hermann Kneissel herkne at users.sourceforge.net
++ * Copyright (C) 2006-2011 Hermann Kneissel herkne at gmx.de
+  *
+  * The latest version of the driver can be found at
+  * http://sourceforge.net/projects/garmin-gps/
+@@ -51,7 +51,7 @@ static int debug;
+  */
+ 
+ #define VERSION_MAJOR	0
+-#define VERSION_MINOR	33
++#define VERSION_MINOR	36
+ 
+ #define _STR(s) #s
+ #define _DRIVER_VERSION(a, b) "v" _STR(a) "." _STR(b)
+@@ -411,6 +411,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
+  */
+ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
+ {
++	unsigned long flags;
+ 	const __u8 *recpkt = garmin_data_p->inbuffer+GSP_INITIAL_OFFSET;
+ 	__le32 *usbdata = (__le32 *) garmin_data_p->inbuffer;
+ 
+@@ -459,7 +460,9 @@ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
+ 	/* if this was an abort-transfer command, flush all
+ 	   queued data. */
+ 	if (isAbortTrfCmnd(garmin_data_p->inbuffer)) {
++		spin_lock_irqsave(&garmin_data_p->lock, flags);
+ 		garmin_data_p->flags |= FLAGS_DROP_DATA;
++		spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ 		pkt_clear(garmin_data_p);
+ 	}
+ 
+@@ -944,7 +947,7 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	spin_lock_irqsave(&garmin_data_p->lock, flags);
+ 	garmin_data_p->mode  = initial_mode;
+ 	garmin_data_p->count = 0;
+-	garmin_data_p->flags = 0;
++	garmin_data_p->flags &= FLAGS_SESSION_REPLY1_SEEN;
+ 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ 
+ 	/* shutdown any bulk reads that might be going on */
+@@ -1179,7 +1182,8 @@ static int garmin_write_room(struct tty_struct *tty)
+ 
+ 
+ static void garmin_read_process(struct garmin_data *garmin_data_p,
+-				 unsigned char *data, unsigned data_length)
++				 unsigned char *data, unsigned data_length,
++				 int bulk_data)
+ {
+ 	unsigned long flags;
+ 
+@@ -1194,7 +1198,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
+ 		   send it directly to the tty port */
+ 		if (garmin_data_p->flags & FLAGS_QUEUING) {
+ 			pkt_add(garmin_data_p, data, data_length);
+-		} else if (getLayerId(data) == GARMIN_LAYERID_APPL) {
++		} else if (bulk_data ||
++			   getLayerId(data) == GARMIN_LAYERID_APPL) {
+ 
+ 			spin_lock_irqsave(&garmin_data_p->lock, flags);
+ 			garmin_data_p->flags |= APP_RESP_SEEN;
+@@ -1238,7 +1243,7 @@ static void garmin_read_bulk_callback(struct urb *urb)
+ 	usb_serial_debug_data(debug, &port->dev,
+ 				__func__, urb->actual_length, data);
+ 
+-	garmin_read_process(garmin_data_p, data, urb->actual_length);
++	garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
+ 
+ 	if (urb->actual_length == 0 &&
+ 			0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
+@@ -1348,7 +1353,7 @@ static void garmin_read_int_callback(struct urb *urb)
+ 			__func__, garmin_data_p->serial_num);
+ 	}
+ 
+-	garmin_read_process(garmin_data_p, data, urb->actual_length);
++	garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
+ 
+ 	port->interrupt_in_urb->dev = port->serial->dev;
+ 	retval = usb_submit_urb(urb, GFP_ATOMIC);
+@@ -1463,6 +1468,7 @@ static int garmin_attach(struct usb_serial *serial)
+ 	garmin_data_p->timer.function = timeout_handler;
+ 	garmin_data_p->port = port;
+ 	garmin_data_p->state = 0;
++	garmin_data_p->flags = 0;
+ 	garmin_data_p->count = 0;
+ 	usb_set_serial_port_data(port, garmin_data_p);
+ 
+diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
+index 99bd00f5..c53a8a5 100644
+--- a/drivers/usb/serial/moto_modem.c
++++ b/drivers/usb/serial/moto_modem.c
+@@ -25,6 +25,7 @@ static struct usb_device_id id_table [] = {
+ 	{ USB_DEVICE(0x05c6, 0x3197) },	/* unknown Motorola phone */
+ 	{ USB_DEVICE(0x0c44, 0x0022) },	/* unknown Mororola phone */
+ 	{ USB_DEVICE(0x22b8, 0x2a64) },	/* Motorola KRZR K1m */
++	{ USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */
+ 	{ USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
+ 	{ },
+ };
+diff --git a/drivers/xen/events.c b/drivers/xen/events.c
+index 1417015..009ca4e 100644
+--- a/drivers/xen/events.c
++++ b/drivers/xen/events.c
+@@ -536,7 +536,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
+ 	if (irq < 0)
+ 		return irq;
+ 
+-	irqflags |= IRQF_NO_SUSPEND;
++	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
+ 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
+ 	if (retval != 0) {
+ 		unbind_from_irq(irq);
+@@ -814,9 +814,6 @@ static void restore_cpu_virqs(unsigned int cpu)
+ 		evtchn_to_irq[evtchn] = irq;
+ 		irq_info[irq] = mk_virq_info(evtchn, virq);
+ 		bind_evtchn_to_cpu(evtchn, cpu);
+-
+-		/* Ready for use. */
+-		unmask_evtchn(evtchn);
+ 	}
+ }
+ 
+@@ -842,10 +839,6 @@ static void restore_cpu_ipis(unsigned int cpu)
+ 		evtchn_to_irq[evtchn] = irq;
+ 		irq_info[irq] = mk_ipi_info(evtchn, ipi);
+ 		bind_evtchn_to_cpu(evtchn, cpu);
+-
+-		/* Ready for use. */
+-		unmask_evtchn(evtchn);
+-
+ 	}
+ }
+ 
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index e65efa2..16cea86 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1203,6 +1203,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 			if (!bdev->bd_part)
+ 				goto out_clear;
+ 
++			ret = 0;
+ 			if (disk->fops->open) {
+ 				ret = disk->fops->open(bdev, mode);
+ 				if (ret == -ERESTARTSYS) {
+@@ -1218,9 +1219,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 					mutex_unlock(&bdev->bd_mutex);
+ 					goto restart;
+ 				}
+-				if (ret)
+-					goto out_clear;
+ 			}
++			/*
++			 * If the device is invalidated, rescan partition
++			 * if open succeeded or failed with -ENOMEDIUM.
++			 * The latter is necessary to prevent ghost
++			 * partitions on a removed medium.
++			 */
++			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
++				rescan_partitions(disk, bdev);
++			if (ret)
++				goto out_clear;
++
+ 			if (!bdev->bd_openers) {
+ 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+ 				bdi = blk_get_backing_dev_info(bdev);
+@@ -1228,8 +1238,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 					bdi = &default_backing_dev_info;
+ 				bdev->bd_inode->i_data.backing_dev_info = bdi;
+ 			}
+-			if (bdev->bd_invalidated)
+-				rescan_partitions(disk, bdev);
+ 		} else {
+ 			struct block_device *whole;
+ 			whole = bdget_disk(disk, 0);
+@@ -1256,13 +1264,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 		put_disk(disk);
+ 		disk = NULL;
+ 		if (bdev->bd_contains == bdev) {
+-			if (bdev->bd_disk->fops->open) {
++			ret = 0;
++			if (bdev->bd_disk->fops->open)
+ 				ret = bdev->bd_disk->fops->open(bdev, mode);
+-				if (ret)
+-					goto out_unlock_bdev;
+-			}
+-			if (bdev->bd_invalidated)
++			/* the same as first opener case, read comment there */
++			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+ 				rescan_partitions(bdev->bd_disk, bdev);
++			if (ret)
++				goto out_unlock_bdev;
+ 		}
+ 	}
+ 	bdev->bd_openers++;
+diff --git a/fs/dcookies.c b/fs/dcookies.c
+index a21cabd..dda0dc7 100644
+--- a/fs/dcookies.c
++++ b/fs/dcookies.c
+@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
+ 	/* FIXME: (deleted) ? */
+ 	path = d_path(&dcs->path, kbuf, PAGE_SIZE);
+ 
++	mutex_unlock(&dcookie_mutex);
++
+ 	if (IS_ERR(path)) {
+ 		err = PTR_ERR(path);
+ 		goto out_free;
+@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
+ 
+ out_free:
+ 	kfree(kbuf);
++	return err;
+ out:
+ 	mutex_unlock(&dcookie_mutex);
+ 	return err;
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index aa2480a..8f1a525 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -481,8 +481,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
+ 	struct mutex *tfm_mutex;
+ 	char *block_aligned_filename;
+ 	struct ecryptfs_auth_tok *auth_tok;
+-	struct scatterlist src_sg;
+-	struct scatterlist dst_sg;
++	struct scatterlist src_sg[2];
++	struct scatterlist dst_sg[2];
+ 	struct blkcipher_desc desc;
+ 	char iv[ECRYPTFS_MAX_IV_BYTES];
+ 	char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
+@@ -695,23 +695,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
+ 	memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
+ 	       filename_size);
+ 	rc = virt_to_scatterlist(s->block_aligned_filename,
+-				 s->block_aligned_filename_size, &s->src_sg, 1);
+-	if (rc != 1) {
++				 s->block_aligned_filename_size, s->src_sg, 2);
++	if (rc < 1) {
+ 		printk(KERN_ERR "%s: Internal error whilst attempting to "
+-		       "convert filename memory to scatterlist; "
+-		       "expected rc = 1; got rc = [%d]. "
++		       "convert filename memory to scatterlist; rc = [%d]. "
+ 		       "block_aligned_filename_size = [%zd]\n", __func__, rc,
+ 		       s->block_aligned_filename_size);
+ 		goto out_release_free_unlock;
+ 	}
+ 	rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
+-				 &s->dst_sg, 1);
+-	if (rc != 1) {
++				 s->dst_sg, 2);
++	if (rc < 1) {
+ 		printk(KERN_ERR "%s: Internal error whilst attempting to "
+ 		       "convert encrypted filename memory to scatterlist; "
+-		       "expected rc = 1; got rc = [%d]. "
+-		       "block_aligned_filename_size = [%zd]\n", __func__, rc,
+-		       s->block_aligned_filename_size);
++		       "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++		       __func__, rc, s->block_aligned_filename_size);
+ 		goto out_release_free_unlock;
+ 	}
+ 	/* The characters in the first block effectively do the job
+@@ -734,7 +732,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
+ 		       mount_crypt_stat->global_default_fn_cipher_key_bytes);
+ 		goto out_release_free_unlock;
+ 	}
+-	rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
++	rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ 					 s->block_aligned_filename_size);
+ 	if (rc) {
+ 		printk(KERN_ERR "%s: Error attempting to encrypt filename; "
+@@ -766,8 +764,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
+ 	struct mutex *tfm_mutex;
+ 	char *decrypted_filename;
+ 	struct ecryptfs_auth_tok *auth_tok;
+-	struct scatterlist src_sg;
+-	struct scatterlist dst_sg;
++	struct scatterlist src_sg[2];
++	struct scatterlist dst_sg[2];
+ 	struct blkcipher_desc desc;
+ 	char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
+ 	char iv[ECRYPTFS_MAX_IV_BYTES];
+@@ -872,13 +870,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ 	}
+ 	mutex_lock(s->tfm_mutex);
+ 	rc = virt_to_scatterlist(&data[(*packet_size)],
+-				 s->block_aligned_filename_size, &s->src_sg, 1);
+-	if (rc != 1) {
++				 s->block_aligned_filename_size, s->src_sg, 2);
++	if (rc < 1) {
+ 		printk(KERN_ERR "%s: Internal error whilst attempting to "
+ 		       "convert encrypted filename memory to scatterlist; "
+-		       "expected rc = 1; got rc = [%d]. "
+-		       "block_aligned_filename_size = [%zd]\n", __func__, rc,
+-		       s->block_aligned_filename_size);
++		       "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++		       __func__, rc, s->block_aligned_filename_size);
+ 		goto out_unlock;
+ 	}
+ 	(*packet_size) += s->block_aligned_filename_size;
+@@ -892,13 +889,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ 		goto out_unlock;
+ 	}
+ 	rc = virt_to_scatterlist(s->decrypted_filename,
+-				 s->block_aligned_filename_size, &s->dst_sg, 1);
+-	if (rc != 1) {
++				 s->block_aligned_filename_size, s->dst_sg, 2);
++	if (rc < 1) {
+ 		printk(KERN_ERR "%s: Internal error whilst attempting to "
+ 		       "convert decrypted filename memory to scatterlist; "
+-		       "expected rc = 1; got rc = [%d]. "
+-		       "block_aligned_filename_size = [%zd]\n", __func__, rc,
+-		       s->block_aligned_filename_size);
++		       "rc = [%d]. block_aligned_filename_size = [%zd]\n",
++		       __func__, rc, s->block_aligned_filename_size);
+ 		goto out_free_unlock;
+ 	}
+ 	/* The characters in the first block effectively do the job of
+@@ -937,7 +933,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
+ 		       mount_crypt_stat->global_default_fn_cipher_key_bytes);
+ 		goto out_free_unlock;
+ 	}
+-	rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
++	rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
+ 					 s->block_aligned_filename_size);
+ 	if (rc) {
+ 		printk(KERN_ERR "%s: Error attempting to decrypt filename; "
+diff --git a/fs/exec.c b/fs/exec.c
+index 0cf881d..86fafc6 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1009,6 +1009,7 @@ int flush_old_exec(struct linux_binprm * bprm)
+ 
+ 	bprm->mm = NULL;		/* We're using it now */
+ 
++	set_fs(USER_DS);
+ 	current->flags &= ~PF_RANDOMIZE;
+ 	flush_thread();
+ 	current->personality &= ~bprm->per_clear;
+@@ -1276,10 +1277,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
+ 	if (retval)
+ 		return retval;
+ 
+-	/* kernel module loader fixup */
+-	/* so we don't try to load run modprobe in kernel space. */
+-	set_fs(USER_DS);
+-
+ 	retval = audit_bprm(bprm);
+ 	if (retval)
+ 		return retval;
+diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
+index a168780..6397b52 100644
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -1425,10 +1425,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+ 	frame->at = entries;
+ 	frame->bh = bh;
+ 	bh = bh2;
++	/*
++	 * Mark buffers dirty here so that if do_split() fails we write a
++	 * consistent set of buffers to disk.
++	 */
++	ext3_journal_dirty_metadata(handle, frame->bh);
++	ext3_journal_dirty_metadata(handle, bh);
+ 	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
+-	dx_release (frames);
+-	if (!(de))
++	if (!de) {
++		ext3_mark_inode_dirty(handle, dir);
++		dx_release(frames);
+ 		return retval;
++	}
++	dx_release(frames);
+ 
+ 	return add_dirent_to_buf(handle, dentry, inode, de, bh);
+ }
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 04e07e2..42bac1b 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1173,6 +1173,8 @@ repeat_load_buddy:
+ 	return 0;
+ 
+ err:
++	if (page)
++		page_cache_release(page);
+ 	if (e4b->bd_bitmap_page)
+ 		page_cache_release(e4b->bd_bitmap_page);
+ 	if (e4b->bd_buddy_page)
+diff --git a/fs/fat/file.c b/fs/fat/file.c
+index e8c159d..279937b 100644
+--- a/fs/fat/file.c
++++ b/fs/fat/file.c
+@@ -101,7 +101,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
+ 		if (attr & ATTR_SYS)
+ 			inode->i_flags |= S_IMMUTABLE;
+ 		else
+-			inode->i_flags &= S_IMMUTABLE;
++			inode->i_flags &= ~S_IMMUTABLE;
+ 	}
+ 
+ 	fat_save_attrs(inode, attr);
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 4bd8825..17d29a8 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -746,8 +746,13 @@ wait_for_iobuf:
+                    required. */
+ 		JBUFFER_TRACE(jh, "file as BJ_Forget");
+ 		journal_file_buffer(jh, commit_transaction, BJ_Forget);
+-		/* Wake up any transactions which were waiting for this
+-		   IO to complete */
++		/*
++		 * Wake up any transactions which were waiting for this
++		 * IO to complete. The barrier must be here so that changes
++		 * by journal_file_buffer() take effect before wake_up_bit()
++		 * does the waitqueue check.
++		 */
++		smp_mb();
+ 		wake_up_bit(&bh->b_state, BH_Unshadow);
+ 		JBUFFER_TRACE(jh, "brelse shadowed buffer");
+ 		__brelse(bh);
+diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
+index bd224ee..45905ff 100644
+--- a/fs/jbd/journal.c
++++ b/fs/jbd/journal.c
+@@ -435,9 +435,12 @@ int __log_space_left(journal_t *journal)
+ int __log_start_commit(journal_t *journal, tid_t target)
+ {
+ 	/*
+-	 * Are we already doing a recent enough commit?
++	 * The only transaction we can possibly wait upon is the
++	 * currently running transaction (if it exists).  Otherwise,
++	 * the target tid must be an old one.
+ 	 */
+-	if (!tid_geq(journal->j_commit_request, target)) {
++	if (journal->j_running_transaction &&
++	    journal->j_running_transaction->t_tid == target) {
+ 		/*
+ 		 * We want a new commit: OK, mark the request and wakup the
+ 		 * commit thread.  We do _not_ do the commit ourselves.
+@@ -449,7 +452,14 @@ int __log_start_commit(journal_t *journal, tid_t target)
+ 			  journal->j_commit_sequence);
+ 		wake_up(&journal->j_wait_commit);
+ 		return 1;
+-	}
++	} else if (!tid_geq(journal->j_commit_request, target))
++		/* This should never happen, but if it does, preserve
++		   the evidence before kjournald goes into a loop and
++		   increments j_commit_sequence beyond all recognition. */
++		WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
++		    journal->j_commit_request, journal->j_commit_sequence,
++		    target, journal->j_running_transaction ?
++		    journal->j_running_transaction->t_tid : 0);
+ 	return 0;
+ }
+ 
+diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
+index a2a14cd..dd6efdb 100644
+--- a/fs/partitions/ldm.c
++++ b/fs/partitions/ldm.c
+@@ -1335,6 +1335,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+ 
+ 	list_add_tail (&f->list, frags);
+ found:
++	if (rec >= f->num) {
++		ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
++		return false;
++	}
++
+ 	if (f->map & (1 << rec)) {
+ 		ldm_error ("Duplicate VBLK, part %d.", rec);
+ 		f->map &= 0x7F;			/* Mark the group as broken */
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index d321bae..841f77c 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -665,6 +665,7 @@ out_free:
+ 
+ out_release:
+ 	release_head(c, BASEHD);
++	kfree(dent);
+ out_ro:
+ 	ubifs_ro_mode(c, err);
+ 	if (last_reference)
+diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
+index 57085e4..238ad15 100644
+--- a/fs/ubifs/sb.c
++++ b/fs/ubifs/sb.c
+@@ -474,7 +474,8 @@ failed:
+  * @c: UBIFS file-system description object
+  *
+  * This function returns a pointer to the superblock node or a negative error
+- * code.
++ * code. Note, the user of this function is responsible of kfree()'ing the
++ * returned superblock buffer.
+  */
+ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
+ {
+diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
+index 02feb59..aec6689 100644
+--- a/fs/ubifs/shrinker.c
++++ b/fs/ubifs/shrinker.c
+@@ -283,7 +283,11 @@ int ubifs_shrinker(int nr, gfp_t gfp_mask)
+ 	long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
+ 
+ 	if (nr == 0)
+-		return clean_zn_cnt;
++		/*
++		 * Due to the way UBIFS updates the clean znode counter it may
++		 * temporarily be negative.
++		 */
++		return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
+ 
+ 	if (!clean_zn_cnt) {
+ 		/*
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 333e181..8440ba8 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1550,6 +1550,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
+ 		}
+ 		sup->leb_cnt = cpu_to_le32(c->leb_cnt);
+ 		err = ubifs_write_sb_node(c, sup);
++		kfree(sup);
+ 		if (err)
+ 			goto out;
+ 	}
+diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
+index c82683a..71c14dd 100644
+--- a/fs/xfs/linux-2.6/xfs_sync.c
++++ b/fs/xfs/linux-2.6/xfs_sync.c
+@@ -711,16 +711,24 @@ xfs_inode_set_reclaim_tag(
+ }
+ 
+ void
+-__xfs_inode_clear_reclaim_tag(
+-	xfs_mount_t	*mp,
++__xfs_inode_clear_reclaim(
+ 	xfs_perag_t	*pag,
+ 	xfs_inode_t	*ip)
+ {
+-	radix_tree_tag_clear(&pag->pag_ici_root,
+-			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+ 	pag->pag_ici_reclaimable--;
+ }
+ 
++void
++__xfs_inode_clear_reclaim_tag(
++       xfs_mount_t     *mp,
++       xfs_perag_t     *pag,
++       xfs_inode_t     *ip)
++{
++       radix_tree_tag_clear(&pag->pag_ici_root,
++                       XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
++       __xfs_inode_clear_reclaim(pag, ip);
++}
++
+ STATIC int
+ xfs_reclaim_inode(
+ 	struct xfs_inode	*ip,
+diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
+index 0b28c13..81dd3bc 100644
+--- a/fs/xfs/linux-2.6/xfs_sync.h
++++ b/fs/xfs/linux-2.6/xfs_sync.h
+@@ -48,6 +48,7 @@ int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
+ 
+ void xfs_inode_set_reclaim_tag(struct xfs_inode *ip);
+ void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip);
++void __xfs_inode_clear_reclaim(struct xfs_perag *pag, struct xfs_inode *ip);
+ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
+ 				struct xfs_inode *ip);
+ 
+diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
+index 16b6a33..320b79d 100644
+--- a/fs/xfs/xfs_iget.c
++++ b/fs/xfs/xfs_iget.c
+@@ -529,6 +529,7 @@ xfs_ireclaim(
+ 	write_lock(&pag->pag_ici_lock);
+ 	if (!radix_tree_delete(&pag->pag_ici_root, agino))
+ 		ASSERT(0);
++	__xfs_inode_clear_reclaim(pag, ip);
+ 	write_unlock(&pag->pag_ici_lock);
+ 	xfs_put_perag(mp, pag);
+ 
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index c49d6f5..4528f29 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -53,7 +53,7 @@
+  *                Used by threaded interrupts which need to keep the
+  *                irq line disabled until the threaded handler has been run.
+  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
+- *
++ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+  */
+ #define IRQF_DISABLED		0x00000020
+ #define IRQF_SAMPLE_RANDOM	0x00000040
+@@ -65,6 +65,7 @@
+ #define IRQF_IRQPOLL		0x00001000
+ #define IRQF_ONESHOT		0x00002000
+ #define IRQF_NO_SUSPEND		0x00004000
++#define IRQF_FORCE_RESUME	0x00008000
+ 
+ #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
+ 
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index fe2f4ee..a339386 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -605,6 +605,8 @@
+ #define PCI_DEVICE_ID_MATROX_G550	0x2527
+ #define PCI_DEVICE_ID_MATROX_VIA	0x4536
+ 
++#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS	0x14f2
++
+ #define PCI_VENDOR_ID_CT		0x102c
+ #define PCI_DEVICE_ID_CT_69000		0x00c0
+ #define PCI_DEVICE_ID_CT_65545		0x00d8
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 632205c..4c3257d 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -88,12 +88,12 @@ static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+ 	unsigned ret;
+ 
+ repeat:
+-	ret = sl->sequence;
+-	smp_rmb();
++	ret = ACCESS_ONCE(sl->sequence);
+ 	if (unlikely(ret & 1)) {
+ 		cpu_relax();
+ 		goto repeat;
+ 	}
++	smp_rmb();
+ 
+ 	return ret;
+ }
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 5fad88b..d40ecd5 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -265,8 +265,17 @@ EXPORT_SYMBOL(disable_irq);
+ 
+ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+ {
+-	if (resume)
++	if (resume) {
++		if (!(desc->status & IRQ_SUSPENDED)) {
++			if (!desc->action)
++				return;
++			if (!(desc->action->flags & IRQF_FORCE_RESUME))
++				return;
++			/* Pretend that it got disabled ! */
++			desc->depth++;
++		}
+ 		desc->status &= ~IRQ_SUSPENDED;
++	}
+ 
+ 	switch (desc->depth) {
+ 	case 0:
+diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
+index a0bb09e..0067abb 100644
+--- a/kernel/irq/pm.c
++++ b/kernel/irq/pm.c
+@@ -53,9 +53,6 @@ void resume_device_irqs(void)
+ 	for_each_irq_desc(irq, desc) {
+ 		unsigned long flags;
+ 
+-		if (!(desc->status & IRQ_SUSPENDED))
+-			continue;
+-
+ 		spin_lock_irqsave(&desc->lock, flags);
+ 		__enable_irq(desc, irq, true);
+ 		spin_unlock_irqrestore(&desc->lock, flags);
+diff --git a/kernel/lockdep.c b/kernel/lockdep.c
+index 9af5672..d86fe89 100644
+--- a/kernel/lockdep.c
++++ b/kernel/lockdep.c
+@@ -3243,7 +3243,7 @@ int lock_is_held(struct lockdep_map *lock)
+ 	int ret = 0;
+ 
+ 	if (unlikely(current->lockdep_recursion))
+-		return ret;
++		return 1; /* avoid false negative lockdep_assert_held() */
+ 
+ 	raw_local_irq_save(flags);
+ 	check_flags(flags);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index f5e362f..1d1206a 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -32,6 +32,8 @@ struct timekeeper {
+ 	cycle_t cycle_interval;
+ 	/* Number of clock shifted nano seconds in one NTP interval. */
+ 	u64	xtime_interval;
++	/* shifted nano seconds left over when rounding cycle_interval */
++	s64	xtime_remainder;
+ 	/* Raw nano seconds accumulated per NTP interval. */
+ 	u32	raw_interval;
+ 
+@@ -62,7 +64,7 @@ struct timekeeper timekeeper;
+ static void timekeeper_setup_internals(struct clocksource *clock)
+ {
+ 	cycle_t interval;
+-	u64 tmp;
++	u64 tmp, ntpinterval;
+ 
+ 	timekeeper.clock = clock;
+ 	clock->cycle_last = clock->read(clock);
+@@ -70,6 +72,7 @@ static void timekeeper_setup_internals(struct clocksource *clock)
+ 	/* Do the ns -> cycle conversion first, using original mult */
+ 	tmp = NTP_INTERVAL_LENGTH;
+ 	tmp <<= clock->shift;
++	ntpinterval = tmp;
+ 	tmp += clock->mult/2;
+ 	do_div(tmp, clock->mult);
+ 	if (tmp == 0)
+@@ -80,6 +83,7 @@ static void timekeeper_setup_internals(struct clocksource *clock)
+ 
+ 	/* Go back from cycles -> shifted ns */
+ 	timekeeper.xtime_interval = (u64) interval * clock->mult;
++	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
+ 	timekeeper.raw_interval =
+ 		((u64) interval * clock->mult) >> clock->shift;
+ 
+@@ -788,7 +792,8 @@ void update_wall_time(void)
+ 
+ 		/* accumulate error between NTP and clock interval */
+ 		timekeeper.ntp_error += tick_length;
+-		timekeeper.ntp_error -= timekeeper.xtime_interval <<
++		timekeeper.ntp_error -=
++		    (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
+ 					timekeeper.ntp_error_shift;
+ 	}
+ 
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 0afddc2..4872937 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2360,14 +2360,16 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
+ 		ftrace_match_records(parser->buffer, parser->idx, enable);
+ 	}
+ 
+-	mutex_lock(&ftrace_lock);
+-	if (ftrace_start_up && ftrace_enabled)
+-		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+-	mutex_unlock(&ftrace_lock);
+-
+ 	trace_parser_put(parser);
+ 	kfree(iter);
+ 
++	if (file->f_mode & FMODE_WRITE) {
++		mutex_lock(&ftrace_lock);
++		if (ftrace_start_up && ftrace_enabled)
++			ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++		mutex_unlock(&ftrace_lock);
++	}
++
+ 	mutex_unlock(&ftrace_regex_lock);
+ 	return 0;
+ }
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 619313e..507a22f 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -144,7 +144,7 @@ static void init_shared_classes(void)
+ 
+ #define HARDIRQ_ENTER()				\
+ 	local_irq_disable();			\
+-	irq_enter();				\
++	__irq_enter();				\
+ 	WARN_ON(!in_irq());
+ 
+ #define HARDIRQ_EXIT()				\
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index f5a106e..0cf9863 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -995,10 +995,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
+ 	 */
+ 	chg = vma_needs_reservation(h, vma, addr);
+ 	if (chg < 0)
+-		return ERR_PTR(chg);
++		return ERR_PTR(-VM_FAULT_OOM);
+ 	if (chg)
+ 		if (hugetlb_get_quota(inode->i_mapping, chg))
+-			return ERR_PTR(-ENOSPC);
++			return ERR_PTR(-VM_FAULT_SIGBUS);
+ 
+ 	spin_lock(&hugetlb_lock);
+ 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 8bf765c..c346660 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -1354,9 +1354,12 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 	++(*pos);
+ 
+ 	list_for_each_continue_rcu(n, &object_list) {
+-		next_obj = list_entry(n, struct kmemleak_object, object_list);
+-		if (get_object(next_obj))
++		struct kmemleak_object *obj =
++			list_entry(n, struct kmemleak_object, object_list);
++		if (get_object(obj)) {
++			next_obj = obj;
+ 			break;
++		}
+ 	}
+ 
+ 	put_object(prev_obj);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 902e5fc..3ecab7e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1841,6 +1841,7 @@ restart:
+ 	 */
+ 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
+ 
++rebalance:
+ 	/* This is the last chance, in general, before the goto nopage. */
+ 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+ 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+@@ -1848,7 +1849,6 @@ restart:
+ 	if (page)
+ 		goto got_pg;
+ 
+-rebalance:
+ 	/* Allocate without watermarks if the context allows */
+ 	if (alloc_flags & ALLOC_NO_WATERMARKS) {
+ 		page = __alloc_pages_high_priority(gfp_mask, order,
+diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
+index b5674dc..7bbe81e 100644
+--- a/net/atm/atm_sysfs.c
++++ b/net/atm/atm_sysfs.c
+@@ -57,6 +57,14 @@ static ssize_t show_atmaddress(struct device *cdev,
+ 	return pos - buf;
+ }
+ 
++static ssize_t show_atmindex(struct device *cdev,
++			     struct device_attribute *attr, char *buf)
++{
++	struct atm_dev *adev = to_atm_dev(cdev);
++
++	return sprintf(buf, "%d\n", adev->number);
++}
++
+ static ssize_t show_carrier(struct device *cdev,
+ 			    struct device_attribute *attr, char *buf)
+ {
+@@ -97,6 +105,7 @@ static ssize_t show_link_rate(struct device *cdev,
+ 
+ static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
+ static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
++static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
+ static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
+ static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
+ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
+@@ -104,6 +113,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
+ static struct device_attribute *atm_attrs[] = {
+ 	&dev_attr_atmaddress,
+ 	&dev_attr_address,
++	&dev_attr_atmindex,
+ 	&dev_attr_carrier,
+ 	&dev_attr_type,
+ 	&dev_attr_link_rate,
+diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
+index 1264ad0..58679f8 100644
+--- a/net/ipv6/netfilter/ip6t_REJECT.c
++++ b/net/ipv6/netfilter/ip6t_REJECT.c
+@@ -43,6 +43,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
+ 	int tcphoff, needs_ack;
+ 	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+ 	struct ipv6hdr *ip6h;
++#define DEFAULT_TOS_VALUE	0x0U
++	const __u8 tclass = DEFAULT_TOS_VALUE;
+ 	struct dst_entry *dst = NULL;
+ 	u8 proto;
+ 	struct flowi fl;
+@@ -121,7 +123,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
+ 	skb_put(nskb, sizeof(struct ipv6hdr));
+ 	skb_reset_network_header(nskb);
+ 	ip6h = ipv6_hdr(nskb);
+-	ip6h->version = 6;
++	*(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
+ 	ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
+ 	ip6h->nexthdr = IPPROTO_TCP;
+ 	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index bfc8737..e5590a9 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -474,7 +474,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+ 
+ 	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
+ 	fp = skb_shinfo(head)->frag_list;
+-	if (NFCT_FRAG6_CB(fp)->orig == NULL)
++	if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
+ 		/* at above code, head skb is divided into two skbs. */
+ 		fp = fp->next;
+ 
+@@ -600,12 +600,6 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ 	hdr = ipv6_hdr(clone);
+ 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
+ 
+-	if (!(fhdr->frag_off & htons(0xFFF9))) {
+-		pr_debug("Invalid fragment offset\n");
+-		/* It is not a fragmented frame */
+-		goto ret_orig;
+-	}
+-
+ 	if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
+ 		nf_ct_frag6_evictor();
+ 
+diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
+index 74ce892..5ec6374 100644
+--- a/net/netfilter/xt_DSCP.c
++++ b/net/netfilter/xt_DSCP.c
+@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_target_param *par)
+ 	u_int8_t orig, nv;
+ 
+ 	orig = ipv6_get_dsfield(iph);
+-	nv   = (orig & info->tos_mask) ^ info->tos_value;
++	nv   = (orig & ~info->tos_mask) ^ info->tos_value;
+ 
+ 	if (orig != nv) {
+ 		if (!skb_make_writable(skb, sizeof(struct iphdr)))
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 1d61c33..79194ad 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1381,7 +1381,6 @@ static void xs_tcp_state_change(struct sock *sk)
+ 	case TCP_CLOSE_WAIT:
+ 		/* The server initiated a shutdown of the socket */
+ 		xprt_force_disconnect(xprt);
+-	case TCP_SYN_SENT:
+ 		xprt->connect_cookie++;
+ 	case TCP_CLOSING:
+ 		/*
+@@ -1843,6 +1842,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
+ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ {
+ 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
++	int ret = -ENOTCONN;
+ 
+ 	if (!transport->inet) {
+ 		struct sock *sk = sock->sk;
+@@ -1874,12 +1874,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ 	}
+ 
+ 	if (!xprt_bound(xprt))
+-		return -ENOTCONN;
++		goto out;
+ 
+ 	/* Tell the socket layer to start connecting... */
+ 	xprt->stat.connect_count++;
+ 	xprt->stat.connect_start = jiffies;
+-	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
++	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
++	switch (ret) {
++	case 0:
++	case -EINPROGRESS:
++		/* SYN_SENT! */
++		xprt->connect_cookie++;
++		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
++			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
++	}
++out:
++	return ret;
+ }
+ 
+ /**
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index b75e718..f0341e4 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -2995,12 +2995,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
+ 	i = 0;
+ 	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
+ 		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
++			request->ssids[i].ssid_len = nla_len(attr);
+ 			if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+ 				err = -EINVAL;
+ 				goto out_free;
+ 			}
+ 			memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
+-			request->ssids[i].ssid_len = nla_len(attr);
+ 			i++;
+ 		}
+ 	}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index ed550e4..083b777 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -12662,6 +12662,7 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
+ 						ALC268_ACER_ASPIRE_ONE),
+ 	SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
++	SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
+ 	SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
+ 			"Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
+ 	/* almost compatible with toshiba but with optional digital outs;
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 7e59c34..01da10b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1600,7 +1600,7 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+ 				"Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+-				"Dell Studio 1558", STAC_DELL_M6_BOTH),
++				"Dell Studio 1558", STAC_DELL_M6_DMIC),
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index a80f1ad..12506dc 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -639,17 +639,17 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
+ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
+ 	{ "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
+ 	{ "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
+-	{ "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
++	{ "LINEOUT1 Mixer", "Output Switch", "Left Output PGA" },
+ 
+ 	{ "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
+ 	{ "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
+ };
+ 
+ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+-	{ "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
+-	{ "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
++	{ "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
++	{ "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
+ 
+-	{ "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
++	{ "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
+ 
+ 	{ "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
+ 	{ "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
+@@ -658,17 +658,17 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
+ 	{ "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
+ 	{ "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
+-	{ "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
++	{ "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
+ 
+ 	{ "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
+ 	{ "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
+ };
+ 
+ static const struct snd_soc_dapm_route lineout2_se_routes[] = {
+-	{ "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
+-	{ "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
++	{ "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
++	{ "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
+ 
+-	{ "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
++	{ "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
+ 
+ 	{ "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
+ 	{ "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
+@@ -686,17 +686,21 @@ int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
+ 	snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
+ 			    WM8993_IN2_VU, WM8993_IN2_VU);
+ 
++	snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_LEFT,
++			    WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
+ 	snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
+ 			    WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
+ 
+ 	snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
+-			    WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
++			    WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC,
++			    WM8993_HPOUT1_VU | WM8993_HPOUT1L_ZC);
+ 	snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
+ 			    WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
+ 			    WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
+ 
+ 	snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
+-			    WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
++			    WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU,
++			    WM8993_MIXOUTL_ZC | WM8993_MIXOUT_VU);
+ 	snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
+ 			    WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
+ 			    WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.43.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.43.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.43.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.43.patch)
@@ -0,0 +1,1092 @@
+diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
+index 7fcad58..3d6b43f 100644
+--- a/arch/um/kernel/dyn.lds.S
++++ b/arch/um/kernel/dyn.lds.S
+@@ -50,8 +50,18 @@ SECTIONS
+   .rela.got       : { *(.rela.got) }
+   .rel.bss        : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
+   .rela.bss       : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
+-  .rel.plt        : { *(.rel.plt) }
+-  .rela.plt       : { *(.rela.plt) }
++  .rel.plt : {
++	*(.rel.plt)
++	PROVIDE_HIDDEN(__rel_iplt_start = .);
++	*(.rel.iplt)
++	PROVIDE_HIDDEN(__rel_iplt_end = .);
++  }
++  .rela.plt : {
++	*(.rela.plt)
++	PROVIDE_HIDDEN(__rela_iplt_start = .);
++	*(.rela.iplt)
++	PROVIDE_HIDDEN(__rela_iplt_end = .);
++  }
+   .init           : {
+     KEEP (*(.init))
+   } =0x90909090
+diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
+index 664f942..79a077a 100644
+--- a/arch/um/kernel/uml.lds.S
++++ b/arch/um/kernel/uml.lds.S
+@@ -43,6 +43,23 @@ SECTIONS
+ 	__syscall_stub_end = .;
+   }
+ 
++  /*
++   * These are needed even in a static link, even if they wind up being empty.
++   * Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
++   */
++  .rel.plt : {
++	*(.rel.plt)
++	PROVIDE_HIDDEN(__rel_iplt_start = .);
++	*(.rel.iplt)
++	PROVIDE_HIDDEN(__rel_iplt_end = .);
++  }
++  .rela.plt : {
++	*(.rela.plt)
++	PROVIDE_HIDDEN(__rela_iplt_start = .);
++	*(.rela.iplt)
++	PROVIDE_HIDDEN(__rela_iplt_end = .);
++  }
++
+   #include "asm/common.lds.S"
+ 
+   init.data : { INIT_DATA }
+diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
+index 93a11d7..e696144 100644
+--- a/arch/um/os-Linux/mem.c
++++ b/arch/um/os-Linux/mem.c
+@@ -10,6 +10,7 @@
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <string.h>
++#include <sys/stat.h>
+ #include <sys/mman.h>
+ #include <sys/param.h>
+ #include "init.h"
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index 356a799..3f90a2c 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1658,6 +1658,11 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+ 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+ 			pte_t pte;
+ 
++#ifdef CONFIG_X86_32
++			if (pfn > max_pfn_mapped)
++				max_pfn_mapped = pfn;
++#endif
++
+ 			if (!pte_none(pte_page[pteidx]))
+ 				continue;
+ 
+@@ -1770,7 +1775,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+ {
+ 	pmd_t *kernel_pmd;
+ 
+-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
++	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
++				  xen_start_info->nr_pt_frames * PAGE_SIZE +
++				  512*1024);
+ 
+ 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+ 	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index aa1e953..847c947 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -1540,8 +1540,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
+ 	cic->dead_key = (unsigned long) cic->key;
+ 	cic->key = NULL;
+ 
+-	if (ioc->ioc_data == cic)
++	rcu_read_lock();
++	if (rcu_dereference(ioc->ioc_data) == cic) {
++		rcu_read_unlock();
++		spin_lock(&ioc->lock);
+ 		rcu_assign_pointer(ioc->ioc_data, NULL);
++		spin_unlock(&ioc->lock);
++	} else
++		rcu_read_unlock();
+ 
+ 	if (cic->cfqq[BLK_RW_ASYNC]) {
+ 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
+diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
+index cf0bfc6..d814a3d 100644
+--- a/drivers/char/tty_ldisc.c
++++ b/drivers/char/tty_ldisc.c
+@@ -539,7 +539,7 @@ static int tty_ldisc_halt(struct tty_struct *tty)
+ static int tty_ldisc_wait_idle(struct tty_struct *tty)
+ {
+ 	int ret;
+-	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
++	ret = wait_event_timeout(tty_ldisc_idle,
+ 			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+ 	if (ret < 0)
+ 		return ret;
+@@ -735,6 +735,8 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+ 	if (IS_ERR(ld))
+ 		return -1;
+ 
++	WARN_ON_ONCE(tty_ldisc_wait_idle(tty));
++
+ 	tty_ldisc_close(tty, tty->ldisc);
+ 	tty_ldisc_put(tty->ldisc);
+ 	tty->ldisc = NULL;
+diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
+index dd39c1e..26c352a 100644
+--- a/drivers/i2c/busses/i2c-taos-evm.c
++++ b/drivers/i2c/busses/i2c-taos-evm.c
+@@ -234,7 +234,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
+ 
+ 	if (taos->state != TAOS_STATE_IDLE) {
+ 		err = -ENODEV;
+-		dev_dbg(&serio->dev, "TAOS EVM reset failed (state=%d, "
++		dev_err(&serio->dev, "TAOS EVM reset failed (state=%d, "
+ 			"pos=%d)\n", taos->state, taos->pos);
+ 		goto exit_close;
+ 	}
+@@ -255,7 +255,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
+ 					 msecs_to_jiffies(250));
+ 	if (taos->state != TAOS_STATE_IDLE) {
+ 		err = -ENODEV;
+-		dev_err(&adapter->dev, "Echo off failed "
++		dev_err(&serio->dev, "TAOS EVM echo off failed "
+ 			"(state=%d)\n", taos->state);
+ 		goto exit_close;
+ 	}
+@@ -263,7 +263,7 @@ static int taos_connect(struct serio *serio, struct serio_driver *drv)
+ 	err = i2c_add_adapter(adapter);
+ 	if (err)
+ 		goto exit_close;
+-	dev_dbg(&serio->dev, "Connected to TAOS EVM\n");
++	dev_info(&serio->dev, "Connected to TAOS EVM\n");
+ 
+ 	taos->client = taos_instantiate_device(adapter);
+ 	return 0;
+@@ -288,7 +288,7 @@ static void taos_disconnect(struct serio *serio)
+ 	serio_set_drvdata(serio, NULL);
+ 	kfree(taos);
+ 
+-	dev_dbg(&serio->dev, "Disconnected from TAOS EVM\n");
++	dev_info(&serio->dev, "Disconnected from TAOS EVM\n");
+ }
+ 
+ static struct serio_device_id taos_serio_ids[] = {
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7c5129f..c199c70 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -6623,6 +6623,7 @@ static int remove_and_add_spares(mddev_t *mddev)
+ 		list_for_each_entry(rdev, &mddev->disks, same_set) {
+ 			if (rdev->raid_disk >= 0 &&
+ 			    !test_bit(In_sync, &rdev->flags) &&
++			    !test_bit(Faulty, &rdev->flags) &&
+ 			    !test_bit(Blocked, &rdev->flags))
+ 				spares++;
+ 			if (rdev->raid_disk < 0
+diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
+index f854698..348cb71 100644
+--- a/drivers/media/video/uvc/uvc_queue.c
++++ b/drivers/media/video/uvc/uvc_queue.c
+@@ -165,6 +165,8 @@ int uvc_free_buffers(struct uvc_video_queue *queue)
+ 	}
+ 
+ 	if (queue->count) {
++		uvc_queue_cancel(queue, 0);
++		INIT_LIST_HEAD(&queue->mainqueue);
+ 		vfree(queue->mem);
+ 		queue->count = 0;
+ 	}
+diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
+index fb58830..0e5b54b 100644
+--- a/drivers/net/hamradio/6pack.c
++++ b/drivers/net/hamradio/6pack.c
+@@ -690,10 +690,10 @@ static void sixpack_close(struct tty_struct *tty)
+ {
+ 	struct sixpack *sp;
+ 
+-	write_lock(&disc_data_lock);
++	write_lock_bh(&disc_data_lock);
+ 	sp = tty->disc_data;
+ 	tty->disc_data = NULL;
+-	write_unlock(&disc_data_lock);
++	write_unlock_bh(&disc_data_lock);
+ 	if (!sp)
+ 		return;
+ 
+diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
+index db4b7f1..6cfc2b4 100644
+--- a/drivers/net/hamradio/mkiss.c
++++ b/drivers/net/hamradio/mkiss.c
+@@ -811,10 +811,10 @@ static void mkiss_close(struct tty_struct *tty)
+ {
+ 	struct mkiss *ax;
+ 
+-	write_lock(&disc_data_lock);
++	write_lock_bh(&disc_data_lock);
+ 	ax = tty->disc_data;
+ 	tty->disc_data = NULL;
+-	write_unlock(&disc_data_lock);
++	write_unlock_bh(&disc_data_lock);
+ 
+ 	if (!ax)
+ 		return;
+diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
+index 9a96550..caed62b 100644
+--- a/drivers/net/wireless/ath/ath5k/eeprom.c
++++ b/drivers/net/wireless/ath/ath5k/eeprom.c
+@@ -1588,14 +1588,12 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
+ 		if (!chinfo[pier].pd_curves)
+ 			continue;
+ 
+-		for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
++		for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
+ 			struct ath5k_pdgain_info *pd =
+ 					&chinfo[pier].pd_curves[pdg];
+ 
+-			if (pd != NULL) {
+-				kfree(pd->pd_step);
+-				kfree(pd->pd_pwr);
+-			}
++			kfree(pd->pd_step);
++			kfree(pd->pd_pwr);
+ 		}
+ 
+ 		kfree(chinfo[pier].pd_curves);
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index d784a8b..3ac64a5 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1187,13 +1187,22 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
+ 		for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ 			intf = udev->actconfig->interface[i];
+ 			status = usb_suspend_interface(udev, intf, msg);
++
++			/* Ignore errors during system sleep transitions */
++			if (!(msg.event & PM_EVENT_AUTO))
++				status = 0;
+ 			if (status != 0)
+ 				break;
+ 		}
+ 	}
+-	if (status == 0)
++	if (status == 0) {
+ 		status = usb_suspend_device(udev, msg);
+ 
++		/* Again, ignore errors during system sleep transitions */
++		if (!(msg.event & PM_EVENT_AUTO))
++			status = 0;
++	}
++
+ 	/* If the suspend failed, resume interfaces that did get suspended */
+ 	if (status != 0) {
+ 		pm_message_t msg2;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 03eed28..2b428fc 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2188,6 +2188,10 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
+ 				USB_DEVICE_REMOTE_WAKEUP, 0,
+ 				NULL, 0,
+ 				USB_CTRL_SET_TIMEOUT);
++
++		/* System sleep transitions should never fail */
++		if (!(msg.event & PM_EVENT_AUTO))
++			status = 0;
+ 	} else {
+ 		/* device has up to 10 msec to fully suspend */
+ 		dev_dbg(&udev->dev, "usb %ssuspend\n",
+@@ -2427,16 +2431,15 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
+ 	struct usb_device	*hdev = hub->hdev;
+ 	unsigned		port1;
+ 
+-	/* fail if children aren't already suspended */
++	/* Warn if children aren't already suspended */
+ 	for (port1 = 1; port1 <= hdev->maxchild; port1++) {
+ 		struct usb_device	*udev;
+ 
+ 		udev = hdev->children [port1-1];
+ 		if (udev && udev->can_submit) {
+-			if (!(msg.event & PM_EVENT_AUTO))
+-				dev_dbg(&intf->dev, "port %d nyet suspended\n",
+-						port1);
+-			return -EBUSY;
++			dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
++			if (msg.event & PM_EVENT_AUTO)
++				return -EBUSY;
+ 		}
+ 	}
+ 
+diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
+index a24a92f..56661a2 100644
+--- a/drivers/usb/host/xhci-hcd.c
++++ b/drivers/usb/host/xhci-hcd.c
+@@ -979,6 +979,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 	u32 added_ctxs;
+ 	unsigned int last_ctx;
+ 	u32 new_add_flags, new_drop_flags, new_slot_info;
++	struct xhci_virt_device *virt_dev;
+ 	int ret = 0;
+ 
+ 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
+@@ -1007,11 +1008,25 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 		return -EINVAL;
+ 	}
+ 
+-	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+-	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
++	virt_dev = xhci->devs[udev->slot_id];
++	in_ctx = virt_dev->in_ctx;
++	out_ctx = virt_dev->out_ctx;
+ 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+ 	ep_index = xhci_get_endpoint_index(&ep->desc);
+ 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
++
++	/* If this endpoint is already in use, and the upper layers are trying
++	 * to add it again without dropping it, reject the addition.
++	 */
++	if (virt_dev->eps[ep_index].ring &&
++			!(le32_to_cpu(ctrl_ctx->drop_flags) &
++				xhci_get_endpoint_flag(&ep->desc))) {
++		xhci_warn(xhci, "Trying to add endpoint 0x%x "
++				"without dropping it.\n",
++				(unsigned int) ep->desc.bEndpointAddress);
++		return -EINVAL;
++	}
++
+ 	/* If the HCD has already noted the endpoint is enabled,
+ 	 * ignore this request.
+ 	 */
+@@ -1026,8 +1041,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ 	 * process context, not interrupt context (or so documenation
+ 	 * for usb_set_interface() and usb_set_configuration() claim).
+ 	 */
+-	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
+-				udev, ep, GFP_KERNEL) < 0) {
++	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
+ 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
+ 				__func__, ep->desc.bEndpointAddress);
+ 		return -ENOMEM;
+diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
+index 08e8a6a..e797a2c 100644
+--- a/drivers/watchdog/mtx-1_wdt.c
++++ b/drivers/watchdog/mtx-1_wdt.c
+@@ -211,6 +211,12 @@ static int __devinit mtx1_wdt_probe(struct platform_device *pdev)
+ 	int ret;
+ 
+ 	mtx1_wdt_device.gpio = pdev->resource[0].start;
++	ret = gpio_request_one(mtx1_wdt_device.gpio,
++				GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
++	if (ret < 0) {
++		dev_err(&pdev->dev, "failed to request gpio");
++		return ret;
++	}
+ 
+ 	spin_lock_init(&mtx1_wdt_device.lock);
+ 	init_completion(&mtx1_wdt_device.stop);
+@@ -236,6 +242,8 @@ static int __devexit mtx1_wdt_remove(struct platform_device *pdev)
+ 		mtx1_wdt_device.queue = 0;
+ 		wait_for_completion(&mtx1_wdt_device.stop);
+ 	}
++
++	gpio_free(mtx1_wdt_device.gpio);
+ 	misc_deregister(&mtx1_wdt_misc);
+ 	return 0;
+ }
+diff --git a/fs/inode.c b/fs/inode.c
+index 4d8e3be..8bbe005 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -256,6 +256,20 @@ void destroy_inode(struct inode *inode)
+ 		kmem_cache_free(inode_cachep, (inode));
+ }
+ 
++void address_space_init_once(struct address_space *mapping)
++{
++	memset(mapping, 0, sizeof(*mapping));
++	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
++	spin_lock_init(&mapping->tree_lock);
++	spin_lock_init(&mapping->i_mmap_lock);
++	INIT_LIST_HEAD(&mapping->private_list);
++	spin_lock_init(&mapping->private_lock);
++	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
++	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
++	mutex_init(&mapping->unmap_mutex);
++}
++EXPORT_SYMBOL(address_space_init_once);
++
+ /*
+  * These are initializations that only need to be done
+  * once, because the fields are idempotent across use
+@@ -267,13 +281,7 @@ void inode_init_once(struct inode *inode)
+ 	INIT_HLIST_NODE(&inode->i_hash);
+ 	INIT_LIST_HEAD(&inode->i_dentry);
+ 	INIT_LIST_HEAD(&inode->i_devices);
+-	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
+-	spin_lock_init(&inode->i_data.tree_lock);
+-	spin_lock_init(&inode->i_data.i_mmap_lock);
+-	INIT_LIST_HEAD(&inode->i_data.private_list);
+-	spin_lock_init(&inode->i_data.private_lock);
+-	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
+-	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
++	address_space_init_once(&inode->i_data);
+ 	i_size_ordered_init(inode);
+ #ifdef CONFIG_INOTIFY
+ 	INIT_LIST_HEAD(&inode->inotify_watches);
+diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
+index 84c2538..8dff317 100644
+--- a/fs/nilfs2/btnode.c
++++ b/fs/nilfs2/btnode.c
+@@ -34,19 +34,6 @@
+ #include "btnode.h"
+ 
+ 
+-void nilfs_btnode_cache_init_once(struct address_space *btnc)
+-{
+-	memset(btnc, 0, sizeof(*btnc));
+-	INIT_RADIX_TREE(&btnc->page_tree, GFP_ATOMIC);
+-	spin_lock_init(&btnc->tree_lock);
+-	INIT_LIST_HEAD(&btnc->private_list);
+-	spin_lock_init(&btnc->private_lock);
+-
+-	spin_lock_init(&btnc->i_mmap_lock);
+-	INIT_RAW_PRIO_TREE_ROOT(&btnc->i_mmap);
+-	INIT_LIST_HEAD(&btnc->i_mmap_nonlinear);
+-}
+-
+ static const struct address_space_operations def_btnode_aops = {
+ 	.sync_page		= block_sync_page,
+ };
+diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
+index 3e22751..067913e 100644
+--- a/fs/nilfs2/btnode.h
++++ b/fs/nilfs2/btnode.h
+@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
+ 	struct buffer_head *newbh;
+ };
+ 
+-void nilfs_btnode_cache_init_once(struct address_space *);
+ void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
+ void nilfs_btnode_cache_clear(struct address_space *);
+ int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 63e7b10..93c11af3 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -166,7 +166,7 @@ static void init_once(void *obj)
+ #ifdef CONFIG_NILFS_XATTR
+ 	init_rwsem(&ii->xattr_sem);
+ #endif
+-	nilfs_btnode_cache_init_once(&ii->i_btnode_cache);
++	address_space_init_once(&ii->i_btnode_cache);
+ 	ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union;
+ 	inode_init_once(&ii->vfs_inode);
+ }
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index 4b67559..d48ddf0 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -50,6 +50,22 @@ struct bug_entry {
+ #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+ #endif
+ 
++#define __WARN_RATELIMIT(condition, state, format...)		\
++({								\
++	int rtn = 0;						\
++	if (unlikely(__ratelimit(state)))			\
++		rtn = WARN(condition, format);			\
++	rtn;							\
++})
++
++#define WARN_RATELIMIT(condition, format...)			\
++({								\
++	static DEFINE_RATELIMIT_STATE(_rs,			\
++				      DEFAULT_RATELIMIT_INTERVAL,	\
++				      DEFAULT_RATELIMIT_BURST);	\
++	__WARN_RATELIMIT(condition, &_rs, format);		\
++})
++
+ /*
+  * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
+  * significant issues that need prompt attention if they should ever
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index f73bc1b..ca5c0c4 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -190,6 +190,7 @@ struct clocksource {
+ #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+ 	/* Watchdog related data, used by the framework */
+ 	struct list_head wd_list;
++	cycle_t cs_last;
+ 	cycle_t wd_last;
+ #endif
+ };
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 1ff0962..1b9a47a 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -635,6 +635,7 @@ struct address_space {
+ 	spinlock_t		private_lock;	/* for use by the address_space */
+ 	struct list_head	private_list;	/* ditto */
+ 	struct address_space	*assoc_mapping;	/* ditto */
++	struct mutex		unmap_mutex;    /* to protect unmapping */
+ } __attribute__((aligned(sizeof(long))));
+ 	/*
+ 	 * On most architectures that alignment is already the case; but
+@@ -2158,6 +2159,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+ 
+ extern int inode_init_always(struct super_block *, struct inode *);
+ extern void inode_init_once(struct inode *);
++extern void address_space_init_once(struct address_space *mapping);
+ extern void inode_add_to_lists(struct super_block *, struct inode *);
+ extern void iput(struct inode *);
+ extern struct inode * igrab(struct inode *);
+diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
+index dea7d6b..8851a01 100644
+--- a/include/linux/if_packet.h
++++ b/include/linux/if_packet.h
+@@ -63,6 +63,7 @@ struct tpacket_auxdata
+ 	__u16		tp_mac;
+ 	__u16		tp_net;
+ 	__u16		tp_vlan_tci;
++	__u16		tp_padding;
+ };
+ 
+ /* Rx ring - header status */
+@@ -103,6 +104,7 @@ struct tpacket2_hdr
+ 	__u32		tp_sec;
+ 	__u32		tp_nsec;
+ 	__u16		tp_vlan_tci;
++	__u16		tp_padding;
+ };
+ 
+ #define TPACKET2_HDRLEN		(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index a63b219..c344646 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+  *
+  * Returns the first attribute which matches the specified type.
+  */
+-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
++static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
+ 					     int hdrlen, int attrtype)
+ {
+ 	return nla_find(nlmsg_attrdata(nlh, hdrlen),
+diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
+index a8520b0..356f487 100644
+--- a/kernel/power/snapshot.c
++++ b/kernel/power/snapshot.c
+@@ -1120,9 +1120,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
+ 	return nr_alloc;
+ }
+ 
+-static unsigned long preallocate_image_memory(unsigned long nr_pages)
++static unsigned long preallocate_image_memory(unsigned long nr_pages,
++					      unsigned long avail_normal)
+ {
+-	return preallocate_image_pages(nr_pages, GFP_IMAGE);
++	unsigned long alloc;
++
++	if (avail_normal <= alloc_normal)
++		return 0;
++
++	alloc = avail_normal - alloc_normal;
++	if (nr_pages < alloc)
++		alloc = nr_pages;
++
++	return preallocate_image_pages(alloc, GFP_IMAGE);
+ }
+ 
+ #ifdef CONFIG_HIGHMEM
+@@ -1168,15 +1178,26 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
+  */
+ static void free_unnecessary_pages(void)
+ {
+-	unsigned long save_highmem, to_free_normal, to_free_highmem;
++	unsigned long save, to_free_normal, to_free_highmem;
+ 
+-	to_free_normal = alloc_normal - count_data_pages();
+-	save_highmem = count_highmem_pages();
+-	if (alloc_highmem > save_highmem) {
+-		to_free_highmem = alloc_highmem - save_highmem;
++	save = count_data_pages();
++	if (alloc_normal >= save) {
++		to_free_normal = alloc_normal - save;
++		save = 0;
++	} else {
++		to_free_normal = 0;
++		save -= alloc_normal;
++	}
++	save += count_highmem_pages();
++	if (alloc_highmem >= save) {
++		to_free_highmem = alloc_highmem - save;
+ 	} else {
+ 		to_free_highmem = 0;
+-		to_free_normal -= save_highmem - alloc_highmem;
++		save -= alloc_highmem;
++		if (to_free_normal > save)
++			to_free_normal -= save;
++		else
++			to_free_normal = 0;
+ 	}
+ 
+ 	memory_bm_position_reset(&copy_bm);
+@@ -1257,7 +1278,7 @@ int hibernate_preallocate_memory(void)
+ {
+ 	struct zone *zone;
+ 	unsigned long saveable, size, max_size, count, highmem, pages = 0;
+-	unsigned long alloc, save_highmem, pages_highmem;
++	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
+ 	struct timeval start, stop;
+ 	int error;
+ 
+@@ -1294,6 +1315,7 @@ int hibernate_preallocate_memory(void)
+ 		else
+ 			count += zone_page_state(zone, NR_FREE_PAGES);
+ 	}
++	avail_normal = count;
+ 	count += highmem;
+ 	count -= totalreserve_pages;
+ 
+@@ -1308,12 +1330,21 @@ int hibernate_preallocate_memory(void)
+ 	 */
+ 	if (size >= saveable) {
+ 		pages = preallocate_image_highmem(save_highmem);
+-		pages += preallocate_image_memory(saveable - pages);
++		pages += preallocate_image_memory(saveable - pages, avail_normal);
+ 		goto out;
+ 	}
+ 
+ 	/* Estimate the minimum size of the image. */
+ 	pages = minimum_image_size(saveable);
++	/*
++	 * To avoid excessive pressure on the normal zone, leave room in it to
++	 * accommodate an image of the minimum size (unless it's already too
++	 * small, in which case don't preallocate pages from it at all).
++	 */
++	if (avail_normal > pages)
++		avail_normal -= pages;
++	else
++		avail_normal = 0;
+ 	if (size < pages)
+ 		size = min_t(unsigned long, pages, max_size);
+ 
+@@ -1334,16 +1365,34 @@ int hibernate_preallocate_memory(void)
+ 	 */
+ 	pages_highmem = preallocate_image_highmem(highmem / 2);
+ 	alloc = (count - max_size) - pages_highmem;
+-	pages = preallocate_image_memory(alloc);
+-	if (pages < alloc)
+-		goto err_out;
+-	size = max_size - size;
+-	alloc = size;
+-	size = preallocate_highmem_fraction(size, highmem, count);
+-	pages_highmem += size;
+-	alloc -= size;
+-	pages += preallocate_image_memory(alloc);
+-	pages += pages_highmem;
++	pages = preallocate_image_memory(alloc, avail_normal);
++	if (pages < alloc) {
++		/* We have exhausted non-highmem pages, try highmem. */
++		alloc -= pages;
++		pages += pages_highmem;
++		pages_highmem = preallocate_image_highmem(alloc);
++		if (pages_highmem < alloc)
++			goto err_out;
++		pages += pages_highmem;
++		/*
++		 * size is the desired number of saveable pages to leave in
++		 * memory, so try to preallocate (all memory - size) pages.
++		 */
++		alloc = (count - pages) - size;
++		pages += preallocate_image_highmem(alloc);
++	} else {
++		/*
++		 * There are approximately max_size saveable pages at this point
++		 * and we want to reduce this number down to size.
++		 */
++		alloc = max_size - size;
++		size = preallocate_highmem_fraction(alloc, highmem, count);
++		pages_highmem += size;
++		alloc -= size;
++		size = preallocate_image_memory(alloc, avail_normal);
++		pages_highmem += preallocate_image_highmem(alloc - size);
++		pages += pages_highmem + size;
++	}
+ 
+ 	/*
+ 	 * We only need as many page frames for the image as there are saveable
+diff --git a/kernel/power/user.c b/kernel/power/user.c
+index b135356..b15c3d7 100644
+--- a/kernel/power/user.c
++++ b/kernel/power/user.c
+@@ -113,8 +113,10 @@ static int snapshot_open(struct inode *inode, struct file *filp)
+ 		if (error)
+ 			pm_notifier_call_chain(PM_POST_RESTORE);
+ 	}
+-	if (error)
++	if (error) {
++		free_basic_memory_bitmaps();
+ 		atomic_inc(&snapshot_device_available);
++	}
+ 	data->frozen = 0;
+ 	data->ready = 0;
+ 	data->platform_support = 0;
+diff --git a/kernel/taskstats.c b/kernel/taskstats.c
+index ea8384d..b080920 100644
+--- a/kernel/taskstats.c
++++ b/kernel/taskstats.c
+@@ -293,16 +293,18 @@ ret:
+ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
+ {
+ 	struct listener_list *listeners;
+-	struct listener *s, *tmp;
++	struct listener *s, *tmp, *s2;
+ 	unsigned int cpu;
+ 
+ 	if (!cpumask_subset(mask, cpu_possible_mask))
+ 		return -EINVAL;
+ 
++	s = NULL;
+ 	if (isadd == REGISTER) {
+ 		for_each_cpu(cpu, mask) {
+-			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
+-					 cpu_to_node(cpu));
++			if (!s)
++				s = kmalloc_node(sizeof(struct listener),
++						 GFP_KERNEL, cpu_to_node(cpu));
+ 			if (!s)
+ 				goto cleanup;
+ 			s->pid = pid;
+@@ -311,9 +313,16 @@ static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
+ 
+ 			listeners = &per_cpu(listener_array, cpu);
+ 			down_write(&listeners->sem);
++			list_for_each_entry_safe(s2, tmp, &listeners->list, list) {
++				if (s2->pid == pid)
++					goto next_cpu;
++			}
+ 			list_add(&s->list, &listeners->list);
++			s = NULL;
++next_cpu:
+ 			up_write(&listeners->sem);
+ 		}
++		kfree(s);
+ 		return 0;
+ 	}
+ 
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index bf4fe98..a02e070 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -131,7 +131,6 @@ static struct clocksource *watchdog;
+ static struct timer_list watchdog_timer;
+ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
+ static DEFINE_SPINLOCK(watchdog_lock);
+-static cycle_t watchdog_last;
+ static int watchdog_running;
+ 
+ static int clocksource_watchdog_kthread(void *data);
+@@ -200,11 +199,6 @@ static void clocksource_watchdog(unsigned long data)
+ 	if (!watchdog_running)
+ 		goto out;
+ 
+-	wdnow = watchdog->read(watchdog);
+-	wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
+-				     watchdog->mult, watchdog->shift);
+-	watchdog_last = wdnow;
+-
+ 	list_for_each_entry(cs, &watchdog_list, wd_list) {
+ 
+ 		/* Clocksource already marked unstable? */
+@@ -214,19 +208,28 @@ static void clocksource_watchdog(unsigned long data)
+ 			continue;
+ 		}
+ 
++		local_irq_disable();
+ 		csnow = cs->read(cs);
++		wdnow = watchdog->read(watchdog);
++		local_irq_enable();
+ 
+ 		/* Clocksource initialized ? */
+ 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
+ 			cs->flags |= CLOCK_SOURCE_WATCHDOG;
+-			cs->wd_last = csnow;
++			cs->wd_last = wdnow;
++			cs->cs_last = csnow;
+ 			continue;
+ 		}
+ 
+-		/* Check the deviation from the watchdog clocksource. */
+-		cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
++		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
++					     watchdog->mult, watchdog->shift);
++
++		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
+ 					     cs->mask, cs->mult, cs->shift);
+-		cs->wd_last = csnow;
++		cs->cs_last = csnow;
++		cs->wd_last = wdnow;
++
++		/* Check the deviation from the watchdog clocksource. */
+ 		if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
+ 			clocksource_unstable(cs, cs_nsec - wd_nsec);
+ 			continue;
+@@ -264,7 +267,6 @@ static inline void clocksource_start_watchdog(void)
+ 		return;
+ 	init_timer(&watchdog_timer);
+ 	watchdog_timer.function = clocksource_watchdog;
+-	watchdog_last = watchdog->read(watchdog);
+ 	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
+ 	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
+ 	watchdog_running = 1;
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index eae56fd..2b413db 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -196,7 +196,7 @@ static void free_object(struct debug_obj *obj)
+ 	 * initialized:
+ 	 */
+ 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
+-		sched = !work_pending(&debug_obj_work);
++		sched = keventd_up() && !work_pending(&debug_obj_work);
+ 	hlist_add_head(&obj->node, &obj_pool);
+ 	obj_pool_free++;
+ 	obj_pool_used--;
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0cf9863..5e1e508 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1074,6 +1074,14 @@ static void __init gather_bootmem_prealloc(void)
+ 		WARN_ON(page_count(page) != 1);
+ 		prep_compound_huge_page(page, h->order);
+ 		prep_new_huge_page(h, page, page_to_nid(page));
++		/*
++		 * If we had gigantic hugepages allocated at boot time, we need
++		 * to restore the 'stolen' pages to totalram_pages in order to
++		 * fix confusing memory reports from free(1) and another
++		 * side-effects, like CommitLimit going negative.
++		 */
++		if (h->order > (MAX_ORDER - 1))
++			totalram_pages += 1 << h->order;
+ 	}
+ }
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index e9501f8..318ab79 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
+ 		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
+ 		ksm_scan.mm_slot = slot;
+ 		spin_unlock(&ksm_mmlist_lock);
++		/*
++		 * Although we tested list_empty() above, a racing __ksm_exit
++		 * of the last mm on the list may have removed it since then.
++		 */
++		if (slot == &ksm_mm_head)
++			return NULL;
+ next_mm:
+ 		ksm_scan.address = 0;
+ 		ksm_scan.rmap_item = list_entry(&slot->rmap_list,
+diff --git a/mm/memory.c b/mm/memory.c
+index 53c1da0..6c836d3 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2454,6 +2454,7 @@ void unmap_mapping_range(struct address_space *mapping,
+ 		details.last_index = ULONG_MAX;
+ 	details.i_mmap_lock = &mapping->i_mmap_lock;
+ 
++	mutex_lock(&mapping->unmap_mutex);
+ 	spin_lock(&mapping->i_mmap_lock);
+ 
+ 	/* Protect against endless unmapping loops */
+@@ -2470,6 +2471,7 @@ void unmap_mapping_range(struct address_space *mapping,
+ 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+ 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
+ 	spin_unlock(&mapping->i_mmap_lock);
++	mutex_unlock(&mapping->unmap_mutex);
+ }
+ EXPORT_SYMBOL(unmap_mapping_range);
+ 
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 0e39f94..aaca868 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -314,7 +314,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
+ 	 */
+ 	__dec_zone_page_state(page, NR_FILE_PAGES);
+ 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+-	if (PageSwapBacked(page)) {
++	if (!PageSwapCache(page) && PageSwapBacked(page)) {
+ 		__dec_zone_page_state(page, NR_SHMEM);
+ 		__inc_zone_page_state(newpage, NR_SHMEM);
+ 	}
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 0127397..d162169 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -291,7 +291,9 @@ load_b:
+ 			mem[f_k] = X;
+ 			continue;
+ 		default:
+-			WARN_ON(1);
++			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
++				       fentry->code, fentry->jt,
++				       fentry->jf, fentry->k);
+ 			return 0;
+ 		}
+ 
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 57737b8..a289878 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -460,6 +460,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	if (addr_len < sizeof(struct sockaddr_in))
+ 		goto out;
+ 
++	if (addr->sin_family != AF_INET)
++		goto out;
++
+ 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+ 
+ 	/* Not specified by any standard per-se, however it breaks too
+diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
+index 6fe360f..dba56d2 100644
+--- a/net/ipv4/inet_diag.c
++++ b/net/ipv4/inet_diag.c
+@@ -436,7 +436,7 @@ static int valid_cc(const void *bc, int len, int cc)
+ 			return 0;
+ 		if (cc == len)
+ 			return 1;
+-		if (op->yes < 4)
++		if (op->yes < 4 || op->yes & 3)
+ 			return 0;
+ 		len -= op->yes;
+ 		bc  += op->yes;
+@@ -446,11 +446,11 @@ static int valid_cc(const void *bc, int len, int cc)
+ 
+ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
+ {
+-	const unsigned char *bc = bytecode;
++	const void *bc = bytecode;
+ 	int  len = bytecode_len;
+ 
+ 	while (len > 0) {
+-		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
++		const struct inet_diag_bc_op *op = bc;
+ 
+ //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
+ 		switch (op->code) {
+@@ -461,22 +461,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
+ 		case INET_DIAG_BC_S_LE:
+ 		case INET_DIAG_BC_D_GE:
+ 		case INET_DIAG_BC_D_LE:
+-			if (op->yes < 4 || op->yes > len + 4)
+-				return -EINVAL;
+ 		case INET_DIAG_BC_JMP:
+-			if (op->no < 4 || op->no > len + 4)
++			if (op->no < 4 || op->no > len + 4 || op->no & 3)
+ 				return -EINVAL;
+ 			if (op->no < len &&
+ 			    !valid_cc(bytecode, bytecode_len, len - op->no))
+ 				return -EINVAL;
+ 			break;
+ 		case INET_DIAG_BC_NOP:
+-			if (op->yes < 4 || op->yes > len + 4)
+-				return -EINVAL;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
++			return -EINVAL;
+ 		bc  += op->yes;
+ 		len -= op->yes;
+ 	}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 31db78c..0ac8833 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1011,6 +1011,9 @@ csum_copy_err:
+ 
+ 	if (noblock)
+ 		return -EAGAIN;
++
++	/* starting over for a new packet */
++	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index cf538ed..ca520d4 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -304,8 +304,11 @@ csum_copy_err:
+ 	}
+ 	release_sock(sk);
+ 
+-	if (flags & MSG_DONTWAIT)
++	if (noblock)
+ 		return -EAGAIN;
++
++	/* starting over for a new packet */
++	msg->msg_flags &= ~MSG_TRUNC;
+ 	goto try_again;
+ }
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 9d9b78e..35cfa79 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -767,6 +767,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		h.h2->tp_sec = ts.tv_sec;
+ 		h.h2->tp_nsec = ts.tv_nsec;
+ 		h.h2->tp_vlan_tci = skb->vlan_tci;
++		h.h2->tp_padding = 0;
+ 		hdrlen = sizeof(*h.h2);
+ 		break;
+ 	default:
+@@ -1499,6 +1500,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 		aux.tp_net = skb_network_offset(skb);
+ 		aux.tp_vlan_tci = skb->vlan_tci;
+ 
++		aux.tp_padding = 0;
+ 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
+ 	}
+ 
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index 4c32700..8f4fe73 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -548,13 +548,13 @@ retry:
+ 	}
+ 	inode = &gss_msg->inode->vfs_inode;
+ 	for (;;) {
+-		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
++		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
+ 		spin_lock(&inode->i_lock);
+ 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
+ 			break;
+ 		}
+ 		spin_unlock(&inode->i_lock);
+-		if (signalled()) {
++		if (fatal_signal_pending(current)) {
+ 			err = -ERESTARTSYS;
+ 			goto out_intr;
+ 		}
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 38829e2..d838bea 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -938,7 +938,7 @@ call_allocate(struct rpc_task *task)
+ 
+ 	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
+ 
+-	if (RPC_IS_ASYNC(task) || !signalled()) {
++	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
+ 		task->tk_action = call_allocate;
+ 		rpc_delay(task, HZ>>4);
+ 		return;

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.44.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.44.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.44.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.44.patch)
@@ -0,0 +1,1457 @@
+diff --git a/Makefile b/Makefile
+index 26f57ee..f035471 100644
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 62619f2..a94e49c 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
+ 		return -EFAULT;
+ 
+ 	len = namelen;
+-	if (namelen > 32)
++	if (len > 32)
+ 		len = 32;
+ 
+ 	down_read(&uts_sem);
+@@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ 	down_read(&uts_sem);
+ 	res = sysinfo_table[offset];
+ 	len = strlen(res)+1;
+-	if (len > count)
++	if ((unsigned long)len > (unsigned long)count)
+ 		len = count;
+ 	if (copy_to_user(buf, res, len))
+ 		err = -EFAULT;
+@@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
+ 		return 1;
+ 
+ 	case GSI_GET_HWRPB:
+-		if (nbytes < sizeof(*hwrpb))
++		if (nbytes > sizeof(*hwrpb))
+ 			return -EINVAL;
+ 		if (copy_to_user(buffer, hwrpb, nbytes) != 0)
+ 			return -EFAULT;
+@@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
+ {
+ 	struct rusage r;
+ 	long ret, err;
++	unsigned int status = 0;
+ 	mm_segment_t old_fs;
+ 
+ 	if (!ur)
+@@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
+ 	old_fs = get_fs();
+ 		
+ 	set_fs (KERNEL_DS);
+-	ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
++	ret = sys_wait4(pid, (unsigned int __user *) &status, options,
++			(struct rusage __user *) &r);
+ 	set_fs (old_fs);
+ 
+ 	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
+ 		return -EFAULT;
+ 
+ 	err = 0;
++	err |= put_user(status, ustatus);
+ 	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
+ 	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
+ 	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
+index 52dd804..8904495 100644
+--- a/arch/arm/mach-davinci/board-dm365-evm.c
++++ b/arch/arm/mach-davinci/board-dm365-evm.c
+@@ -413,7 +413,7 @@ fail:
+ 	 */
+ 	if (have_imager()) {
+ 		label = "HD imager";
+-		mux |= 1;
++		mux |= 2;
+ 
+ 		/* externally mux MMC1/ENET/AIC33 to imager */
+ 		mux |= BIT(6) | BIT(5) | BIT(3);
+@@ -434,7 +434,7 @@ fail:
+ 		resets &= ~BIT(1);
+ 
+ 		if (have_tvp7002()) {
+-			mux |= 2;
++			mux |= 1;
+ 			resets &= ~BIT(2);
+ 			label = "tvp7002 HD";
+ 		} else {
+diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
+index 102916f..b6bfe7e 100644
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -143,10 +143,10 @@ static mfp_cfg_t cm_x300_mfp_cfg[] __initdata = {
+ 	GPIO99_GPIO,			/* Ethernet IRQ */
+ 
+ 	/* RTC GPIOs */
+-	GPIO95_GPIO,			/* RTC CS */
+-	GPIO96_GPIO,			/* RTC WR */
+-	GPIO97_GPIO,			/* RTC RD */
+-	GPIO98_GPIO,			/* RTC IO */
++	GPIO95_GPIO | MFP_LPM_DRIVE_HIGH,	/* RTC CS */
++	GPIO96_GPIO | MFP_LPM_DRIVE_HIGH,	/* RTC WR */
++	GPIO97_GPIO | MFP_LPM_DRIVE_HIGH,	/* RTC RD */
++	GPIO98_GPIO,				/* RTC IO */
+ 
+ 	/* Standard I2C */
+ 	GPIO21_I2C_SCL,
+diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
+index 5009198..d374974 100644
+--- a/arch/powerpc/kernel/crash.c
++++ b/arch/powerpc/kernel/crash.c
+@@ -176,12 +176,8 @@ static void crash_kexec_wait_realmode(int cpu)
+ 
+ 		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+ 			barrier();
+-			if (!cpu_possible(i)) {
++			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
+ 				break;
+-			}
+-			if (!cpu_online(i)) {
+-				break;
+-			}
+ 			msecs--;
+ 			mdelay(1);
+ 		}
+diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
+index 3f6a89b..041e87c 100644
+--- a/arch/powerpc/platforms/pseries/hvconsole.c
++++ b/arch/powerpc/platforms/pseries/hvconsole.c
+@@ -73,7 +73,7 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
+ 	if (ret == H_SUCCESS)
+ 		return count;
+ 	if (ret == H_BUSY)
+-		return 0;
++		return -EAGAIN;
+ 	return -EIO;
+ }
+ 
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 69cab24..987ef29 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -27,6 +27,9 @@
+ #define HPET_DEV_FSB_CAP		0x1000
+ #define HPET_DEV_PERI_CAP		0x2000
+ 
++#define HPET_MIN_CYCLES			128
++#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
++
+ #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
+ 
+ /*
+@@ -298,8 +301,9 @@ static void hpet_legacy_clockevent_register(void)
+ 	/* Calculate the min / max delta */
+ 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
+ 							   &hpet_clockevent);
+-	/* 5 usec minimum reprogramming delta. */
+-	hpet_clockevent.min_delta_ns = 5000;
++	/* Setup minimum reprogramming delta. */
++	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
++							   &hpet_clockevent);
+ 
+ 	/*
+ 	 * Start hpet with the boot cpu mask and make it
+@@ -379,36 +383,37 @@ static int hpet_next_event(unsigned long delta,
+ 			   struct clock_event_device *evt, int timer)
+ {
+ 	u32 cnt;
++	s32 res;
+ 
+ 	cnt = hpet_readl(HPET_COUNTER);
+ 	cnt += (u32) delta;
+ 	hpet_writel(cnt, HPET_Tn_CMP(timer));
+ 
+ 	/*
+-	 * We need to read back the CMP register on certain HPET
+-	 * implementations (ATI chipsets) which seem to delay the
+-	 * transfer of the compare register into the internal compare
+-	 * logic. With small deltas this might actually be too late as
+-	 * the counter could already be higher than the compare value
+-	 * at that point and we would wait for the next hpet interrupt
+-	 * forever. We found out that reading the CMP register back
+-	 * forces the transfer so we can rely on the comparison with
+-	 * the counter register below. If the read back from the
+-	 * compare register does not match the value we programmed
+-	 * then we might have a real hardware problem. We can not do
+-	 * much about it here, but at least alert the user/admin with
+-	 * a prominent warning.
+-	 * An erratum on some chipsets (ICH9,..), results in comparator read
+-	 * immediately following a write returning old value. Workaround
+-	 * for this is to read this value second time, when first
+-	 * read returns old value.
++	 * HPETs are a complete disaster. The compare register is
++	 * based on a equal comparison and neither provides a less
++	 * than or equal functionality (which would require to take
++	 * the wraparound into account) nor a simple count down event
++	 * mode. Further the write to the comparator register is
++	 * delayed internally up to two HPET clock cycles in certain
++	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
++	 * longer delays. We worked around that by reading back the
++	 * compare register, but that required another workaround for
++	 * ICH9,10 chips where the first readout after write can
++	 * return the old stale value. We already had a minimum
++	 * programming delta of 5us enforced, but a NMI or SMI hitting
++	 * between the counter readout and the comparator write can
++	 * move us behind that point easily. Now instead of reading
++	 * the compare register back several times, we make the ETIME
++	 * decision based on the following: Return ETIME if the
++	 * counter value after the write is less than HPET_MIN_CYCLES
++	 * away from the event or if the counter is already ahead of
++	 * the event. The minimum programming delta for the generic
++	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
+ 	 */
+-	if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) {
+-		WARN_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt,
+-		  KERN_WARNING "hpet: compare register read back failed.\n");
+-	}
++	res = (s32)(cnt - (u32)hpet_readl(HPET_COUNTER));
+ 
+-	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
++	return res < HPET_MIN_CYCLES ? -ETIME : 0;
+ }
+ 
+ static void hpet_legacy_set_mode(enum clock_event_mode mode,
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 200fcde..cf98100 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -469,6 +469,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ 		},
+ 	},
++	{	/* Handle problems with rebooting on the Latitude E5420. */
++		.callback = set_pci_reboot,
++		.ident = "Dell Latitude E5420",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
+index 4123553..36818f8 100644
+--- a/arch/x86/kernel/relocate_kernel_32.S
++++ b/arch/x86/kernel/relocate_kernel_32.S
+@@ -97,6 +97,8 @@ relocate_kernel:
+ 	ret
+ 
+ identity_mapped:
++	/* set return address to 0 if not preserving context */
++	pushl	$0
+ 	/* store the start address on the stack */
+ 	pushl   %edx
+ 
+diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
+index 4de8f5b..7a6f3b3 100644
+--- a/arch/x86/kernel/relocate_kernel_64.S
++++ b/arch/x86/kernel/relocate_kernel_64.S
+@@ -100,6 +100,8 @@ relocate_kernel:
+ 	ret
+ 
+ identity_mapped:
++	/* set return address to 0 if not preserving context */
++	pushq	$0
+ 	/* store the start address on the stack */
+ 	pushq   %rdx
+ 
+diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
+index 9486882..885ebe7 100644
+--- a/arch/xtensa/kernel/ptrace.c
++++ b/arch/xtensa/kernel/ptrace.c
+@@ -136,6 +136,9 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
+ 	elf_xtregs_t *xtregs = uregs;
+ 	int ret = 0;
+ 
++	if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
++		return -EFAULT;
++
+ #if XTENSA_HAVE_COPROCESSORS
+ 	/* Flush all coprocessors before we overwrite them. */
+ 	coprocessor_flush_all(ti);
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index fa9bed0..e5bdb9b 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2707,10 +2707,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 	}
+ 
+ 	/*
+-	 * Some controllers can't be frozen very well and may set
+-	 * spuruious error conditions during reset.  Clear accumulated
+-	 * error information.  As reset is the final recovery action,
+-	 * nothing is lost by doing this.
++	 * Some controllers can't be frozen very well and may set spurious
++	 * error conditions during reset.  Clear accumulated error
++	 * information and re-thaw the port if frozen.  As reset is the
++	 * final recovery action and we cross check link onlineness against
++	 * device classification later, no hotplug event is lost by this.
+ 	 */
+ 	spin_lock_irqsave(link->ap->lock, flags);
+ 	memset(&link->eh_info, 0, sizeof(link->eh_info));
+@@ -2719,6 +2720,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
+ 	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ 	spin_unlock_irqrestore(link->ap->lock, flags);
+ 
++	if (ap->pflags & ATA_PFLAG_FROZEN)
++		ata_eh_thaw_port(ap);
++
+ 	/*
+ 	 * Make sure onlineness and classification result correspond.
+ 	 * Hotplug could have happened during reset and some
+diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
+index 5ae1b1c..04d6bf8 100644
+--- a/drivers/block/cciss.h
++++ b/drivers/block/cciss.h
+@@ -165,7 +165,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
+ 	 printk("Sending %x - down to controller\n", c->busaddr );
+ #endif /* CCISS_DEBUG */ 
+          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+-	readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
++	readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+ 	 h->commands_outstanding++;
+ 	 if ( h->commands_outstanding > h->max_outstanding)
+ 		h->max_outstanding = h->commands_outstanding;
+diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
+index 9ac4972..5b6001b 100644
+--- a/drivers/hwmon/max1111.c
++++ b/drivers/hwmon/max1111.c
+@@ -39,6 +39,8 @@ struct max1111_data {
+ 	struct spi_transfer	xfer[2];
+ 	uint8_t *tx_buf;
+ 	uint8_t *rx_buf;
++	struct mutex		drvdata_lock;
++	/* protect msg, xfer and buffers from multiple access */
+ };
+ 
+ static int max1111_read(struct device *dev, int channel)
+@@ -47,6 +49,9 @@ static int max1111_read(struct device *dev, int channel)
+ 	uint8_t v1, v2;
+ 	int err;
+ 
++	/* writing to drvdata struct is not thread safe, wait on mutex */
++	mutex_lock(&data->drvdata_lock);
++
+ 	data->tx_buf[0] = (channel << MAX1111_CTRL_SEL_SH) |
+ 		MAX1111_CTRL_PD0 | MAX1111_CTRL_PD1 |
+ 		MAX1111_CTRL_SGL | MAX1111_CTRL_UNI | MAX1111_CTRL_STR;
+@@ -54,12 +59,15 @@ static int max1111_read(struct device *dev, int channel)
+ 	err = spi_sync(data->spi, &data->msg);
+ 	if (err < 0) {
+ 		dev_err(dev, "spi_sync failed with %d\n", err);
++		mutex_unlock(&data->drvdata_lock);
+ 		return err;
+ 	}
+ 
+ 	v1 = data->rx_buf[0];
+ 	v2 = data->rx_buf[1];
+ 
++	mutex_unlock(&data->drvdata_lock);
++
+ 	if ((v1 & 0xc0) || (v2 & 0x3f))
+ 		return -EINVAL;
+ 
+@@ -175,6 +183,8 @@ static int __devinit max1111_probe(struct spi_device *spi)
+ 	if (err)
+ 		goto err_free_data;
+ 
++	mutex_init(&data->drvdata_lock);
++
+ 	data->spi = spi;
+ 	spi_set_drvdata(spi, data);
+ 
+@@ -212,6 +222,7 @@ static int __devexit max1111_remove(struct spi_device *spi)
+ 
+ 	hwmon_device_unregister(data->hwmon_dev);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
++	mutex_destroy(&data->drvdata_lock);
+ 	kfree(data->rx_buf);
+ 	kfree(data->tx_buf);
+ 	kfree(data);
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index fcf717c..b03cd39 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -778,6 +778,11 @@ static int parse_features(struct arg_set *as, struct multipath *m)
+ 	if (!argc)
+ 		return 0;
+ 
++	if (argc > as->argc) {
++		ti->error = "not enough arguments for features";
++		return -EINVAL;
++	}
++
+ 	do {
+ 		param_name = shift(as);
+ 		argc--;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d186687..c988ac2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -36,6 +36,8 @@ static const char *_name = DM_NAME;
+ static unsigned int major = 0;
+ static unsigned int _major = 0;
+ 
++static DEFINE_IDR(_minor_idr);
++
+ static DEFINE_SPINLOCK(_minor_lock);
+ /*
+  * For bio-based dm.
+@@ -315,6 +317,12 @@ static void __exit dm_exit(void)
+ 
+ 	while (i--)
+ 		_exits[i]();
++
++	/*
++	 * Should be empty by this point.
++	 */
++	idr_remove_all(&_minor_idr);
++	idr_destroy(&_minor_idr);
+ }
+ 
+ /*
+@@ -1663,8 +1671,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
+ /*-----------------------------------------------------------------
+  * An IDR is used to keep track of allocated minor numbers.
+  *---------------------------------------------------------------*/
+-static DEFINE_IDR(_minor_idr);
+-
+ static void free_minor(int minor)
+ {
+ 	spin_lock(&_minor_lock);
+diff --git a/drivers/media/radio/si4713-i2c.c b/drivers/media/radio/si4713-i2c.c
+index 6a0028e..93653c6 100644
+--- a/drivers/media/radio/si4713-i2c.c
++++ b/drivers/media/radio/si4713-i2c.c
+@@ -1003,7 +1003,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
+ 		char ps_name[MAX_RDS_PS_NAME + 1];
+ 
+ 		len = control->size - 1;
+-		if (len > MAX_RDS_PS_NAME) {
++		if (len < 0 || len > MAX_RDS_PS_NAME) {
+ 			rval = -ERANGE;
+ 			goto exit;
+ 		}
+@@ -1025,7 +1025,7 @@ static int si4713_write_econtrol_string(struct si4713_device *sdev,
+ 		char radio_text[MAX_RDS_RADIO_TEXT + 1];
+ 
+ 		len = control->size - 1;
+-		if (len > MAX_RDS_RADIO_TEXT) {
++		if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
+ 			rval = -ERANGE;
+ 			goto exit;
+ 		}
+diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
+index d258ed7..f0e9c75 100644
+--- a/drivers/media/video/bt8xx/bttv-driver.c
++++ b/drivers/media/video/bt8xx/bttv-driver.c
+@@ -3532,7 +3532,7 @@ static int radio_s_tuner(struct file *file, void *priv,
+ 	if (0 != t->index)
+ 		return -EINVAL;
+ 
+-	bttv_call_all(btv, tuner, g_tuner, t);
++	bttv_call_all(btv, tuner, s_tuner, t);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+index 13639b3..5891e30 100644
+--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+@@ -2979,6 +2979,8 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
+ 	if (hdw->input_dirty || hdw->audiomode_dirty || hdw->force_dirty) {
+ 		struct v4l2_tuner vt;
+ 		memset(&vt, 0, sizeof(vt));
++		vt.type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
++			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ 		vt.audmode = hdw->audiomode_val;
+ 		v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, s_tuner, &vt);
+ 	}
+@@ -5064,6 +5066,8 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
+ {
+ 	struct v4l2_tuner *vtp = &hdw->tuner_signal_info;
+ 	memset(vtp, 0, sizeof(*vtp));
++	vtp->type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
++		V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ 	hdw->tuner_signal_stale = 0;
+ 	/* Note: There apparently is no replacement for VIDIOC_CROPCAP
+ 	   using v4l2-subdev - therefore we can't support that AT ALL right
+diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
+index 30cc334..265bfb5 100644
+--- a/drivers/media/video/v4l2-ioctl.c
++++ b/drivers/media/video/v4l2-ioctl.c
+@@ -1600,6 +1600,8 @@ static long __video_do_ioctl(struct file *file,
+ 		if (!ops->vidioc_g_tuner)
+ 			break;
+ 
++		p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
++			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ 		ret = ops->vidioc_g_tuner(file, fh, p);
+ 		if (!ret)
+ 			dbgarg(cmd, "index=%d, name=%s, type=%d, "
+@@ -1618,6 +1620,8 @@ static long __video_do_ioctl(struct file *file,
+ 
+ 		if (!ops->vidioc_s_tuner)
+ 			break;
++		p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
++			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ 		dbgarg(cmd, "index=%d, name=%s, type=%d, "
+ 				"capability=0x%x, rangelow=%d, "
+ 				"rangehigh=%d, signal=%d, afc=%d, "
+@@ -1636,6 +1640,8 @@ static long __video_do_ioctl(struct file *file,
+ 		if (!ops->vidioc_g_frequency)
+ 			break;
+ 
++		p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
++			V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ 		ret = ops->vidioc_g_frequency(file, fh, p);
+ 		if (!ret)
+ 			dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
+diff --git a/drivers/net/jme.c b/drivers/net/jme.c
+index a893f45..0fbca76 100644
+--- a/drivers/net/jme.c
++++ b/drivers/net/jme.c
+@@ -681,20 +681,28 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
+ 	struct jme_ring *rxring = &(jme->rxring[0]);
+ 	struct jme_buffer_info *rxbi = rxring->bufinf + i;
+ 	struct sk_buff *skb;
++	dma_addr_t mapping;
+ 
+ 	skb = netdev_alloc_skb(jme->dev,
+ 		jme->dev->mtu + RX_EXTRA_LEN);
+ 	if (unlikely(!skb))
+ 		return -ENOMEM;
+ 
++	mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
++			       offset_in_page(skb->data), skb_tailroom(skb),
++			       PCI_DMA_FROMDEVICE);
++	if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
++		dev_kfree_skb(skb);
++		return -ENOMEM;
++	}
++
++	if (likely(rxbi->mapping))
++		pci_unmap_page(jme->pdev, rxbi->mapping,
++			       rxbi->len, PCI_DMA_FROMDEVICE);
++
+ 	rxbi->skb = skb;
+ 	rxbi->len = skb_tailroom(skb);
+-	rxbi->mapping = pci_map_page(jme->pdev,
+-					virt_to_page(skb->data),
+-					offset_in_page(skb->data),
+-					rxbi->len,
+-					PCI_DMA_FROMDEVICE);
+-
++	rxbi->mapping = mapping;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 0d3326d..6f8352c 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1507,7 +1507,7 @@ void pci_enable_ari(struct pci_dev *dev)
+ {
+ 	int pos;
+ 	u32 cap;
+-	u16 ctrl;
++	u16 flags, ctrl;
+ 	struct pci_dev *bridge;
+ 
+ 	if (!dev->is_pcie || dev->devfn)
+@@ -1525,6 +1525,11 @@ void pci_enable_ari(struct pci_dev *dev)
+ 	if (!pos)
+ 		return;
+ 
++	/* ARI is a PCIe v2 feature */
++	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
++	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
++		return;
++
+ 	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
+ 	if (!(cap & PCI_EXP_DEVCAP2_ARI))
+ 		return;
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 33cf988..4f43306 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -840,6 +840,9 @@ static struct domain_device *sas_ex_discover_expander(
+ 
+ 	res = sas_discover_expander(child);
+ 	if (res) {
++		spin_lock_irq(&parent->port->dev_list_lock);
++		list_del(&child->dev_list_node);
++		spin_unlock_irq(&parent->port->dev_list_lock);
+ 		kfree(child);
+ 		return NULL;
+ 	}
+diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
+index 483370f..9ab8c86 100644
+--- a/drivers/scsi/pmcraid.c
++++ b/drivers/scsi/pmcraid.c
+@@ -3557,6 +3557,9 @@ static long pmcraid_ioctl_passthrough(
+ 			pmcraid_err("couldn't build passthrough ioadls\n");
+ 			goto out_free_buffer;
+ 		}
++	} else if (request_size < 0) {
++		rc = -EINVAL;
++		goto out_free_buffer;
+ 	}
+ 
+ 	/* If data is being written into the device, copy the data from user
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 802e91c..ae0ae2d 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -196,6 +196,7 @@ static struct {
+ 	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+ 	{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
++	{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
+ 	{"IOMEGA", "Io20S         *F", NULL, BLIST_KEY},
+ 	{"INSITE", "Floptical   F*8I", NULL, BLIST_KEY},
+ 	{"INSITE", "I325VM", NULL, BLIST_KEY},
+@@ -242,6 +243,7 @@ static struct {
+ 	{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
+ 	{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
+ 	{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
++	{"Traxdata", "CDR4120", NULL, BLIST_NOLUN},	/* locks up */
+ 	{"USB2.0", "SMARTMEDIA/XD", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36},
+ 	{"WangDAT", "Model 2600", "01.7", BLIST_SELECT_NO_ATN},
+ 	{"WangDAT", "Model 3200", "02.2", BLIST_SELECT_NO_ATN},
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 3b082dd..340124d 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -157,6 +157,10 @@ static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
+ 	return NULL;
+ }
+ 
++/* For device slot and array device slot elements, byte 3 bit 6
++ * is "fault sensed" while byte 3 bit 5 is "fault reqstd". As this
++ * code stands these bits are shifted 4 positions right so in
++ * sysfs they will appear as bits 2 and 1 respectively. Strange. */
+ static void ses_get_fault(struct enclosure_device *edev,
+ 			  struct enclosure_component *ecomp)
+ {
+@@ -178,7 +182,7 @@ static int ses_set_fault(struct enclosure_device *edev,
+ 		/* zero is disabled */
+ 		break;
+ 	case ENCLOSURE_SETTING_ENABLED:
+-		desc[2] = 0x02;
++		desc[3] = 0x20;
+ 		break;
+ 	default:
+ 		/* SES doesn't do the SGPIO blink settings */
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index aaad76e..80a1071 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -367,8 +367,8 @@ static int do_devinfo_ioctl(struct comedi_device *dev,
+ 	/* fill devinfo structure */
+ 	devinfo.version_code = COMEDI_VERSION_CODE;
+ 	devinfo.n_subdevs = dev->n_subdevices;
+-	memcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
+-	memcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
++	strlcpy(devinfo.driver_name, dev->driver->driver_name, COMEDI_NAMELEN);
++	strlcpy(devinfo.board_name, dev->board_name, COMEDI_NAMELEN);
+ 
+ 	if (read_subdev)
+ 		devinfo.read_subdevice = read_subdev - dev->subdevices;
+diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
+index 5e09664..7adb671 100644
+--- a/drivers/usb/gadget/dummy_hcd.c
++++ b/drivers/usb/gadget/dummy_hcd.c
+@@ -1886,6 +1886,7 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ 	if (!hcd)
+ 		return -ENOMEM;
+ 	the_controller = hcd_to_dummy (hcd);
++	hcd->has_tt = 1;
+ 
+ 	retval = usb_add_hcd(hcd, 0, 0);
+ 	if (retval != 0) {
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 6ac3976..1bcf6ee 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -758,10 +758,11 @@ static int ehci_hub_control (
+ 			 * power switching; they're allowed to just limit the
+ 			 * current.  khubd will turn the power back on.
+ 			 */
+-			if (HCS_PPC (ehci->hcs_params)){
++			if ((temp & PORT_OC) && HCS_PPC(ehci->hcs_params)) {
+ 				ehci_writel(ehci,
+ 					temp & ~(PORT_RWC_BITS | PORT_POWER),
+ 					status_reg);
++				temp = ehci_readl(ehci, status_reg);
+ 			}
+ 		}
+ 
+diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
+index f51345f..0ee5b4b 100644
+--- a/drivers/usb/host/ehci-q.c
++++ b/drivers/usb/host/ehci-q.c
+@@ -103,7 +103,7 @@ qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
+ 	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
+ 		unsigned	is_out, epnum;
+ 
+-		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
++		is_out = qh->is_out;
+ 		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
+ 		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
+ 			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
+@@ -923,6 +923,7 @@ done:
+ 	hw = qh->hw;
+ 	hw->hw_info1 = cpu_to_hc32(ehci, info1);
+ 	hw->hw_info2 = cpu_to_hc32(ehci, info2);
++	qh->is_out = !is_input;
+ 	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
+ 	qh_refresh (ehci, qh);
+ 	return qh;
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index ac321ef..5b3ca74 100644
+--- a/drivers/usb/host/ehci.h
++++ b/drivers/usb/host/ehci.h
+@@ -366,6 +366,7 @@ struct ehci_qh {
+ #define NO_FRAME ((unsigned short)~0)			/* pick new start */
+ 
+ 	struct usb_device	*dev;		/* access to TT */
++	unsigned		is_out:1;	/* bulk or intr OUT */
+ 	unsigned		clearing_tt:1;	/* Clear-TT-Buf in progress */
+ };
+ 
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 464ed97..bcf7a88 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -34,6 +34,8 @@
+ #define OHCI_INTRSTATUS		0x0c
+ #define OHCI_INTRENABLE		0x10
+ #define OHCI_INTRDISABLE	0x14
++#define OHCI_FMINTERVAL		0x34
++#define OHCI_HCR		(1 << 0)	/* host controller reset */
+ #define OHCI_OCR		(1 << 3)	/* ownership change request */
+ #define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
+ #define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
+@@ -204,6 +206,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
+ 
+ 	/* reset controller, preserving RWC (and possibly IR) */
+ 	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
++	readl(base + OHCI_CONTROL);
++
++	/* Some NVIDIA controllers stop working if kept in RESET for too long */
++	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
++		u32 fminterval;
++		int cnt;
++
++		/* drive reset for at least 50 ms (7.1.7.5) */
++		msleep(50);
++
++		/* software reset of the controller, preserving HcFmInterval */
++		fminterval = readl(base + OHCI_FMINTERVAL);
++		writel(OHCI_HCR, base + OHCI_CMDSTATUS);
++
++		/* reset requires max 10 us delay */
++		for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
++			if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
++				break;
++			udelay(1);
++		}
++		writel(fminterval, base + OHCI_FMINTERVAL);
++
++		/* Now we're in the SUSPEND state with all devices reset
++		 * and wakeups and interrupts disabled
++		 */
++	}
+ 
+ 	/*
+ 	 * disable interrupts
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 24212be..b9afd6a 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -1634,6 +1634,7 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+ 			}
+ 		}
+ 	}
++	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
+ }
+ 
+ #else
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index b336017..4a18fd2 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -100,6 +100,8 @@ static struct usb_device_id id_table [] = {
+ 	{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
+ 	{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
+ 	{ USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
++	{ USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
++	{ USB_DEVICE(WINCHIPHEAD_VENDOR_ID, WINCHIPHEAD_USBSER_PRODUCT_ID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 4d043e4..ca0d237 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -5,7 +5,7 @@
+  *	it under the terms of the GNU General Public License as published by
+  *	the Free Software Foundation; either version 2 of the License, or
+  *	(at your option) any later version.
+- * 
++ *
+  */
+ 
+ #define BENQ_VENDOR_ID			0x04a5
+@@ -140,3 +140,11 @@
+ /* Sanwa KB-USB2 multimeter cable (ID: 11ad:0001) */
+ #define SANWA_VENDOR_ID		0x11ad
+ #define SANWA_PRODUCT_ID	0x0001
++
++/* ADLINK ND-6530 RS232,RS485 and RS422 adapter */
++#define ADLINK_VENDOR_ID		0x0b63
++#define ADLINK_ND6530_PRODUCT_ID	0x6530
++
++/* WinChipHead USB->RS 232 adapter */
++#define WINCHIPHEAD_VENDOR_ID		0x4348
++#define WINCHIPHEAD_USBSER_PRODUCT_ID	0x5523
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 16cea86..e65efa2 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1203,7 +1203,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 			if (!bdev->bd_part)
+ 				goto out_clear;
+ 
+-			ret = 0;
+ 			if (disk->fops->open) {
+ 				ret = disk->fops->open(bdev, mode);
+ 				if (ret == -ERESTARTSYS) {
+@@ -1219,18 +1218,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 					mutex_unlock(&bdev->bd_mutex);
+ 					goto restart;
+ 				}
++				if (ret)
++					goto out_clear;
+ 			}
+-			/*
+-			 * If the device is invalidated, rescan partition
+-			 * if open succeeded or failed with -ENOMEDIUM.
+-			 * The latter is necessary to prevent ghost
+-			 * partitions on a removed medium.
+-			 */
+-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+-				rescan_partitions(disk, bdev);
+-			if (ret)
+-				goto out_clear;
+-
+ 			if (!bdev->bd_openers) {
+ 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+ 				bdi = blk_get_backing_dev_info(bdev);
+@@ -1238,6 +1228,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 					bdi = &default_backing_dev_info;
+ 				bdev->bd_inode->i_data.backing_dev_info = bdi;
+ 			}
++			if (bdev->bd_invalidated)
++				rescan_partitions(disk, bdev);
+ 		} else {
+ 			struct block_device *whole;
+ 			whole = bdget_disk(disk, 0);
+@@ -1264,14 +1256,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 		put_disk(disk);
+ 		disk = NULL;
+ 		if (bdev->bd_contains == bdev) {
+-			ret = 0;
+-			if (bdev->bd_disk->fops->open)
++			if (bdev->bd_disk->fops->open) {
+ 				ret = bdev->bd_disk->fops->open(bdev, mode);
+-			/* the same as first opener case, read comment there */
+-			if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
++				if (ret)
++					goto out_unlock_bdev;
++			}
++			if (bdev->bd_invalidated)
+ 				rescan_partitions(bdev->bd_disk, bdev);
+-			if (ret)
+-				goto out_unlock_bdev;
+ 		}
+ 	}
+ 	bdev->bd_openers++;
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index c4dbc63..e29581e 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -33,7 +33,7 @@
+ #define MAX_SHARE_SIZE  64	/* used to be 20, this should still be enough */
+ #define MAX_USERNAME_SIZE 32	/* 32 is to allow for 15 char names + null
+ 				   termination then *2 for unicode versions */
+-#define MAX_PASSWORD_SIZE 16
++#define MAX_PASSWORD_SIZE 512  /* max for windows seems to be 256 wide chars */
+ 
+ #define CIFS_MIN_RCV_POOL 4
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 6234417..b6bb82e 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1588,17 +1588,29 @@ out_err:
+ }
+ 
+ static struct cifsSesInfo *
+-cifs_find_smb_ses(struct TCP_Server_Info *server, char *username)
++cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
+ {
+-	struct list_head *tmp;
+ 	struct cifsSesInfo *ses;
+ 
+ 	write_lock(&cifs_tcp_ses_lock);
+-	list_for_each(tmp, &server->smb_ses_list) {
+-		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+-		if (strncmp(ses->userName, username, MAX_USERNAME_SIZE))
+-			continue;
+-
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		switch (server->secType) {
++		case Kerberos:
++			if (vol->linux_uid != ses->linux_uid)
++				continue;
++			break;
++		default:
++			/* anything else takes username/password */
++			if (strncmp(ses->userName, vol->username,
++				    MAX_USERNAME_SIZE))
++				continue;
++			if (strlen(vol->username) != 0 &&
++			    ses->password != NULL &&
++			    strncmp(ses->password,
++				    vol->password ? vol->password : "",
++				    MAX_PASSWORD_SIZE))
++				continue;
++		}
+ 		++ses->ses_count;
+ 		write_unlock(&cifs_tcp_ses_lock);
+ 		return ses;
+@@ -2362,7 +2374,7 @@ try_mount_again:
+ 		goto out;
+ 	}
+ 
+-	pSesInfo = cifs_find_smb_ses(srvTcp, volume_info->username);
++	pSesInfo = cifs_find_smb_ses(srvTcp, volume_info);
+ 	if (pSesInfo) {
+ 		cFYI(1, ("Existing smb sess found (status=%d)",
+ 			pSesInfo->status));
+diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
+index 387d92d..d7add4c 100644
+--- a/fs/ext3/xattr.c
++++ b/fs/ext3/xattr.c
+@@ -800,8 +800,16 @@ inserted:
+ 			/* We need to allocate a new block */
+ 			ext3_fsblk_t goal = ext3_group_first_block_no(sb,
+ 						EXT3_I(inode)->i_block_group);
+-			ext3_fsblk_t block = ext3_new_block(handle, inode,
+-							goal, &error);
++			ext3_fsblk_t block;
++
++			/*
++			 * Protect us agaist concurrent allocations to the
++			 * same inode from ext3_..._writepage(). Reservation
++			 * code does not expect racing allocations.
++			 */
++			mutex_lock(&EXT3_I(inode)->truncate_mutex);
++			block = ext3_new_block(handle, inode, goal, &error);
++			mutex_unlock(&EXT3_I(inode)->truncate_mutex);
+ 			if (error)
+ 				goto cleanup;
+ 			ea_idebug(inode, "creating block %d", block);
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index e81b2bf..7ee8ebc 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -88,7 +88,7 @@ static int nfs4_stat_to_errno(int);
+ #define encode_getfh_maxsz      (op_encode_hdr_maxsz)
+ #define decode_getfh_maxsz      (op_decode_hdr_maxsz + 1 + \
+ 				((3+NFS4_FHSIZE) >> 2))
+-#define nfs4_fattr_bitmap_maxsz 3
++#define nfs4_fattr_bitmap_maxsz 4
+ #define encode_getattr_maxsz    (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
+ #define nfs4_name_maxsz		(1 + ((3 + NFS4_MAXNAMLEN) >> 2))
+ #define nfs4_path_maxsz		(1 + ((3 + NFS4_MAXPATHLEN) >> 2))
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 3d09a10..7b5819c 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -2454,6 +2454,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
+ 	struct task_io_accounting acct = task->ioac;
+ 	unsigned long flags;
+ 
++	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++		return -EACCES;
++
+ 	if (whole && lock_task_sighand(task, &flags)) {
+ 		struct task_struct *t = task;
+ 
+@@ -2575,7 +2578,7 @@ static const struct pid_entry tgid_base_stuff[] = {
+ 	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
+ #endif
+ #ifdef CONFIG_TASK_IO_ACCOUNTING
+-	INF("io",	S_IRUGO, proc_tgid_io_accounting),
++	INF("io",	S_IRUSR, proc_tgid_io_accounting),
+ #endif
+ };
+ 
+@@ -2910,7 +2913,7 @@ static const struct pid_entry tid_base_stuff[] = {
+ 	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
+ #endif
+ #ifdef CONFIG_TASK_IO_ACCOUNTING
+-	INF("io",	S_IRUGO, proc_tid_io_accounting),
++	INF("io",	S_IRUSR, proc_tid_io_accounting),
+ #endif
+ };
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c27a182..9d7e8f7 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1164,9 +1164,12 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ 					unsigned int offset)
+ {
++	if (!pskb_may_pull(skb, hlen))
++		return NULL;
++
+ 	NAPI_GRO_CB(skb)->frag0 = NULL;
+ 	NAPI_GRO_CB(skb)->frag0_len = 0;
+-	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
++	return skb->data + offset;
+ }
+ 
+ static inline void *skb_gro_mac_header(struct sk_buff *skb)
+diff --git a/kernel/perf_event.c b/kernel/perf_event.c
+index fc5ab8e..37ebc14 100644
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -3694,12 +3694,8 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
+ 	if (events && atomic_dec_and_test(&event->event_limit)) {
+ 		ret = 1;
+ 		event->pending_kill = POLL_HUP;
+-		if (nmi) {
+-			event->pending_disable = 1;
+-			perf_pending_queue(&event->pending,
+-					   perf_pending_event);
+-		} else
+-			perf_event_disable(event);
++		event->pending_disable = 1;
++		perf_pending_queue(&event->pending, perf_pending_event);
+ 	}
+ 
+ 	perf_event_output(event, nmi, data, regs);
+diff --git a/net/atm/br2684.c b/net/atm/br2684.c
+index 26a646d..c9230c3 100644
+--- a/net/atm/br2684.c
++++ b/net/atm/br2684.c
+@@ -554,6 +554,12 @@ static const struct net_device_ops br2684_netdev_ops = {
+ 	.ndo_validate_addr	= eth_validate_addr,
+ };
+ 
++static const struct net_device_ops br2684_netdev_ops_routed = {
++	.ndo_start_xmit 	= br2684_start_xmit,
++	.ndo_set_mac_address	= br2684_mac_addr,
++	.ndo_change_mtu		= eth_change_mtu
++};
++
+ static void br2684_setup(struct net_device *netdev)
+ {
+ 	struct br2684_dev *brdev = BRPRIV(netdev);
+@@ -569,11 +575,10 @@ static void br2684_setup(struct net_device *netdev)
+ static void br2684_setup_routed(struct net_device *netdev)
+ {
+ 	struct br2684_dev *brdev = BRPRIV(netdev);
+-	brdev->net_dev = netdev;
+ 
++	brdev->net_dev = netdev;
+ 	netdev->hard_header_len = 0;
+-
+-	netdev->netdev_ops = &br2684_netdev_ops;
++	netdev->netdev_ops = &br2684_netdev_ops_routed;
+ 	netdev->addr_len = 0;
+ 	netdev->mtu = 1500;
+ 	netdev->type = ARPHRD_PPP;
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 2114e45..8567d47 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -75,6 +75,7 @@ struct net_bridge_port
+ 	bridge_id			designated_bridge;
+ 	u32				path_cost;
+ 	u32				designated_cost;
++	unsigned long			designated_age;
+ 
+ 	struct timer_list		forward_delay_timer;
+ 	struct timer_list		hold_timer;
+diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
+index fd3f8d6..c7d6bfc 100644
+--- a/net/bridge/br_stp.c
++++ b/net/bridge/br_stp.c
+@@ -165,8 +165,7 @@ void br_transmit_config(struct net_bridge_port *p)
+ 	else {
+ 		struct net_bridge_port *root
+ 			= br_get_port(br, br->root_port);
+-		bpdu.message_age = br->max_age
+-			- (root->message_age_timer.expires - jiffies)
++		bpdu.message_age = (jiffies - root->designated_age)
+ 			+ MESSAGE_AGE_INCR;
+ 	}
+ 	bpdu.max_age = br->max_age;
+@@ -190,6 +189,7 @@ static inline void br_record_config_information(struct net_bridge_port *p,
+ 	p->designated_cost = bpdu->root_path_cost;
+ 	p->designated_bridge = bpdu->bridge_id;
+ 	p->designated_port = bpdu->port_id;
++	p->designated_age = jiffies + bpdu->message_age;
+ 
+ 	mod_timer(&p->message_age_timer, jiffies
+ 		  + (p->br->max_age - bpdu->message_age));
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index cfab9e4..f614584 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1665,14 +1665,15 @@ static int __init ipgre_init(void)
+ 
+ 	printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
+ 
+-	if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
+-		printk(KERN_INFO "ipgre init: can't add protocol\n");
+-		return -EAGAIN;
+-	}
+-
+ 	err = register_pernet_gen_device(&ipgre_net_id, &ipgre_net_ops);
+ 	if (err < 0)
+-		goto gen_device_failed;
++		return err;
++
++	err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
++	if (err < 0) {
++		printk(KERN_INFO "ipgre init: can't add protocol\n");
++		goto add_proto_failed;
++	}
+ 
+ 	err = rtnl_link_register(&ipgre_link_ops);
+ 	if (err < 0)
+@@ -1688,9 +1689,9 @@ out:
+ tap_ops_failed:
+ 	rtnl_link_unregister(&ipgre_link_ops);
+ rtnl_link_failed:
+-	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+-gen_device_failed:
+ 	inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
++add_proto_failed:
++	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ 	goto out;
+ }
+ 
+@@ -1698,9 +1699,9 @@ static void __exit ipgre_fini(void)
+ {
+ 	rtnl_link_unregister(&ipgre_tap_ops);
+ 	rtnl_link_unregister(&ipgre_link_ops);
+-	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ 	if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
+ 		printk(KERN_INFO "ipgre close: can't remove protocol\n");
++	unregister_pernet_gen_device(ipgre_net_id, &ipgre_net_ops);
+ }
+ 
+ module_init(ipgre_init);
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index f37df1a..860b5c5 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -830,15 +830,14 @@ static int __init ipip_init(void)
+ 
+ 	printk(banner);
+ 
+-	if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
++	err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops);
++	if (err < 0)
++		return err;
++	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
++	if (err < 0) {
++		unregister_pernet_device(&ipip_net_ops);
+ 		printk(KERN_INFO "ipip init: can't register tunnel\n");
+-		return -EAGAIN;
+ 	}
+-
+-	err = register_pernet_gen_device(&ipip_net_id, &ipip_net_ops);
+-	if (err)
+-		xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
+-
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 9a95c82..7fb3e02 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1466,27 +1466,29 @@ static int __init ip6_tunnel_init(void)
+ {
+ 	int  err;
+ 
+-	if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) {
++	err = register_pernet_device(&ip6_tnl_net_ops);
++	if (err < 0)
++		goto out_pernet;
++
++	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
++	if (err < 0) {
+ 		printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
+-		err = -EAGAIN;
+-		goto out;
++		goto out_ip4ip6;
+ 	}
+ 
+-	if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) {
++	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
++	if (err < 0) {
+ 		printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
+-		err = -EAGAIN;
+-		goto unreg_ip4ip6;
++		goto out_ip6ip6;
+ 	}
+ 
+-	err = register_pernet_gen_device(&ip6_tnl_net_id, &ip6_tnl_net_ops);
+-	if (err < 0)
+-		goto err_pernet;
+ 	return 0;
+-err_pernet:
+-	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
+-unreg_ip4ip6:
++
++out_ip6ip6:
+ 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
+-out:
++out_ip4ip6:
++	unregister_pernet_device(&ip6_tnl_net_ops);
++out_pernet:
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index de2ffef..b128c07 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1086,15 +1086,14 @@ static int __init sit_init(void)
+ 
+ 	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
+ 
+-	if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
+-		printk(KERN_INFO "sit init: Can't add protocol\n");
+-		return -EAGAIN;
+-	}
+-
+ 	err = register_pernet_gen_device(&sit_net_id, &sit_net_ops);
+ 	if (err < 0)
+-		xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
+-
++		return err;
++	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
++	if (err < 0) {
++		unregister_pernet_device(&sit_net_ops);
++		printk(KERN_INFO "sit init: Can't add protocol\n");
++	}
+ 	return err;
+ }
+ 
+diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
+index 81a95c0..48bb1e3 100644
+--- a/net/ipv6/xfrm6_tunnel.c
++++ b/net/ipv6/xfrm6_tunnel.c
+@@ -344,32 +344,38 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
+ 
+ static int __init xfrm6_tunnel_init(void)
+ {
+-	if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
++	int rv;
++
++	rv = xfrm6_tunnel_spi_init();
++	if (rv < 0)
+ 		goto err;
+-	if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6))
+-		goto unreg;
+-	if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET))
+-		goto dereg6;
+-	if (xfrm6_tunnel_spi_init() < 0)
+-		goto dereg46;
++	rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
++	if (rv < 0)
++		goto out_type;
++	rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
++	if (rv < 0)
++		goto out_xfrm6;
++	rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
++	if (rv < 0)
++		goto out_xfrm46;
+ 	return 0;
+ 
+-dereg46:
+-	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
+-dereg6:
++out_xfrm46:
+ 	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
+-unreg:
++out_xfrm6:
+ 	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
++out_type:
++	xfrm6_tunnel_spi_fini();
+ err:
+-	return -EAGAIN;
++	return rv;
+ }
+ 
+ static void __exit xfrm6_tunnel_fini(void)
+ {
+-	xfrm6_tunnel_spi_fini();
+ 	xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
+ 	xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
+ 	xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
++	xfrm6_tunnel_spi_fini();
+ }
+ 
+ module_init(xfrm6_tunnel_init);
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 5bea319..e67eea7 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2308,6 +2308,9 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
+ {
+ 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ 
++	if (!ifmgd->associated)
++		return;
++
+ 	if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
+ 		add_timer(&ifmgd->timer);
+ 	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index 830faf4..4418bb4 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -533,7 +533,7 @@ void rpcb_getport_async(struct rpc_task *task)
+ 	u32 bind_version;
+ 	struct rpc_xprt *xprt;
+ 	struct rpc_clnt	*rpcb_clnt;
+-	static struct rpcbind_args *map;
++	struct rpcbind_args *map;
+ 	struct rpc_task	*child;
+ 	struct sockaddr_storage addr;
+ 	struct sockaddr *sap = (struct sockaddr *)&addr;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 9191b2f..570da30 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -613,30 +613,25 @@ static void __rpc_execute(struct rpc_task *task)
+ 	BUG_ON(RPC_IS_QUEUED(task));
+ 
+ 	for (;;) {
++		void (*do_action)(struct rpc_task *);
+ 
+ 		/*
+-		 * Execute any pending callback.
++		 * Execute any pending callback first.
+ 		 */
+-		if (task->tk_callback) {
+-			void (*save_callback)(struct rpc_task *);
+-
+-			/*
+-			 * We set tk_callback to NULL before calling it,
+-			 * in case it sets the tk_callback field itself:
+-			 */
+-			save_callback = task->tk_callback;
+-			task->tk_callback = NULL;
+-			save_callback(task);
+-		} else {
++		do_action = task->tk_callback;
++		task->tk_callback = NULL;
++		if (do_action == NULL) {
+ 			/*
+ 			 * Perform the next FSM step.
+-			 * tk_action may be NULL when the task has been killed
+-			 * by someone else.
++			 * tk_action may be NULL if the task has been killed.
++			 * In particular, note that rpc_killall_tasks may
++			 * do this at any time, so beware when dereferencing.
+ 			 */
+-			if (task->tk_action == NULL)
++			do_action = task->tk_action;
++			if (do_action == NULL)
+ 				break;
+-			task->tk_action(task);
+ 		}
++		do_action(task);
+ 
+ 		/*
+ 		 * Lockless check for whether task is sleeping or not.
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index df760ad..cc1fb36 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -896,12 +896,13 @@ void svc_delete_xprt(struct svc_xprt *xprt)
+ 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
+ 		list_del_init(&xprt->xpt_list);
+ 	/*
+-	 * We used to delete the transport from whichever list
+-	 * it's sk_xprt.xpt_ready node was on, but we don't actually
+-	 * need to.  This is because the only time we're called
+-	 * while still attached to a queue, the queue itself
+-	 * is about to be destroyed (in svc_destroy).
++	 * The only time we're called while xpt_ready is still on a list
++	 * is while the list itself is about to be destroyed (in
++	 * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
++	 * to add new entries to the sp_sockets list, so we can't leave
++	 * a freed xprt on it.
+ 	 */
++	list_del_init(&xprt->xpt_ready);
+ 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+ 		serv->sv_tmpcnt--;
+ 
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 08bfed5..038232d 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -341,7 +341,7 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
+ 			kfree(bufs);
+ 			return -EFAULT;
+ 		}
+-		bufs[ch] = compat_ptr(ptr);
++		bufs[i] = compat_ptr(ptr);
+ 		bufptr++;
+ 	}
+ 	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
+diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+index 62fbb84..77dae16 100644
+--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
++++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
+@@ -139,11 +139,20 @@ static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
+ 	pr_debug("%s enter\n", __func__);
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		diff = sport_curr_offset_tx(sport);
+-		frames = bytes_to_frames(substream->runtime, diff);
+ 	} else {
+ 		diff = sport_curr_offset_rx(sport);
+-		frames = bytes_to_frames(substream->runtime, diff);
+ 	}
++
++	/*
++	 * TX at least can report one frame beyond the end of the
++	 * buffer if we hit the wraparound case - clamp to within the
++	 * buffer as the ALSA APIs require.
++	 */
++	if (diff == snd_pcm_lib_buffer_bytes(substream))
++		diff = 0;
++
++	frames = bytes_to_frames(substream->runtime, diff);
++
+ 	return frames;
+ }
+ 

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.45.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.45.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.45.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.45.patch)
@@ -0,0 +1,1053 @@
+diff --git a/Makefile b/Makefile
+index f035471..5ba9830 100644
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index bafac2e..2edcfed 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -890,7 +890,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
+ 	}
+ 	if (addr == 0)
+ 		return 0;
+-	RELOC(alloc_bottom) = addr;
++	RELOC(alloc_bottom) = addr + size;
+ 
+ 	prom_debug(" -> %x\n", addr);
+ 	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
+@@ -1704,7 +1704,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
+ 		chunk = alloc_up(room, 0);
+ 		if (chunk == 0)
+ 			prom_panic("No memory for flatten_device_tree (claim failed)");
+-		*mem_end = RELOC(alloc_top);
++		*mem_end = chunk + room;
+ 	}
+ 
+ 	ret = (void *)*mem_start;
+@@ -1923,7 +1923,7 @@ static void __init flatten_device_tree(void)
+ 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
+ 	if (mem_start == 0)
+ 		prom_panic("Can't allocate initial device-tree chunk\n");
+-	mem_end = RELOC(alloc_top);
++	mem_end = mem_start + room;
+ 
+ 	/* Get root of tree */
+ 	root = call_prom("peer", 1, 1, (phandle)0);
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 5f2c511..d5db4c6 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -371,7 +371,7 @@ static void pSeries_lpar_hptab_clear(void)
+ 		unsigned long ptel;
+ 	} ptes[4];
+ 	long lpar_rc;
+-	int i, j;
++	unsigned long i, j;
+ 
+ 	/* Read in batches of 4,
+ 	 * invalidate only valid entries not in the VRMA
+diff --git a/crypto/md5.c b/crypto/md5.c
+index 83eb529..156b6bc 100644
+--- a/crypto/md5.c
++++ b/crypto/md5.c
+@@ -20,6 +20,7 @@
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
++#include <linux/cryptohash.h>
+ #include <asm/byteorder.h>
+ 
+ #define MD5_DIGEST_SIZE		16
+@@ -27,103 +28,12 @@
+ #define MD5_BLOCK_WORDS		16
+ #define MD5_HASH_WORDS		4
+ 
+-#define F1(x, y, z)	(z ^ (x & (y ^ z)))
+-#define F2(x, y, z)	F1(z, x, y)
+-#define F3(x, y, z)	(x ^ y ^ z)
+-#define F4(x, y, z)	(y ^ (x | ~z))
+-
+-#define MD5STEP(f, w, x, y, z, in, s) \
+-	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
+-
+ struct md5_ctx {
+ 	u32 hash[MD5_HASH_WORDS];
+ 	u32 block[MD5_BLOCK_WORDS];
+ 	u64 byte_count;
+ };
+ 
+-static void md5_transform(u32 *hash, u32 const *in)
+-{
+-	u32 a, b, c, d;
+-
+-	a = hash[0];
+-	b = hash[1];
+-	c = hash[2];
+-	d = hash[3];
+-
+-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+-
+-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+-
+-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+-
+-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+-
+-	hash[0] += a;
+-	hash[1] += b;
+-	hash[2] += c;
+-	hash[3] += d;
+-}
+-
+ /* XXX: this stuff can be optimized */
+ static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
+ {
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 908ac1f..3a19e2d 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1339,330 +1339,14 @@ ctl_table random_table[] = {
+ };
+ #endif 	/* CONFIG_SYSCTL */
+ 
+-/********************************************************************
+- *
+- * Random funtions for networking
+- *
+- ********************************************************************/
+-
+-/*
+- * TCP initial sequence number picking.  This uses the random number
+- * generator to pick an initial secret value.  This value is hashed
+- * along with the TCP endpoint information to provide a unique
+- * starting point for each pair of TCP endpoints.  This defeats
+- * attacks which rely on guessing the initial TCP sequence number.
+- * This algorithm was suggested by Steve Bellovin.
+- *
+- * Using a very strong hash was taking an appreciable amount of the total
+- * TCP connection establishment time, so this is a weaker hash,
+- * compensated for by changing the secret periodically.
+- */
+-
+-/* F, G and H are basic MD4 functions: selection, majority, parity */
+-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
+-#define H(x, y, z) ((x) ^ (y) ^ (z))
+-
+-/*
+- * The generic round function.  The application is so specific that
+- * we don't bother protecting all the arguments with parens, as is generally
+- * good macro practice, in favor of extra legibility.
+- * Rotation is separate from addition to prevent recomputation
+- */
+-#define ROUND(f, a, b, c, d, x, s)	\
+-	(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
+-#define K1 0
+-#define K2 013240474631UL
+-#define K3 015666365641UL
++static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+ 
+-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-
+-static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
++static int __init random_int_secret_init(void)
+ {
+-	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
+-
+-	/* Round 1 */
+-	ROUND(F, a, b, c, d, in[ 0] + K1,  3);
+-	ROUND(F, d, a, b, c, in[ 1] + K1,  7);
+-	ROUND(F, c, d, a, b, in[ 2] + K1, 11);
+-	ROUND(F, b, c, d, a, in[ 3] + K1, 19);
+-	ROUND(F, a, b, c, d, in[ 4] + K1,  3);
+-	ROUND(F, d, a, b, c, in[ 5] + K1,  7);
+-	ROUND(F, c, d, a, b, in[ 6] + K1, 11);
+-	ROUND(F, b, c, d, a, in[ 7] + K1, 19);
+-	ROUND(F, a, b, c, d, in[ 8] + K1,  3);
+-	ROUND(F, d, a, b, c, in[ 9] + K1,  7);
+-	ROUND(F, c, d, a, b, in[10] + K1, 11);
+-	ROUND(F, b, c, d, a, in[11] + K1, 19);
+-
+-	/* Round 2 */
+-	ROUND(G, a, b, c, d, in[ 1] + K2,  3);
+-	ROUND(G, d, a, b, c, in[ 3] + K2,  5);
+-	ROUND(G, c, d, a, b, in[ 5] + K2,  9);
+-	ROUND(G, b, c, d, a, in[ 7] + K2, 13);
+-	ROUND(G, a, b, c, d, in[ 9] + K2,  3);
+-	ROUND(G, d, a, b, c, in[11] + K2,  5);
+-	ROUND(G, c, d, a, b, in[ 0] + K2,  9);
+-	ROUND(G, b, c, d, a, in[ 2] + K2, 13);
+-	ROUND(G, a, b, c, d, in[ 4] + K2,  3);
+-	ROUND(G, d, a, b, c, in[ 6] + K2,  5);
+-	ROUND(G, c, d, a, b, in[ 8] + K2,  9);
+-	ROUND(G, b, c, d, a, in[10] + K2, 13);
+-
+-	/* Round 3 */
+-	ROUND(H, a, b, c, d, in[ 3] + K3,  3);
+-	ROUND(H, d, a, b, c, in[ 7] + K3,  9);
+-	ROUND(H, c, d, a, b, in[11] + K3, 11);
+-	ROUND(H, b, c, d, a, in[ 2] + K3, 15);
+-	ROUND(H, a, b, c, d, in[ 6] + K3,  3);
+-	ROUND(H, d, a, b, c, in[10] + K3,  9);
+-	ROUND(H, c, d, a, b, in[ 1] + K3, 11);
+-	ROUND(H, b, c, d, a, in[ 5] + K3, 15);
+-	ROUND(H, a, b, c, d, in[ 9] + K3,  3);
+-	ROUND(H, d, a, b, c, in[ 0] + K3,  9);
+-	ROUND(H, c, d, a, b, in[ 4] + K3, 11);
+-	ROUND(H, b, c, d, a, in[ 8] + K3, 15);
+-
+-	return buf[1] + b; /* "most hashed" word */
+-	/* Alternative: return sum of all words? */
+-}
+-#endif
+-
+-#undef ROUND
+-#undef F
+-#undef G
+-#undef H
+-#undef K1
+-#undef K2
+-#undef K3
+-
+-/* This should not be decreased so low that ISNs wrap too fast. */
+-#define REKEY_INTERVAL (300 * HZ)
+-/*
+- * Bit layout of the tcp sequence numbers (before adding current time):
+- * bit 24-31: increased after every key exchange
+- * bit 0-23: hash(source,dest)
+- *
+- * The implementation is similar to the algorithm described
+- * in the Appendix of RFC 1185, except that
+- * - it uses a 1 MHz clock instead of a 250 kHz clock
+- * - it performs a rekey every 5 minutes, which is equivalent
+- * 	to a (source,dest) tulple dependent forward jump of the
+- * 	clock by 0..2^(HASH_BITS+1)
+- *
+- * Thus the average ISN wraparound time is 68 minutes instead of
+- * 4.55 hours.
+- *
+- * SMP cleanup and lock avoidance with poor man's RCU.
+- * 			Manfred Spraul <manfred at colorfullife.com>
+- *
+- */
+-#define COUNT_BITS 8
+-#define COUNT_MASK ((1 << COUNT_BITS) - 1)
+-#define HASH_BITS 24
+-#define HASH_MASK ((1 << HASH_BITS) - 1)
+-
+-static struct keydata {
+-	__u32 count; /* already shifted to the final position */
+-	__u32 secret[12];
+-} ____cacheline_aligned ip_keydata[2];
+-
+-static unsigned int ip_cnt;
+-
+-static void rekey_seq_generator(struct work_struct *work);
+-
+-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
+-
+-/*
+- * Lock avoidance:
+- * The ISN generation runs lockless - it's just a hash over random data.
+- * State changes happen every 5 minutes when the random key is replaced.
+- * Synchronization is performed by having two copies of the hash function
+- * state and rekey_seq_generator always updates the inactive copy.
+- * The copy is then activated by updating ip_cnt.
+- * The implementation breaks down if someone blocks the thread
+- * that processes SYN requests for more than 5 minutes. Should never
+- * happen, and even if that happens only a not perfectly compliant
+- * ISN is generated, nothing fatal.
+- */
+-static void rekey_seq_generator(struct work_struct *work)
+-{
+-	struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
+-
+-	get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
+-	keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
+-	smp_wmb();
+-	ip_cnt++;
+-	schedule_delayed_work(&rekey_work,
+-			      round_jiffies_relative(REKEY_INTERVAL));
+-}
+-
+-static inline struct keydata *get_keyptr(void)
+-{
+-	struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
+-
+-	smp_rmb();
+-
+-	return keyptr;
+-}
+-
+-static __init int seqgen_init(void)
+-{
+-	rekey_seq_generator(NULL);
++	get_random_bytes(random_int_secret, sizeof(random_int_secret));
+ 	return 0;
+ }
+-late_initcall(seqgen_init);
+-
+-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+-				   __be16 sport, __be16 dport)
+-{
+-	__u32 seq;
+-	__u32 hash[12];
+-	struct keydata *keyptr = get_keyptr();
+-
+-	/* The procedure is the same as for IPv4, but addresses are longer.
+-	 * Thus we must use twothirdsMD4Transform.
+-	 */
+-
+-	memcpy(hash, saddr, 16);
+-	hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+-	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
+-
+-	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
+-	seq += keyptr->count;
+-
+-	seq += ktime_to_ns(ktime_get_real());
+-
+-	return seq;
+-}
+-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+-#endif
+-
+-/*  The code below is shamelessly stolen from secure_tcp_sequence_number().
+- *  All blames to Andrey V. Savochkin <saw at msu.ru>.
+- */
+-__u32 secure_ip_id(__be32 daddr)
+-{
+-	struct keydata *keyptr;
+-	__u32 hash[4];
+-
+-	keyptr = get_keyptr();
+-
+-	/*
+-	 *  Pick a unique starting offset for each IP destination.
+-	 *  The dest ip address is placed in the starting vector,
+-	 *  which is then hashed with random data.
+-	 */
+-	hash[0] = (__force __u32)daddr;
+-	hash[1] = keyptr->secret[9];
+-	hash[2] = keyptr->secret[10];
+-	hash[3] = keyptr->secret[11];
+-
+-	return half_md4_transform(hash, keyptr->secret);
+-}
+-
+-#ifdef CONFIG_INET
+-
+-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+-				 __be16 sport, __be16 dport)
+-{
+-	__u32 seq;
+-	__u32 hash[4];
+-	struct keydata *keyptr = get_keyptr();
+-
+-	/*
+-	 *  Pick a unique starting offset for each TCP connection endpoints
+-	 *  (saddr, daddr, sport, dport).
+-	 *  Note that the words are placed into the starting vector, which is
+-	 *  then mixed with a partial MD4 over random data.
+-	 */
+-	hash[0] = (__force u32)saddr;
+-	hash[1] = (__force u32)daddr;
+-	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+-	hash[3] = keyptr->secret[11];
+-
+-	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
+-	seq += keyptr->count;
+-	/*
+-	 *	As close as possible to RFC 793, which
+-	 *	suggests using a 250 kHz clock.
+-	 *	Further reading shows this assumes 2 Mb/s networks.
+-	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+-	 *	For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+-	 *	we also need to limit the resolution so that the u32 seq
+-	 *	overlaps less than one time per MSL (2 minutes).
+-	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
+-	 */
+-	seq += ktime_to_ns(ktime_get_real()) >> 6;
+-
+-	return seq;
+-}
+-
+-/* Generate secure starting point for ephemeral IPV4 transport port search */
+-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+-{
+-	struct keydata *keyptr = get_keyptr();
+-	u32 hash[4];
+-
+-	/*
+-	 *  Pick a unique starting offset for each ephemeral port search
+-	 *  (saddr, daddr, dport) and 48bits of random data.
+-	 */
+-	hash[0] = (__force u32)saddr;
+-	hash[1] = (__force u32)daddr;
+-	hash[2] = (__force u32)dport ^ keyptr->secret[10];
+-	hash[3] = keyptr->secret[11];
+-
+-	return half_md4_transform(hash, keyptr->secret);
+-}
+-EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
+-
+-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+-			       __be16 dport)
+-{
+-	struct keydata *keyptr = get_keyptr();
+-	u32 hash[12];
+-
+-	memcpy(hash, saddr, 16);
+-	hash[4] = (__force u32)dport;
+-	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
+-
+-	return twothirdsMD4Transform((const __u32 *)daddr, hash);
+-}
+-#endif
+-
+-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+-/* Similar to secure_tcp_sequence_number but generate a 48 bit value
+- * bit's 32-47 increase every key exchange
+- *       0-31  hash(source, dest)
+- */
+-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+-				__be16 sport, __be16 dport)
+-{
+-	u64 seq;
+-	__u32 hash[4];
+-	struct keydata *keyptr = get_keyptr();
+-
+-	hash[0] = (__force u32)saddr;
+-	hash[1] = (__force u32)daddr;
+-	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+-	hash[3] = keyptr->secret[11];
+-
+-	seq = half_md4_transform(hash, keyptr->secret);
+-	seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
+-
+-	seq += ktime_to_ns(ktime_get_real());
+-	seq &= (1ull << 48) - 1;
+-
+-	return seq;
+-}
+-EXPORT_SYMBOL(secure_dccp_sequence_number);
+-#endif
+-
+-#endif /* CONFIG_INET */
+-
++late_initcall(random_int_secret_init);
+ 
+ /*
+  * Get a random word for internal kernel use only. Similar to urandom but
+@@ -1670,17 +1354,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
+  * value is not cryptographically secure but for several uses the cost of
+  * depleting entropy is too high
+  */
+-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
++DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+-	struct keydata *keyptr;
+ 	__u32 *hash = get_cpu_var(get_random_int_hash);
+-	int ret;
++	unsigned int ret;
+ 
+-	keyptr = get_keyptr();
+ 	hash[0] += current->pid + jiffies + get_cycles();
+-
+-	ret = half_md4_transform(hash, keyptr->secret);
++	md5_transform(hash, random_int_secret);
++	ret = hash[0];
+ 	put_cpu_var(get_random_int_hash);
+ 
+ 	return ret;
+diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
+index c118b2ad..1ba279b 100644
+--- a/include/linux/cryptohash.h
++++ b/include/linux/cryptohash.h
+@@ -7,6 +7,11 @@
+ void sha_init(__u32 *buf);
+ void sha_transform(__u32 *digest, const char *data, __u32 *W);
+ 
++#define MD5_DIGEST_WORDS 4
++#define MD5_MESSAGE_BYTES 64
++
++void md5_transform(__u32 *hash, __u32 const *in);
++
+ __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
+ 
+ #endif
+diff --git a/include/linux/random.h b/include/linux/random.h
+index 25d02fe..2948046 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -53,17 +53,6 @@ extern void add_interrupt_randomness(int irq);
+ extern void get_random_bytes(void *buf, int nbytes);
+ void generate_random_uuid(unsigned char uuid_out[16]);
+ 
+-extern __u32 secure_ip_id(__be32 daddr);
+-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+-				      __be16 dport);
+-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+-					__be16 sport, __be16 dport);
+-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+-					  __be16 sport, __be16 dport);
+-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+-				       __be16 sport, __be16 dport);
+-
+ #ifndef MODULE
+ extern const struct file_operations random_fops, urandom_fops;
+ #endif
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+new file mode 100644
+index 0000000..d97f689
+--- /dev/null
++++ b/include/net/secure_seq.h
+@@ -0,0 +1,20 @@
++#ifndef _NET_SECURE_SEQ
++#define _NET_SECURE_SEQ
++
++#include <linux/types.h>
++
++extern __u32 secure_ip_id(__be32 daddr);
++extern __u32 secure_ipv6_id(const __be32 daddr[4]);
++extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
++extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++				      __be16 dport);
++extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
++					__be16 sport, __be16 dport);
++extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
++					  __be16 sport, __be16 dport);
++extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
++				       __be16 sport, __be16 dport);
++extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
++					 __be16 sport, __be16 dport);
++
++#endif /* _NET_SECURE_SEQ */
+diff --git a/lib/Makefile b/lib/Makefile
+index 452f188..5ecf2ba 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -10,7 +10,7 @@ endif
+ lib-y := ctype.o string.o vsprintf.o cmdline.o \
+ 	 rbtree.o radix-tree.o dump_stack.o \
+ 	 idr.o int_sqrt.o extable.o prio_tree.o \
+-	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
++	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
+ 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
+ 	 is_single_threaded.o plist.o decompress.o flex_array.o
+ 
+diff --git a/lib/md5.c b/lib/md5.c
+new file mode 100644
+index 0000000..c777180
+--- /dev/null
++++ b/lib/md5.c
+@@ -0,0 +1,95 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/cryptohash.h>
++
++#define F1(x, y, z)	(z ^ (x & (y ^ z)))
++#define F2(x, y, z)	F1(z, x, y)
++#define F3(x, y, z)	(x ^ y ^ z)
++#define F4(x, y, z)	(y ^ (x | ~z))
++
++#define MD5STEP(f, w, x, y, z, in, s) \
++	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
++
++void md5_transform(__u32 *hash, __u32 const *in)
++{
++	u32 a, b, c, d;
++
++	a = hash[0];
++	b = hash[1];
++	c = hash[2];
++	d = hash[3];
++
++	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
++	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
++	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
++	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
++	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
++	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
++	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
++	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
++	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
++	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
++	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
++	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
++	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
++	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
++	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
++	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
++
++	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
++	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
++	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
++	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
++	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
++	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
++	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
++	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
++	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
++	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
++	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
++	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
++	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
++	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
++	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
++	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
++
++	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
++	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
++	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
++	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
++	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
++	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
++	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
++	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
++	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
++	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
++	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
++	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
++	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
++	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
++	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
++	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
++
++	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
++	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
++	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
++	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
++	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
++	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
++	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
++	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
++	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
++	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
++	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
++	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
++	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
++	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
++	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
++	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
++
++	hash[0] += a;
++	hash[1] += b;
++	hash[2] += c;
++	hash[3] += d;
++}
++EXPORT_SYMBOL(md5_transform);
+diff --git a/net/core/Makefile b/net/core/Makefile
+index 796f46e..ace37e4 100644
+--- a/net/core/Makefile
++++ b/net/core/Makefile
+@@ -3,7 +3,7 @@
+ #
+ 
+ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+-	 gen_stats.o gen_estimator.o net_namespace.o
++	 gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
+ 
+ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
+ obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+new file mode 100644
+index 0000000..45329d7
+--- /dev/null
++++ b/net/core/secure_seq.c
+@@ -0,0 +1,184 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/cryptohash.h>
++#include <linux/module.h>
++#include <linux/cache.h>
++#include <linux/random.h>
++#include <linux/hrtimer.h>
++#include <linux/ktime.h>
++#include <linux/string.h>
++
++#include <net/secure_seq.h>
++
++static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
++
++static int __init net_secret_init(void)
++{
++	get_random_bytes(net_secret, sizeof(net_secret));
++	return 0;
++}
++late_initcall(net_secret_init);
++
++static u32 seq_scale(u32 seq)
++{
++	/*
++	 *	As close as possible to RFC 793, which
++	 *	suggests using a 250 kHz clock.
++	 *	Further reading shows this assumes 2 Mb/s networks.
++	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
++	 *	For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
++	 *	we also need to limit the resolution so that the u32 seq
++	 *	overlaps less than one time per MSL (2 minutes).
++	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
++	 */
++	return seq + (ktime_to_ns(ktime_get_real()) >> 6);
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
++				   __be16 sport, __be16 dport)
++{
++	u32 secret[MD5_MESSAGE_BYTES / 4];
++	u32 hash[MD5_DIGEST_WORDS];
++	u32 i;
++
++	memcpy(hash, saddr, 16);
++	for (i = 0; i < 4; i++)
++		secret[i] = net_secret[i] + daddr[i];
++	secret[4] = net_secret[4] +
++		(((__force u16)sport << 16) + (__force u16)dport);
++	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
++		secret[i] = net_secret[i];
++
++	md5_transform(hash, secret);
++
++	return seq_scale(hash[0]);
++}
++EXPORT_SYMBOL(secure_tcpv6_sequence_number);
++
++u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
++			       __be16 dport)
++{
++	u32 secret[MD5_MESSAGE_BYTES / 4];
++	u32 hash[MD5_DIGEST_WORDS];
++	u32 i;
++
++	memcpy(hash, saddr, 16);
++	for (i = 0; i < 4; i++)
++		secret[i] = net_secret[i] + (__force u32) daddr[i];
++	secret[4] = net_secret[4] + (__force u32)dport;
++	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
++		secret[i] = net_secret[i];
++
++	md5_transform(hash, secret);
++
++	return hash[0];
++}
++#endif
++
++#ifdef CONFIG_INET
++__u32 secure_ip_id(__be32 daddr)
++{
++	u32 hash[MD5_DIGEST_WORDS];
++
++	hash[0] = (__force __u32) daddr;
++	hash[1] = net_secret[13];
++	hash[2] = net_secret[14];
++	hash[3] = net_secret[15];
++
++	md5_transform(hash, net_secret);
++
++	return hash[0];
++}
++
++__u32 secure_ipv6_id(const __be32 daddr[4])
++{
++	__u32 hash[4];
++
++	memcpy(hash, daddr, 16);
++	md5_transform(hash, net_secret);
++
++	return hash[0];
++}
++
++__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
++				 __be16 sport, __be16 dport)
++{
++	u32 hash[MD5_DIGEST_WORDS];
++
++	hash[0] = (__force u32)saddr;
++	hash[1] = (__force u32)daddr;
++	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
++	hash[3] = net_secret[15];
++
++	md5_transform(hash, net_secret);
++
++	return seq_scale(hash[0]);
++}
++
++u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
++{
++	u32 hash[MD5_DIGEST_WORDS];
++
++	hash[0] = (__force u32)saddr;
++	hash[1] = (__force u32)daddr;
++	hash[2] = (__force u32)dport ^ net_secret[14];
++	hash[3] = net_secret[15];
++
++	md5_transform(hash, net_secret);
++
++	return hash[0];
++}
++EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
++#endif
++
++#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
++u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
++				__be16 sport, __be16 dport)
++{
++	u32 hash[MD5_DIGEST_WORDS];
++	u64 seq;
++
++	hash[0] = (__force u32)saddr;
++	hash[1] = (__force u32)daddr;
++	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
++	hash[3] = net_secret[15];
++
++	md5_transform(hash, net_secret);
++
++	seq = hash[0] | (((u64)hash[1]) << 32);
++	seq += ktime_to_ns(ktime_get_real());
++	seq &= (1ull << 48) - 1;
++
++	return seq;
++}
++EXPORT_SYMBOL(secure_dccp_sequence_number);
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
++				  __be16 sport, __be16 dport)
++{
++	u32 secret[MD5_MESSAGE_BYTES / 4];
++	u32 hash[MD5_DIGEST_WORDS];
++	u64 seq;
++	u32 i;
++
++	memcpy(hash, saddr, 16);
++	for (i = 0; i < 4; i++)
++		secret[i] = net_secret[i] + daddr[i];
++	secret[4] = net_secret[4] +
++		(((__force u16)sport << 16) + (__force u16)dport);
++	for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
++		secret[i] = net_secret[i];
++
++	md5_transform(hash, secret);
++
++	seq = hash[0] | (((u64)hash[1]) << 32);
++	seq += ktime_to_ns(ktime_get_real());
++	seq &= (1ull << 48) - 1;
++
++	return seq;
++}
++EXPORT_SYMBOL(secure_dccpv6_sequence_number);
++#endif
++#endif
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 7302e14..d14c0a3 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -25,6 +25,7 @@
+ #include <net/timewait_sock.h>
+ #include <net/tcp_states.h>
+ #include <net/xfrm.h>
++#include <net/secure_seq.h>
+ 
+ #include "ackvec.h"
+ #include "ccid.h"
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index e48ca5d..9ed1962 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -28,6 +28,7 @@
+ #include <net/transp_v6.h>
+ #include <net/ip6_checksum.h>
+ #include <net/xfrm.h>
++#include <net/secure_seq.h>
+ 
+ #include "dccp.h"
+ #include "ipv6.h"
+@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
+ 	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+ }
+ 
+-static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+-						  __be16 sport, __be16 dport   )
+-{
+-	return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
+-}
+-
+-static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
++static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
+ {
+ 	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
+ 					     ipv6_hdr(skb)->saddr.s6_addr32,
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 625cc5f..d717267 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -21,6 +21,7 @@
+ 
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
++#include <net/secure_seq.h>
+ #include <net/ip.h>
+ 
+ /*
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index b1fbe18..13b229f 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -19,6 +19,7 @@
+ #include <linux/net.h>
+ #include <net/ip.h>
+ #include <net/inetpeer.h>
++#include <net/secure_seq.h>
+ 
+ /*
+  *  Theory of operations.
+diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
+index 6c4f11f..2d5073a 100644
+--- a/net/ipv4/netfilter/nf_nat_proto_common.c
++++ b/net/ipv4/netfilter/nf_nat_proto_common.c
+@@ -12,6 +12,7 @@
+ #include <linux/ip.h>
+ 
+ #include <linux/netfilter.h>
++#include <net/secure_seq.h>
+ #include <net/netfilter/nf_nat.h>
+ #include <net/netfilter/nf_nat_core.h>
+ #include <net/netfilter/nf_nat_rule.h>
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6c8f6c9..58f141b 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -107,6 +107,7 @@
+ #ifdef CONFIG_SYSCTL
+ #include <linux/sysctl.h>
+ #endif
++#include <net/secure_seq.h>
+ 
+ #define RT_FL_TOS(oldflp) \
+     ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 7cda24b..6fc7961 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -71,6 +71,7 @@
+ #include <net/timewait_sock.h>
+ #include <net/xfrm.h>
+ #include <net/netdma.h>
++#include <net/secure_seq.h>
+ 
+ #include <linux/inet.h>
+ #include <linux/ipv6.h>
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 1bcc343..093e9b2 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -20,6 +20,7 @@
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/inet6_hashtables.h>
++#include <net/secure_seq.h>
+ #include <net/ip.h>
+ 
+ void __inet6_hash(struct sock *sk)
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 21d100b..faae6df 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -60,6 +60,7 @@
+ #include <net/timewait_sock.h>
+ #include <net/netdma.h>
+ #include <net/inet_common.h>
++#include <net/secure_seq.h>
+ 
+ #include <asm/uaccess.h>
+ 
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 8f8b17a..4a13844 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -530,6 +530,8 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
+ 	if (err < 0)
+ 		return err;
+ 	timer = timeri->timer;
++	if (!timer)
++		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	timeri->cticks = timeri->ticks;
+ 	timeri->pticks = 0;
+diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
+index a48d309..c93e1aa 100644
+--- a/sound/usb/caiaq/input.c
++++ b/sound/usb/caiaq/input.c
+@@ -28,7 +28,7 @@ static unsigned short keycode_ak1[] =  { KEY_C, KEY_B, KEY_A };
+ static unsigned short keycode_rk2[] =  { KEY_1, KEY_2, KEY_3, KEY_4,
+ 					 KEY_5, KEY_6, KEY_7 };
+ static unsigned short keycode_rk3[] =  { KEY_1, KEY_2, KEY_3, KEY_4,
+-					 KEY_5, KEY_6, KEY_7, KEY_5, KEY_6 };
++					 KEY_5, KEY_6, KEY_7, KEY_8, KEY_9 };
+ 
+ static unsigned short keycode_kore[] = {
+ 	KEY_FN_F1,      /* "menu"               */

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.46.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.46.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/stable/2.6.32.46.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/all/stable/2.6.32.46.patch)
@@ -0,0 +1,533 @@
+diff --git a/Makefile b/Makefile
+index 5ba9830..9f479bf 100644
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index c7ee9c9..95b0712 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -117,7 +117,6 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
+ 	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+ 	    APIC_DM_INIT;
+ 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+-	mdelay(10);
+ 
+ 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+ 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S
+index e2800af..e354bce 100644
+--- a/arch/x86/vdso/vdso32/sysenter.S
++++ b/arch/x86/vdso/vdso32/sysenter.S
+@@ -43,7 +43,7 @@ __kernel_vsyscall:
+ 	.space 7,0x90
+ 
+ 	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
+-	jmp .Lenter_kernel
++	int $0x80
+ 	/* 16: System call normal return point is here! */
+ VDSO32_SYSENTER_RETURN:	/* Symbol used by sysenter.c via vdso32-syms.h */
+ 	pop %ebp
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 87c0625..0ed436e 100644
+diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
+index 405d3fb..a818105 100644
+--- a/drivers/hwmon/ibmaem.c
++++ b/drivers/hwmon/ibmaem.c
+@@ -429,13 +429,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
+ 	aem_send_message(ipmi);
+ 
+ 	res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT);
+-	if (!res)
+-		return -ETIMEDOUT;
++	if (!res) {
++		res = -ETIMEDOUT;
++		goto out;
++	}
+ 
+ 	if (ipmi->rx_result || ipmi->rx_msg_len != rs_size ||
+ 	    memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) {
+-		kfree(rs_resp);
+-		return -ENOENT;
++		res = -ENOENT;
++		goto out;
+ 	}
+ 
+ 	switch (size) {
+@@ -460,8 +462,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
+ 		break;
+ 	}
+ 	}
++	res = 0;
+ 
+-	return 0;
++out:
++	kfree(rs_resp);
++	return res;
+ }
+ 
+ /* Update AEM energy registers */
+diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
+index 33352ff..d617f2d 100644
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index d9461c9..ae1ffbc 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -267,7 +267,7 @@ usbtmc_abort_bulk_in_status:
+ 				dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ 				goto exit;
+ 			}
+-		} while ((actual = max_size) &&
++		} while ((actual == max_size) &&
+ 			 (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ 
+ 	if (actual == max_size) {
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index bcf7a88..eae8b18 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -444,7 +444,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ 
+ 	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
+ 	if (val & XHCI_HC_BIOS_OWNED) {
+-		writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset);
++		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ 
+ 		/* Wait for 5 seconds with 10 microsecond polling interval */
+ 		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 8ab4ab2..d469673 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -43,6 +43,7 @@ static struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x05c6, 0x9203)},	/* Generic Gobi Modem device */
+ 	{USB_DEVICE(0x05c6, 0x9222)},	/* Generic Gobi Modem device */
+ 	{USB_DEVICE(0x05c6, 0x9008)},	/* Generic Gobi QDL device */
++	{USB_DEVICE(0x05c6, 0x9009)},	/* Generic Gobi Modem device */
+ 	{USB_DEVICE(0x05c6, 0x9201)},	/* Generic Gobi QDL device */
+ 	{USB_DEVICE(0x05c6, 0x9221)},	/* Generic Gobi QDL device */
+ 	{USB_DEVICE(0x05c6, 0x9231)},	/* Generic Gobi QDL device */
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 6ca33f2..4453f12 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1977,6 +1977,16 @@ UNUSUAL_DEV(  0x4146, 0xba01, 0x0100, 0x0100,
+ 		"Micro Mini 1GB",
+ 		US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+ 
++/*
++ * Nick Bowler <nbowler at elliptictech.com>
++ * SCSI stack spams (otherwise harmless) error messages.
++ */
++UNUSUAL_DEV(  0xc251, 0x4003, 0x0100, 0x0100,
++		"Keil Software, Inc.",
++		"V2M MotherBoard",
++		US_SC_DEVICE, US_PR_DEVICE, NULL,
++		US_FL_NOT_LOCKABLE),
++
+ /* Reported by Andrew Simmons <andrew.simmons at gmail.com> */
+ UNUSUAL_DEV(  0xed06, 0x4500, 0x0001, 0x0001,
+ 		"DataStor",
+diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
+index 34ddda8..9158c07 100644
+--- a/fs/befs/linuxvfs.c
++++ b/fs/befs/linuxvfs.c
+@@ -469,17 +469,22 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd)
+ 		befs_data_stream *data = &befs_ino->i_data.ds;
+ 		befs_off_t len = data->size;
+ 
+-		befs_debug(sb, "Follow long symlink");
+-
+-		link = kmalloc(len, GFP_NOFS);
+-		if (!link) {
+-			link = ERR_PTR(-ENOMEM);
+-		} else if (befs_read_lsymlink(sb, data, link, len) != len) {
+-			kfree(link);
+-			befs_error(sb, "Failed to read entire long symlink");
++		if (len == 0) {
++			befs_error(sb, "Long symlink with illegal length");
+ 			link = ERR_PTR(-EIO);
+ 		} else {
+-			link[len - 1] = '\0';
++			befs_debug(sb, "Follow long symlink");
++
++			link = kmalloc(len, GFP_NOFS);
++			if (!link) {
++				link = ERR_PTR(-ENOMEM);
++			} else if (befs_read_lsymlink(sb, data, link, len) != len) {
++				kfree(link);
++				befs_error(sb, "Failed to read entire long symlink");
++				link = ERR_PTR(-EIO);
++			} else {
++				link[len - 1] = '\0';
++			}
+ 		}
+ 	} else {
+ 		link = befs_ino->i_data.symlink;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 650546f..1facb39 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -899,6 +899,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
+ 	if (outarg.namelen > FUSE_NAME_MAX)
+ 		goto err;
+ 
++	err = -EINVAL;
++	if (size != sizeof(outarg) + outarg.namelen + 1)
++		goto err;
++
+ 	name.name = buf;
+ 	name.len = outarg.namelen;
+ 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
+diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
+index 2aac776..6ab70f4 100644
+--- a/fs/partitions/efi.c
++++ b/fs/partitions/efi.c
+@@ -311,6 +311,15 @@ is_gpt_valid(struct block_device *bdev, u64 lba,
+ 		goto fail;
+ 	}
+ 
++	/* Check the GUID Partition Table header size */
++	if (le32_to_cpu((*gpt)->header_size) >
++			bdev_logical_block_size(bdev)) {
++		pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
++			le32_to_cpu((*gpt)->header_size),
++			bdev_logical_block_size(bdev));
++		goto fail;
++	}
++
+ 	/* Check the GUID Partition Table CRC */
+ 	origcrc = le32_to_cpu((*gpt)->header_crc32);
+ 	(*gpt)->header_crc32 = 0;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 09dbee2..fb98c9f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -203,6 +203,8 @@ static void drop_futex_key_refs(union futex_key *key)
+  * @uaddr:	virtual address of the futex
+  * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+  * @key:	address where result is stored.
++ * @rw:		mapping needs to be read/write (values: VERIFY_READ,
++ *              VERIFY_WRITE)
+  *
+  * Returns a negative error code or 0
+  * The key words are stored in *key on success.
+@@ -214,12 +216,12 @@ static void drop_futex_key_refs(union futex_key *key)
+  * lock_page() might sleep, the caller should not hold a spinlock.
+  */
+ static int
+-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
++get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ {
+ 	unsigned long address = (unsigned long)uaddr;
+ 	struct mm_struct *mm = current->mm;
+ 	struct page *page;
+-	int err;
++	int err, ro = 0;
+ 
+ 	/*
+ 	 * The futex address must be "naturally" aligned.
+@@ -247,14 +249,31 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+ 
+ again:
+ 	err = get_user_pages_fast(address, 1, 1, &page);
++	/*
++	 * If write access is not required (eg. FUTEX_WAIT), try
++	 * and get read-only access.
++	 */
++	if (err == -EFAULT && rw == VERIFY_READ) {
++		err = get_user_pages_fast(address, 1, 0, &page);
++		ro = 1;
++	}
+ 	if (err < 0)
+ 		return err;
++	else
++		err = 0;
+ 
+ 	page = compound_head(page);
+ 	lock_page(page);
+ 	if (!page->mapping) {
+ 		unlock_page(page);
+ 		put_page(page);
++		/*
++		* ZERO_PAGE pages don't have a mapping. Avoid a busy loop
++		* trying to find one. RW mapping would have COW'd (and thus
++		* have a mapping) so this page is RO and won't ever change.
++		*/
++		if ((page == ZERO_PAGE(address)))
++			return -EFAULT;
+ 		goto again;
+ 	}
+ 
+@@ -266,6 +285,15 @@ again:
+ 	 * the object not the particular process.
+ 	 */
+ 	if (PageAnon(page)) {
++		/*
++		 * A RO anonymous page will never change and thus doesn't make
++		 * sense for futex operations.
++		 */
++		if (ro) {
++			err = -EFAULT;
++			goto out;
++		}
++
+ 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
+ 		key->private.mm = mm;
+ 		key->private.address = address;
+@@ -277,9 +305,10 @@ again:
+ 
+ 	get_futex_key_refs(key);
+ 
++out:
+ 	unlock_page(page);
+ 	put_page(page);
+-	return 0;
++	return err;
+ }
+ 
+ static inline
+@@ -880,7 +909,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
+ 	if (!bitset)
+ 		return -EINVAL;
+ 
+-	ret = get_futex_key(uaddr, fshared, &key);
++	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+@@ -926,10 +955,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
+ 	int ret, op_ret;
+ 
+ retry:
+-	ret = get_futex_key(uaddr1, fshared, &key1);
++	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+-	ret = get_futex_key(uaddr2, fshared, &key2);
++	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+ 		goto out_put_key1;
+ 
+@@ -1188,10 +1217,11 @@ retry:
+ 		pi_state = NULL;
+ 	}
+ 
+-	ret = get_futex_key(uaddr1, fshared, &key1);
++	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+-	ret = get_futex_key(uaddr2, fshared, &key2);
++	ret = get_futex_key(uaddr2, fshared, &key2,
++			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		goto out_put_key1;
+ 
+@@ -1746,7 +1776,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
+ 	 */
+ retry:
+ 	q->key = FUTEX_KEY_INIT;
+-	ret = get_futex_key(uaddr, fshared, &q->key);
++	ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+@@ -1912,7 +1942,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
+ 	q.requeue_pi_key = NULL;
+ retry:
+ 	q.key = FUTEX_KEY_INIT;
+-	ret = get_futex_key(uaddr, fshared, &q.key);
++	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+@@ -2031,7 +2061,7 @@ retry:
+ 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
+ 		return -EPERM;
+ 
+-	ret = get_futex_key(uaddr, fshared, &key);
++	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+@@ -2223,7 +2253,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
+ 	rt_waiter.task = NULL;
+ 
+ 	key2 = FUTEX_KEY_INIT;
+-	ret = get_futex_key(uaddr2, fshared, &key2);
++	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ 	if (unlikely(ret != 0))
+ 		goto out;
+ 
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 680dcbb..7221e9e 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -671,9 +671,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
+ #define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
+ #define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
+ #define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
+-#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,		\
+-					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
+-						VMALLOC_PAGES / NR_CPUS / 16))
++#define VMAP_BBMAP_BITS		\
++		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
++		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
++			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
+ 
+ #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
+ 
+diff --git a/net/atm/br2684.c b/net/atm/br2684.c
+index c9230c3..be1c1d2 100644
+--- a/net/atm/br2684.c
++++ b/net/atm/br2684.c
+@@ -208,8 +208,6 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
+ 		if (brdev->payload == p_bridged) {
+ 			skb_push(skb, 2);
+ 			memset(skb->data, 0, 2);
+-		} else { /* p_routed */
+-			skb_pull(skb, ETH_HLEN);
+ 		}
+ 	}
+ 	skb_debug(skb);
+diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
+index e7efcef..eeb2e23 100644
+--- a/sound/pci/ac97/ac97_patch.c
++++ b/sound/pci/ac97/ac97_patch.c
+@@ -1915,6 +1915,7 @@ static unsigned int ad1981_jacks_whitelist[] = {
+ 	0x103c0944, /* HP nc6220 */
+ 	0x103c0934, /* HP nc8220 */
+ 	0x103c006d, /* HP nx9105 */
++	0x103c300d, /* HP Compaq dc5100 SFF(PT003AW) */
+ 	0x17340088, /* FSC Scenic-W */
+ 	0 /* end */
+ };
+diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
+index 007b4bf..e89017a 100644
+--- a/sound/usb/caiaq/audio.c
++++ b/sound/usb/caiaq/audio.c
+@@ -138,8 +138,12 @@ static void stream_stop(struct snd_usb_caiaqdev *dev)
+ 
+ 	for (i = 0; i < N_URBS; i++) {
+ 		usb_kill_urb(dev->data_urbs_in[i]);
+-		usb_kill_urb(dev->data_urbs_out[i]);
++
++		if (test_bit(i, &dev->outurb_active_mask))
++			usb_kill_urb(dev->data_urbs_out[i]);
+ 	}
++
++	dev->outurb_active_mask = 0;
+ }
+ 
+ static int snd_usb_caiaq_substream_open(struct snd_pcm_substream *substream)
+@@ -466,8 +470,9 @@ static void read_completed(struct urb *urb)
+ {
+ 	struct snd_usb_caiaq_cb_info *info = urb->context;
+ 	struct snd_usb_caiaqdev *dev;
+-	struct urb *out;
+-	int frame, len, send_it = 0, outframe = 0;
++	struct urb *out = NULL;
++	int i, frame, len, send_it = 0, outframe = 0;
++	size_t offset = 0;
+ 
+ 	if (urb->status || !info)
+ 		return;
+@@ -477,7 +482,17 @@ static void read_completed(struct urb *urb)
+ 	if (!dev->streaming)
+ 		return;
+ 
+-	out = dev->data_urbs_out[info->index];
++	/* find an unused output urb that is unused */
++	for (i = 0; i < N_URBS; i++)
++		if (test_and_set_bit(i, &dev->outurb_active_mask) == 0) {
++			out = dev->data_urbs_out[i];
++			break;
++		}
++
++	if (!out) {
++		log("Unable to find an output urb to use\n");
++		goto requeue;
++	}
+ 
+ 	/* read the recently received packet and send back one which has
+ 	 * the same layout */
+@@ -488,7 +503,8 @@ static void read_completed(struct urb *urb)
+ 		len = urb->iso_frame_desc[outframe].actual_length;
+ 		out->iso_frame_desc[outframe].length = len;
+ 		out->iso_frame_desc[outframe].actual_length = 0;
+-		out->iso_frame_desc[outframe].offset = BYTES_PER_FRAME * frame;
++		out->iso_frame_desc[outframe].offset = offset;
++		offset += len;
+ 
+ 		if (len > 0) {
+ 			spin_lock(&dev->spinlock);
+@@ -504,11 +520,15 @@ static void read_completed(struct urb *urb)
+ 	}
+ 
+ 	if (send_it) {
+-		out->number_of_packets = FRAMES_PER_URB;
++		out->number_of_packets = outframe;
+ 		out->transfer_flags = URB_ISO_ASAP;
+ 		usb_submit_urb(out, GFP_ATOMIC);
++	} else {
++		struct snd_usb_caiaq_cb_info *oinfo = out->context;
++		clear_bit(oinfo->index, &dev->outurb_active_mask);
+ 	}
+ 
++requeue:
+ 	/* re-submit inbound urb */
+ 	for (frame = 0; frame < FRAMES_PER_URB; frame++) {
+ 		urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame;
+@@ -530,6 +550,8 @@ static void write_completed(struct urb *urb)
+ 		dev->output_running = 1;
+ 		wake_up(&dev->prepare_wait_queue);
+ 	}
++
++	clear_bit(info->index, &dev->outurb_active_mask);
+ }
+ 
+ static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret)
+@@ -680,6 +702,9 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
+ 	if (!dev->data_cb_info)
+ 		return -ENOMEM;
+ 
++	dev->outurb_active_mask = 0;
++	BUILD_BUG_ON(N_URBS > (sizeof(dev->outurb_active_mask) * 8));
++
+ 	for (i = 0; i < N_URBS; i++) {
+ 		dev->data_cb_info[i].dev = dev;
+ 		dev->data_cb_info[i].index = i;
+diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h
+index 44e3edf..94c0c36 100644
+--- a/sound/usb/caiaq/device.h
++++ b/sound/usb/caiaq/device.h
+@@ -92,6 +92,7 @@ struct snd_usb_caiaqdev {
+ 	int input_panic, output_panic, warned;
+ 	char *audio_in_buf, *audio_out_buf;
+ 	unsigned int samplerates, bpp;
++	unsigned long outurb_active_mask;
+ 
+ 	struct snd_pcm_substream *sub_playback[MAX_STREAMS];
+ 	struct snd_pcm_substream *sub_capture[MAX_STREAMS];
+diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
+index 8784649..1720d01 100644
+--- a/tools/perf/util/config.c
++++ b/tools/perf/util/config.c
+@@ -416,7 +416,6 @@ int perf_config_global(void)
+ int perf_config(config_fn_t fn, void *data)
+ {
+ 	int ret = 0, found = 0;
+-	char *repo_config = NULL;
+ 	const char *home = NULL;
+ 
+ 	/* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+@@ -438,12 +437,6 @@ int perf_config(config_fn_t fn, void *data)
+ 		free(user_config);
+ 	}
+ 
+-	repo_config = perf_pathdup("config");
+-	if (!access(repo_config, R_OK)) {
+-		ret += perf_config_from_file(fn, repo_config, data);
+-		found += 1;
+-	}
+-	free(repo_config);
+ 	if (found == 0)
+ 		return -1;
+ 	return ret;

Copied: dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch)
@@ -0,0 +1,149 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Mon, 5 Sep 2011 04:51:04 +0100
+Subject: [PATCH] Revert "x86, hotplug: Use mwait to offline a processor, fix
+ the legacy case"
+
+This reverts commit 226917b0735f31cf5c704e07fdd590d99bbfae58, which
+was found to break hibernation on some systems.
+---
+ arch/x86/include/asm/processor.h |   23 ++++++++++
+ arch/x86/kernel/smpboot.c        |   85 +-------------------------------------
+ 2 files changed, 24 insertions(+), 84 deletions(-)
+
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index da35a70..fa04dea 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -765,6 +765,29 @@ extern unsigned long		boot_option_idle_override;
+ extern unsigned long		idle_halt;
+ extern unsigned long		idle_nomwait;
+ 
++/*
++ * on systems with caches, caches must be flashed as the absolute
++ * last instruction before going into a suspended halt.  Otherwise,
++ * dirty data can linger in the cache and become stale on resume,
++ * leading to strange errors.
++ *
++ * perform a variety of operations to guarantee that the compiler
++ * will not reorder instructions.  wbinvd itself is serializing
++ * so the processor will not reorder.
++ *
++ * Systems without cache can just go into halt.
++ */
++static inline void wbinvd_halt(void)
++{
++	mb();
++	/* check for clflush to determine if wbinvd is legal */
++	if (cpu_has_clflush)
++		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
++	else
++		while (1)
++			halt();
++}
++
+ extern void enable_sep_cpu(void);
+ extern int sysenter_setup(void);
+ 
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 539bb6c..7e8e905 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1338,94 +1338,11 @@ void play_dead_common(void)
+ 	local_irq_disable();
+ }
+ 
+-#define MWAIT_SUBSTATE_MASK		0xf
+-#define MWAIT_SUBSTATE_SIZE		4
+-
+-#define CPUID_MWAIT_LEAF		5
+-#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
+-
+-/*
+- * We need to flush the caches before going to sleep, lest we have
+- * dirty data in our caches when we come back up.
+- */
+-static inline void mwait_play_dead(void)
+-{
+-	unsigned int eax, ebx, ecx, edx;
+-	unsigned int highest_cstate = 0;
+-	unsigned int highest_subcstate = 0;
+-	int i;
+-	void *mwait_ptr;
+-
+-	if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+-		return;
+-	if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+-		return;
+-	if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+-		return;
+-
+-	eax = CPUID_MWAIT_LEAF;
+-	ecx = 0;
+-	native_cpuid(&eax, &ebx, &ecx, &edx);
+-
+-	/*
+-	 * eax will be 0 if EDX enumeration is not valid.
+-	 * Initialized below to cstate, sub_cstate value when EDX is valid.
+-	 */
+-	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
+-		eax = 0;
+-	} else {
+-		edx >>= MWAIT_SUBSTATE_SIZE;
+-		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
+-			if (edx & MWAIT_SUBSTATE_MASK) {
+-				highest_cstate = i;
+-				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
+-			}
+-		}
+-		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
+-			(highest_subcstate - 1);
+-	}
+-
+-	/*
+-	 * This should be a memory location in a cache line which is
+-	 * unlikely to be touched by other processors.  The actual
+-	 * content is immaterial as it is not actually modified in any way.
+-	 */
+-	mwait_ptr = &current_thread_info()->flags;
+-
+-	wbinvd();
+-
+-	while (1) {
+-		/*
+-		 * The CLFLUSH is a workaround for erratum AAI65 for
+-		 * the Xeon 7400 series.  It's not clear it is actually
+-		 * needed, but it should be harmless in either case.
+-		 * The WBINVD is insufficient due to the spurious-wakeup
+-		 * case where we return around the loop.
+-		 */
+-		clflush(mwait_ptr);
+-		__monitor(mwait_ptr, 0, 0);
+-		mb();
+-		__mwait(eax, 0);
+-	}
+-}
+-
+-static inline void hlt_play_dead(void)
+-{
+-	if (current_cpu_data.x86 >= 4)
+-		wbinvd();
+-
+-	while (1) {
+-		native_halt();
+-	}
+-}
+-
+ void native_play_dead(void)
+ {
+ 	play_dead_common();
+ 	tboot_shutdown(TB_SHUTDOWN_WFS);
+-
+-	mwait_play_dead();	/* Only returns on failure */
+-	hlt_play_dead();
++	wbinvd_halt();
+ }
+ 
+ #else /* ... !CONFIG_HOTPLUG_CPU */
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/bridge-avoid-ABI-change-in-2.6.32.44.patch)
@@ -0,0 +1,21 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: bridge: Avoid ABI change in 2.6.32.44
+
+The change is in a private structure defined outside include/.  Hide
+the change from genksyms.
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 8567d47..476b65b 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -75,7 +75,10 @@ struct net_bridge_port
+ 	bridge_id			designated_bridge;
+ 	u32				path_cost;
+ 	u32				designated_cost;
++	/* This structure is private; ignore changes */
++#ifndef __GENKSYMS__
+ 	unsigned long			designated_age;
++#endif
+ 
+ 	struct timer_list		forward_delay_timer;
+ 	struct timer_list		hold_timer;

Copied: dists/squeeze-security/linux-2.6/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch)
@@ -0,0 +1,29 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: if_packet: Avoid ABI change in 2.6.32.43
+
+A change in 2.6.32.43 replaces 2 bytes of automatic padding for
+alignment with a named field, in 2 structures.  The changes are
+binary-compatible, so just hide them from genksysms.
+
+--- a/include/linux/if_packet.h
++++ b/include/linux/if_packet.h
+@@ -63,7 +63,9 @@
+ 	__u16		tp_mac;
+ 	__u16		tp_net;
+ 	__u16		tp_vlan_tci;
++#ifndef __GENKSYMS__
+ 	__u16		tp_padding;
++#endif
+ };
+ 
+ /* Rx ring - header status */
+@@ -104,7 +106,9 @@
+ 	__u32		tp_sec;
+ 	__u32		tp_nsec;
+ 	__u16		tp_vlan_tci;
++#ifndef __GENKSYMS__
+ 	__u16		tp_padding;
++#endif
+ };
+ 
+ #define TPACKET2_HDRLEN		(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))

Copied: dists/squeeze-security/linux-2.6/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/mm-avoid-ABI-change-in-2.6.32.43.patch)
@@ -0,0 +1,65 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: mm: Avoid ABI change in 2.6.32.43
+
+In 2.6.32.43, commit ffdd12eabed263a487ddc05fdf65be6e4fc717b4
+("mm: prevent concurrent unmap_mapping_range() on the same inode")
+added a mutex to struct address_space.  This structure is embedded
+in struct inode and changing it will result in widespread symbol
+version changes.
+
+Replace the mutex with a flag and synchronise on that.  This is
+inefficient, but I think this must be protecting against quite a
+rare race condition and so it doesn't matter that much.
+
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -266,7 +266,6 @@
+ 	spin_lock_init(&mapping->private_lock);
+ 	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ 	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+-	mutex_init(&mapping->unmap_mutex);
+ }
+ EXPORT_SYMBOL(address_space_init_once);
+ 
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -635,7 +635,6 @@
+ 	spinlock_t		private_lock;	/* for use by the address_space */
+ 	struct list_head	private_list;	/* ditto */
+ 	struct address_space	*assoc_mapping;	/* ditto */
+-	struct mutex		unmap_mutex;    /* to protect unmapping */
+ } __attribute__((aligned(sizeof(long))));
+ 	/*
+ 	 * On most architectures that alignment is already the case; but
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -23,6 +23,7 @@
+ 	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
+ 	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
+ 	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
++	AS_UNMAP_MUTEX	= __GFP_BITS_SHIFT + 4, /* ersatz unmap_mutex */
+ };
+ 
+ static inline void mapping_set_error(struct address_space *mapping, int error)
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2454,7 +2454,8 @@
+ 		details.last_index = ULONG_MAX;
+ 	details.i_mmap_lock = &mapping->i_mmap_lock;
+ 
+-	mutex_lock(&mapping->unmap_mutex);
++	while (test_and_set_bit(AS_UNMAP_MUTEX, &mapping->flags))
++		schedule_timeout_uninterruptible(1);
+ 	spin_lock(&mapping->i_mmap_lock);
+ 
+ 	/* Protect against endless unmapping loops */
+@@ -2471,7 +2472,8 @@
+ 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+ 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
+ 	spin_unlock(&mapping->i_mmap_lock);
+-	mutex_unlock(&mapping->unmap_mutex);
++	smp_mb__before_clear_bit();
++	clear_bit(AS_UNMAP_MUTEX, &mapping->flags);
+ }
+ EXPORT_SYMBOL(unmap_mapping_range);
+ 

Copied: dists/squeeze-security/linux-2.6/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch)
@@ -0,0 +1,30 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sat, 27 Aug 2011 15:09:53 +0100
+Subject: [PATCH] Revert "net/ipv4: Check for mistakenly passed in non-IPv4
+ address"
+
+This reverts commit d0733d2e29b652b2e7b1438ececa732e4eed98eb,
+cherry-picked in 2.6.32.43.  I suspect that some applications
+accidentally depend on the old behaviour, and we don't want to break
+them in a stable update.
+---
+ net/ipv4/af_inet.c |    3 ---
+ 1 files changed, 0 insertions(+), 3 deletions(-)
+
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index a289878..57737b8 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -460,9 +460,6 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	if (addr_len < sizeof(struct sockaddr_in))
+ 		goto out;
+ 
+-	if (addr->sin_family != AF_INET)
+-		goto out;
+-
+ 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
+ 
+ 	/* Not specified by any standard per-se, however it breaks too
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch)
@@ -0,0 +1,24 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 19 May 2011 04:29:25 +0100
+Subject: [PATCH 2/6] PCI: Add dummy implementation of pci_dev_run_wake()
+
+---
+ include/linux/pci.h |    2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index e07d194..d67702a 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -745,6 +745,8 @@ int pci_wake_from_d3(struct pci_dev *dev, bool enable);
+ pci_power_t pci_target_state(struct pci_dev *dev);
+ int pci_prepare_to_sleep(struct pci_dev *dev);
+ int pci_back_from_sleep(struct pci_dev *dev);
++/* Dummy for 2.6.32 */
++static inline bool pci_dev_run_wake(struct pci_dev *dev) { return false; }
+ 
+ /* Functions for PCI Hotplug drivers to use */
+ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/PCI-introduce-pci_pcie_cap.patch)
@@ -0,0 +1,42 @@
+From 8540f0cfe3c3eb59888f42ced0882cd549b84578 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Wed, 11 Nov 2009 14:29:54 +0900
+Subject: [PATCH] PCI: introduce pci_pcie_cap()
+
+Added in commit d7b7e60526d54da4c94afe5f137714cee7d05c41 upstream.
+However, we don't have and can't add the 'pcie_cap' field to
+struct pci_dev.
+---
+ include/linux/pci.h |   16 ++++++++++++++++
+ 1 files changed, 16 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 9a18b5f..81c2d57 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1300,6 +1300,22 @@ extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
+ extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+ #endif
+ 
++/**
++ * pci_pcie_cap - get the saved PCIe capability offset
++ * @dev: PCI device
++ *
++ * PCIe capability offset is calculated at PCI device initialization
++ * time and saved in the data structure. This function returns saved
++ * PCIe capability offset. Using this instead of pci_find_capability()
++ * reduces unnecessary search in the PCI configuration space. If you
++ * need to calculate PCIe capability offset from raw device for some
++ * reasons, please use pci_find_capability() instead.
++ */
++static inline int pci_pcie_cap(struct pci_dev *dev)
++{
++	return pci_find_capability(dev, PCI_CAP_ID_EXP);
++}
++
+ #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
+ #define PCI_VPD_LRDT_ID(x)		(x | PCI_VPD_LRDT)
+ 
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch)
@@ -0,0 +1,90 @@
+From: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+Date: Wed, 10 Mar 2010 15:23:39 -0800
+Subject: [PATCH 4/6] dma-mapping: dma-mapping.h: add dma_set_coherent_mask
+
+dma_set_coherent_mask corresponds to pci_set_consistent_dma_mask.  This is
+necessary to move to the generic device model DMA API from the PCI bus
+specific API in the long term.
+
+dma_set_coherent_mask works in the exact same way that
+pci_set_consistent_dma_mask does.  So this patch also changes
+pci_set_consistent_dma_mask to call dma_set_coherent_mask.
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+Cc: James Bottomley <James.Bottomley at suse.de>
+Cc: David S. Miller <davem at davemloft.net>
+Cc: Jesse Barnes <jbarnes at virtuousgeek.org>
+Cc: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+Cc: Russell King <linux at arm.linux.org.uk>
+Cc: Greg KH <greg at kroah.com>
+Cc: Kay Sievers <kay.sievers at vrfy.org>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+[bwh: Adjust context for 2.6.32]
+---
+ Documentation/DMA-API.txt   |   10 ++++++++++
+ drivers/pci/pci.c           |    7 +++----
+ include/linux/dma-mapping.h |    8 ++++++++
+ 3 files changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
+index 5aceb88..d8b693a 100644
+--- a/Documentation/DMA-API.txt
++++ b/Documentation/DMA-API.txt
+@@ -167,6 +167,16 @@ parameters if it is.
+ 
+ Returns: 0 if successful and a negative error if not.
+ 
++int
++dma_set_coherent_mask(struct device *dev, u64 mask)
++int
++pci_set_consistent_dma_mask(struct pci_device *dev, u64 mask)
++
++Checks to see if the mask is possible and updates the device
++parameters if it is.
++
++Returns: 0 if successful and a negative error if not.
++
+ u64
+ dma_get_required_mask(struct device *dev)
+ 
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 812d4ac..a42bc35 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -2066,10 +2066,9 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+ int
+ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+ {
+-	if (!pci_dma_supported(dev, mask))
+-		return -EIO;
+-
+-	dev->dev.coherent_dma_mask = mask;
++	int ret = dma_set_coherent_mask(&dev->dev, mask);
++	if (ret)
++		return ret;
+ 
+ 	return 0;
+ }
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 91b7618..8357721 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -127,6 +127,14 @@ static inline u64 dma_get_mask(struct device *dev)
+ 	return DMA_BIT_MASK(32);
+ }
+ 
++static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
++{
++	if (!dma_supported(dev, mask))
++		return -EIO;
++	dev->coherent_dma_mask = mask;
++	return 0;
++}
++
+ extern u64 dma_get_required_mask(struct device *dev);
+ 
+ static inline unsigned int dma_get_max_seg_size(struct device *dev)
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch)
@@ -0,0 +1,29 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Sat, 21 May 2011 18:12:37 +0100
+Subject: [PATCH 5/6] dma-mapping.h: add the dma_unmap state API
+
+For 2.6.32, define the generic API using the PCI API instead of the
+other way around as in 0acedc124aca35f5cce9d4ee288dc372bf517e09.
+---
+ include/linux/dma-mapping.h |    7 +++++++
+ 1 files changed, 7 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index 8357721..78211a0 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -240,4 +240,11 @@ struct dma_attrs;
+ 
+ #endif /* CONFIG_HAVE_DMA_ATTRS */
+ 
++#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) DECLARE_PCI_UNMAP_ADDR(ADDR_NAME);
++#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)   DECLARE_PCI_UNMAP_LEN(LEN_NAME);
++#define dma_unmap_addr             pci_unmap_addr
++#define dma_unmap_addr_set         pci_unmap_addr_set
++#define dma_unmap_len              pci_unmap_len
++#define dma_unmap_len_set          pci_unmap_len_set
++
+ #endif
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch)
@@ -0,0 +1,38 @@
+From: Phil Carmody <ext-phil.2.carmody at nokia.com>
+Date: Mon, 14 Dec 2009 18:00:29 -0800
+Subject: [PATCH] err.h: add helper function to simplify pointer error
+ checking
+
+commit 603c4ba96be998a8dd7a6f9b23681c49acdf4b64 upstream.
+
+There are quite a few instances in the kernel of checks of pointers both
+against NULL and against the errno range, handling both cases identically.
+This additional helper function would simplify such code.
+
+[akpm at linux-foundation.org: build fix]
+Signed-off-by: Phil Carmody <ext-phil.2.carmody at nokia.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
+---
+ include/linux/err.h |    5 +++++
+ 1 files changed, 5 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/err.h b/include/linux/err.h
+index ec87f31..1b12642 100644
+--- a/include/linux/err.h
++++ b/include/linux/err.h
+@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
+ 	return IS_ERR_VALUE((unsigned long)ptr);
+ }
+ 
++static inline long IS_ERR_OR_NULL(const void *ptr)
++{
++	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
++}
++
+ /**
+  * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+  * @ptr: The pointer to cast.
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch)
@@ -0,0 +1,28 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 22 Jul 2010 02:50:21 +0000
+Subject: [PATCH 6/6] etherdevice: Dummy implementation of
+ dev_hw_addr_random()
+
+---
+ include/linux/etherdevice.h |    5 +++++
+ 1 files changed, 5 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
+index 3d7a668..2ec7be7 100644
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -126,6 +126,11 @@ static inline void random_ether_addr(u8 *addr)
+ 	addr [0] |= 0x02;	/* set local assignment bit (IEEE802) */
+ }
+ 
++static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
++{
++	random_ether_addr(hwaddr);
++}
++
+ /**
+  * compare_ether_addr - Compare two Ethernet addresses
+  * @addr1: Pointer to a six-byte array containing the Ethernet address
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch)
@@ -0,0 +1,39 @@
+From: Eric Dumazet <eric.dumazet at gmail.com>
+Date: Wed, 7 Oct 2009 17:11:23 +0000
+Subject: [PATCH 1/6] net: Add netdev_alloc_skb_ip_align() helper
+
+commit 61321bbd6235ca9a40ba3bc249e8906cc66233c3 upstream.
+
+Instead of hardcoding NET_IP_ALIGN stuff in various network drivers,
+we can add a helper around netdev_alloc_skb()
+
+Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+---
+ include/linux/skbuff.h |   10 ++++++++++
+ 1 files changed, 10 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 5297a90..7d02bbf 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1489,6 +1489,16 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+ 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+ }
+ 
++static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
++		unsigned int length)
++{
++	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
++
++	if (NET_IP_ALIGN && skb)
++		skb_reserve(skb, NET_IP_ALIGN);
++	return skb;
++}
++
+ extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
+ 
+ /**
+-- 
+1.7.5.4
+

Modified: dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/0002-venfs-Backport-some-patches-from-rhel6-branch.patch
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/0002-venfs-Backport-some-patches-from-rhel6-branch.patch	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/0002-venfs-Backport-some-patches-from-rhel6-branch.patch	Sun Jan  8 10:47:12 2012	(r18474)
@@ -33,6 +33,7 @@
 
 Signed-off-by: Stanislav Kinsbursky <skinsbursky at parallels.com>
 Signed-off-by: Pavel Emelyanov <xemul at openvz.org>
+[bwh: Fix context in net/sunrpc/sched.c to apply after 2.6.32.44]
 ---
  fs/filesystems.c             |    2 +-
  fs/lockd/clntlock.c          |    4 +++
@@ -766,7 +767,7 @@
  /*
   * Disable the timer for a given RPC task. Should be called with
 @@ -617,6 +619,19 @@ static void __rpc_execute(struct rpc_task *task)
- 	for (;;) {
+ 		void (*do_action)(struct rpc_task *);
  
  		/*
 +		 * Finish this task with error state if RPC client is already
@@ -782,7 +783,7 @@
 +		}	
 +
 +		/*
- 		 * Execute any pending callback.
+ 		 * Execute any pending callback first.
  		 */
  		if (task->tk_callback) {
 @@ -715,7 +730,9 @@ void rpc_execute(struct rpc_task *task)
@@ -829,6 +830,3 @@
  	return xprt;
  }
 +EXPORT_SYMBOL(xprt_get);
--- 
-1.7.4.1
-

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/cpt-Allow-ext4-mount.patch)
@@ -0,0 +1,17 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: cpt: Allow ext4 mounts
+
+The existing checkpoint code apparently works for ext4 and most other
+filesystems backed by a block device.  The OpenVZ patch for RHEL 6
+allows it without any apparent need for specific support elsewhere in
+the code.
+
+--- a/kernel/cpt/cpt_files.h
++++ b/kernel/cpt/cpt_files.h
+@@ -74,4 +74,5 @@
+ 	 strcmp(mnt->mnt_sb->s_type->name, "devpts") != 0 && \
+ 	 strcmp(mnt->mnt_sb->s_type->name, "proc") != 0 && \
+ 	 strcmp(mnt->mnt_sb->s_type->name, "sysfs") != 0 && \
+-	 strcmp(mnt->mnt_sb->s_type->name, "binfmt_misc") != 0)
++	 strcmp(mnt->mnt_sb->s_type->name, "binfmt_misc") != 0 && \
++	 strcmp(mnt->mnt_sb->s_type->name, "ext4") != 0)

Modified: dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/openvz.patch
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/openvz.patch	Sun Jan  8 10:47:12 2012	(r18474)
@@ -6535,6 +6535,8 @@
 
 [bwh: Fix conflict with bugfix/all/af_unix-limit-recursion-level.patch]
 [bwh: Fix conflict with change to xs_reset_transport() in 2.6.32.36]
+[bwh: Fix context for changes to flush_old_exec() and nf_ct_frag6_gather()
+ after 2.6.32.42]
 
 diff --git a/COPYING.Parallels b/COPYING.Parallels
 new file mode 100644
@@ -16887,9 +16889,9 @@
  
 -	bprm->mm = NULL;		/* We're using it now */
 -
+ 	set_fs(USER_DS);
  	current->flags &= ~PF_RANDOMIZE;
  	flush_thread();
- 	current->personality &= ~bprm->per_clear;
 @@ -1334,6 +1364,10 @@ int do_execve(char * filename,
  	bool clear_in_exec;
  	int retval;
@@ -87498,8 +87500,8 @@
  	struct sk_buff *clone;
  	struct net_device *dev = skb->dev;
 @@ -606,10 +609,11 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
- 		goto ret_orig;
- 	}
+ 	hdr = ipv6_hdr(clone);
+ 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
  
 -	if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
 -		nf_ct_frag6_evictor();

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch)
@@ -0,0 +1,43 @@
+Date: Sun, 15 May 2011 08:47 -0400
+From: Cyrill Gorcunov <gorcunov at openvz.org>
+Subject: ptrace: Don't allow to trace a process without memory map v2
+
+There is a chance the user start tracing process which
+has no memory map yet because of being a child process
+which moves to VE environment at creation time.
+
+A typical example is to start VE via vzctl with --wait argument
+trace vzctl child process while it's in waiting state. The
+result is NULL dereference and kernel panic.
+
+Fix it by declining to trace such processes.
+
+http://bugzilla.openvz.org/show_bug.cgi?id=1866
+
+v2: Pavel spotted task->mm access without task_lock
+    guardian. Fixed.
+
+Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
+---
+ kernel/ptrace.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+Index: linux-2.6-ovz/kernel/ptrace.c
+===================================================================
+--- linux-2.6-ovz.orig/kernel/ptrace.c
++++ linux-2.6-ovz/kernel/ptrace.c
+@@ -198,8 +198,13 @@ int ptrace_attach(struct task_struct *ta
+ 	task_unlock(task);
+ 	if (retval)
+ 		goto unlock_creds;
++
+ 	retval = -EACCES;
+-	if (task->mm->vps_dumpable == 2)
++	task_lock(task);
++	if (task->mm && task->mm->vps_dumpable != 2)
++		retval = 0;
++	task_unlock(task);
++	if (retval)
+ 		goto unlock_creds;
+ 
+ 	write_lock_irq(&tasklist_lock);

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch)
@@ -0,0 +1,36 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Wed, 14 Sep 2011 14:44:42 +0100
+Subject: [PATCH] pm: Define SET_SYSTEM_SLEEP_PM_OPS()
+
+From commit d690b2cd222afc75320b9b8e9da7df02e9e630ca upstream.
+Second try, with #ifdef CONFIG_PM_SLEEP.
+---
+ include/linux/pm.h |   12 ++++++++++++
+ 1 files changed, 12 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 3b7e04b..fe277b5 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -213,6 +213,18 @@ struct dev_pm_ops {
+ 	int (*runtime_idle)(struct device *dev);
+ };
+ 
++#ifdef CONFIG_PM_SLEEP
++#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
++	.suspend = suspend_fn, \
++	.resume = resume_fn, \
++	.freeze = suspend_fn, \
++	.thaw = resume_fn, \
++	.poweroff = suspend_fn, \
++	.restore = resume_fn,
++#else
++#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
++#endif
++
+ /*
+  * Use this if you want to use the same suspend and resume callbacks for suspend
+  * to RAM and hibernation.
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch)
@@ -0,0 +1,31 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 19 May 2011 04:48:31 +0100
+Subject: [PATCH 3/6] pm: Define SET_SYSTEM_SLEEP_PM_OPS()
+
+From commit d690b2cd222afc75320b9b8e9da7df02e9e630ca upstream.
+---
+ include/linux/pm.h |    8 ++++++++
+ 1 files changed, 8 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/pm.h b/include/linux/pm.h
+index 3b7e04b..a0cc041 100644
+--- a/include/linux/pm.h
++++ b/include/linux/pm.h
+@@ -213,6 +213,14 @@ struct dev_pm_ops {
+ 	int (*runtime_idle)(struct device *dev);
+ };
+ 
++#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
++	.suspend = suspend_fn, \
++	.resume = resume_fn, \
++	.freeze = suspend_fn, \
++	.thaw = resume_fn, \
++	.poweroff = suspend_fn, \
++	.restore = resume_fn,
++
+ /*
+  * Use this if you want to use the same suspend and resume callbacks for suspend
+  * to RAM and hibernation.
+-- 
+1.7.5.4
+

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vs2.3.0.36.29.7.patch)
@@ -0,0 +1,28333 @@
+[bwh: Adjust context in drivers/block/Kconfig changed by drbd backport]
+
+--- a/Documentation/scheduler/sched-cfs-hard-limits.txt	1970-01-01 01:00:00.000000000 +0100
++++ a/Documentation/scheduler/sched-cfs-hard-limits.txt	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,48 @@
++CPU HARD LIMITS FOR CFS GROUPS
++==============================
++
++1. Overview
++2. Interface
++3. Examples
++
++1. Overview
++-----------
++
++CFS is a proportional share scheduler which tries to divide the CPU time
++proportionately between tasks or groups of tasks (task group/cgroup) depending
++on the priority/weight of the task or shares assigned to groups of tasks.
++In CFS, a task/task group can get more than its share of CPU if there are
++enough idle CPU cycles available in the system, due to the work conserving
++nature of the scheduler. However in certain scenarios (like pay-per-use),
++it is desirable not to provide extra time to a group even in the presence
++of idle CPU cycles. This is where hard limiting can be of use.
++
++Hard limits for task groups can be set by specifying how much CPU runtime a
++group can consume within a given period. If the group consumes more CPU time
++than the runtime in a given period, it gets throttled. None of the tasks of
++the throttled group gets to run until the runtime of the group gets refreshed
++at the beginning of the next period.
++
++2. Interface
++------------
++
++Hard limit feature adds 2 cgroup files for CFS group scheduler:
++
++cfs_runtime_us: Hard limit for the group in microseconds.
++
++cfs_period_us: Time period in microseconds within which hard limits is
++enforced.
++
++A group gets created with default values for runtime (infinite runtime which
++means hard limits disabled) and period (0.5s). Each group can set its own
++values for runtime and period independent of other groups in the system.
++
++3. Examples
++-----------
++
++# mount -t cgroup -ocpu none /cgroups/
++# cd /cgroups
++# mkdir 1
++# cd 1/
++# echo 250000 > cfs_runtime_us /* set a 250ms runtime or limit */
++# echo 500000 > cfs_period_us /* set a 500ms period */
+--- a/Documentation/vserver/debug.txt	1970-01-01 01:00:00.000000000 +0100
++++ a/Documentation/vserver/debug.txt	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,154 @@
++
++debug_cvirt:
++
++ 2   4	"vx_map_tgid: %p/%llx: %d -> %d"
++	"vx_rmap_tgid: %p/%llx: %d -> %d"
++
++debug_dlim:
++
++ 0   1	"ALLOC (%p,#%d)%c inode (%d)"
++	"FREE  (%p,#%d)%c inode"
++ 1   2	"ALLOC (%p,#%d)%c %lld bytes (%d)"
++	"FREE  (%p,#%d)%c %lld bytes"
++ 2   4	"ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
++ 3   8	"ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
++	"ext3_has_free_blocks(%p): free=%lu, root=%lu"
++	"rcu_free_dl_info(%p)"
++ 4  10	"alloc_dl_info(%p,%d) = %p"
++	"dealloc_dl_info(%p)"
++	"get_dl_info(%p[#%d.%d])"
++	"put_dl_info(%p[#%d.%d])"
++ 5  20	"alloc_dl_info(%p,%d)*"
++ 6  40	"__hash_dl_info: %p[#%d]"
++	"__unhash_dl_info: %p[#%d]"
++ 7  80	"locate_dl_info(%p,#%d) = %p"
++
++debug_misc:
++
++ 0   1	"destroy_dqhash: %p [#0x%08x] c=%d"
++	"new_dqhash: %p [#0x%08x]"
++	"vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
++	"vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
++	"vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
++	"vroot_get_real_bdev not set"
++ 1   2	"cow_break_link(»%s«)"
++	"temp copy »%s«"
++ 2   4	"dentry_open(new): %p"
++	"dentry_open(old): %p"
++	"lookup_create(new): %p"
++	"old path »%s«"
++	"path_lookup(old): %d"
++	"vfs_create(new): %d"
++	"vfs_rename: %d"
++	"vfs_sendfile: %d"
++ 3   8	"fput(new_file=%p[#%d])"
++	"fput(old_file=%p[#%d])"
++ 4  10	"vx_info_kill(%p[#%d],%d,%d) = %d"
++	"vx_info_kill(%p[#%d],%d,%d)*"
++ 5  20	"vs_reboot(%p[#%d],%d)"
++ 6  40	"dropping task %p[#%u,%u] for %p[#%u,%u]"
++
++debug_net:
++
++ 2   4	"nx_addr_conflict(%p,%p) %d.%d,%d.%d"
++ 3   8	"inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
++	"inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
++ 4  10	"ip_route_connect(%p) %p,%p;%lx"
++ 5  20	"__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
++ 6  40	"sk,egf: %p [#%d] (from %d)"
++	"sk,egn: %p [#%d] (from %d)"
++	"sk,req: %p [#%d] (from %d)"
++	"sk: %p [#%d] (from %d)"
++	"tw: %p [#%d] (from %d)"
++ 7  80	"__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
++	"__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
++
++debug_nid:
++
++ 0   1	"__lookup_nx_info(#%u): %p[#%u]"
++	"alloc_nx_info(%d) = %p"
++	"create_nx_info(%d) (dynamic rejected)"
++	"create_nx_info(%d) = %p (already there)"
++	"create_nx_info(%d) = %p (new)"
++	"dealloc_nx_info(%p)"
++ 1   2	"alloc_nx_info(%d)*"
++	"create_nx_info(%d)*"
++ 2   4	"get_nx_info(%p[#%d.%d])"
++	"put_nx_info(%p[#%d.%d])"
++ 3   8	"claim_nx_info(%p[#%d.%d.%d]) %p"
++	"clr_nx_info(%p[#%d.%d])"
++	"init_nx_info(%p[#%d.%d])"
++	"release_nx_info(%p[#%d.%d.%d]) %p"
++	"set_nx_info(%p[#%d.%d])"
++ 4  10	"__hash_nx_info: %p[#%d]"
++	"__nx_dynamic_id: [#%d]"
++	"__unhash_nx_info: %p[#%d.%d.%d]"
++ 5  20	"moved task %p into nxi:%p[#%d]"
++	"nx_migrate_task(%p,%p[#%d.%d.%d])"
++	"task_get_nx_info(%p)"
++ 6  40	"nx_clear_persistent(%p[#%d])"
++
++debug_quota:
++
++ 0   1	"quota_sync_dqh(%p,%d) discard inode %p"
++ 1   2	"quota_sync_dqh(%p,%d)"
++	"sync_dquots(%p,%d)"
++	"sync_dquots_dqh(%p,%d)"
++ 3   8	"do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
++
++debug_switch:
++
++ 0   1	"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
++ 1   2	"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
++ 4  10	"%s: (%s %s) returned %s with %d"
++
++debug_tag:
++
++ 7  80	"dx_parse_tag(»%s«): %d:#%d"
++	"dx_propagate_tag(%p[#%lu.%d]): %d,%d"
++
++debug_xid:
++
++ 0   1	"__lookup_vx_info(#%u): %p[#%u]"
++	"alloc_vx_info(%d) = %p"
++	"alloc_vx_info(%d)*"
++	"create_vx_info(%d) (dynamic rejected)"
++	"create_vx_info(%d) = %p (already there)"
++	"create_vx_info(%d) = %p (new)"
++	"dealloc_vx_info(%p)"
++	"loc_vx_info(%d) = %p (found)"
++	"loc_vx_info(%d) = %p (new)"
++	"loc_vx_info(%d) = %p (not available)"
++ 1   2	"create_vx_info(%d)*"
++	"loc_vx_info(%d)*"
++ 2   4	"get_vx_info(%p[#%d.%d])"
++	"put_vx_info(%p[#%d.%d])"
++ 3   8	"claim_vx_info(%p[#%d.%d.%d]) %p"
++	"clr_vx_info(%p[#%d.%d])"
++	"init_vx_info(%p[#%d.%d])"
++	"release_vx_info(%p[#%d.%d.%d]) %p"
++	"set_vx_info(%p[#%d.%d])"
++ 4  10	"__hash_vx_info: %p[#%d]"
++	"__unhash_vx_info: %p[#%d.%d.%d]"
++	"__vx_dynamic_id: [#%d]"
++ 5  20	"enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
++	"leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
++	"moved task %p into vxi:%p[#%d]"
++	"task_get_vx_info(%p)"
++	"vx_migrate_task(%p,%p[#%d.%d])"
++ 6  40	"vx_clear_persistent(%p[#%d])"
++	"vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
++	"vx_set_init(%p[#%d],%p[#%d,%d,%d])"
++	"vx_set_persistent(%p[#%d])"
++	"vx_set_reaper(%p[#%d],%p[#%d,%d])"
++ 7  80	"vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
++
++
++debug_limit:
++
++ n 2^n	"vx_acc_cres[%5d,%s,%2d]: %5d%s"
++	"vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
++
++ m 2^m	"vx_acc_page[%5d,%s,%2d]: %5d%s"
++	"vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
++	"vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+--- a/arch/alpha/Kconfig	2009-12-03 20:01:49.000000000 +0100
++++ a/arch/alpha/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -674,6 +674,8 @@ config DUMMY_CONSOLE
+ 	depends on VGA_HOSE
+ 	default y
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/alpha/kernel/entry.S	2009-06-11 17:11:46.000000000 +0200
++++ a/arch/alpha/kernel/entry.S	2011-06-10 13:03:02.000000000 +0200
+@@ -874,24 +874,15 @@ sys_getxgid:
+ 	.globl	sys_getxpid
+ 	.ent	sys_getxpid
+ sys_getxpid:
++	lda	$sp, -16($sp)
++	stq	$26, 0($sp)
+ 	.prologue 0
+-	ldq	$2, TI_TASK($8)
+ 
+-	/* See linux/kernel/timer.c sys_getppid for discussion
+-	   about this loop.  */
+-	ldq	$3, TASK_GROUP_LEADER($2)
+-	ldq	$4, TASK_REAL_PARENT($3)
+-	ldl	$0, TASK_TGID($2)
+-1:	ldl	$1, TASK_TGID($4)
+-#ifdef CONFIG_SMP
+-	mov	$4, $5
+-	mb
+-	ldq	$3, TASK_GROUP_LEADER($2)
+-	ldq	$4, TASK_REAL_PARENT($3)
+-	cmpeq	$4, $5, $5
+-	beq	$5, 1b
+-#endif
+-	stq	$1, 80($sp)
++	lda	$16, 96($sp)
++	jsr	$26, do_getxpid
++	ldq	$26, 0($sp)
++
++	lda	$sp, 16($sp)
+ 	ret
+ .end sys_getxpid
+ 
+--- a/arch/alpha/kernel/osf_sys.c	2011-05-29 23:42:13.000000000 +0200
++++ a/arch/alpha/kernel/osf_sys.c	2011-06-10 13:03:02.000000000 +0200
+@@ -865,7 +865,7 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct
+ {
+ 	if (tv) {
+ 		struct timeval ktv;
+-		do_gettimeofday(&ktv);
++		vx_gettimeofday(&ktv);
+ 		if (put_tv32(tv, &ktv))
+ 			return -EFAULT;
+ 	}
+--- a/arch/alpha/kernel/ptrace.c	2009-09-10 15:25:14.000000000 +0200
++++ a/arch/alpha/kernel/ptrace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/security.h>
+ #include <linux/signal.h>
++#include <linux/vs_base.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+--- a/arch/alpha/kernel/systbls.S	2009-03-24 14:18:08.000000000 +0100
++++ a/arch/alpha/kernel/systbls.S	2011-06-10 13:03:02.000000000 +0200
+@@ -446,7 +446,7 @@ sys_call_table:
+ 	.quad sys_stat64			/* 425 */
+ 	.quad sys_lstat64
+ 	.quad sys_fstat64
+-	.quad sys_ni_syscall			/* sys_vserver */
++	.quad sys_vserver			/* sys_vserver */
+ 	.quad sys_ni_syscall			/* sys_mbind */
+ 	.quad sys_ni_syscall			/* sys_get_mempolicy */
+ 	.quad sys_ni_syscall			/* sys_set_mempolicy */
+--- a/arch/alpha/kernel/traps.c	2009-06-11 17:11:46.000000000 +0200
++++ a/arch/alpha/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -183,7 +183,8 @@ die_if_kernel(char * str, struct pt_regs
+ #ifdef CONFIG_SMP
+ 	printk("CPU %d ", hard_smp_processor_id());
+ #endif
+-	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
++	printk("%s(%d[#%u]): %s %ld\n", current->comm,
++		task_pid_nr(current), current->xid, str, err);
+ 	dik_show_regs(regs, r9_15);
+ 	add_taint(TAINT_DIE);
+ 	dik_show_trace((unsigned long *)(regs+1));
+--- a/arch/alpha/mm/fault.c	2009-09-10 15:25:14.000000000 +0200
++++ a/arch/alpha/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -193,8 +193,8 @@ do_page_fault(unsigned long address, uns
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk(KERN_ALERT "VM: killing process %s(%d)\n",
+-	       current->comm, task_pid_nr(current));
++	printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n",
++	       current->comm, task_pid_nr(current), current->xid);
+ 	if (!user_mode(regs))
+ 		goto no_context;
+ 	do_group_exit(SIGKILL);
+--- a/arch/arm/Kconfig	2011-05-29 23:42:13.000000000 +0200
++++ a/arch/arm/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -1524,6 +1524,8 @@ source "fs/Kconfig"
+ 
+ source "arch/arm/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/arm/include/asm/tlb.h	2009-09-10 15:25:15.000000000 +0200
++++ a/arch/arm/include/asm/tlb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -27,6 +27,7 @@
+ 
+ #else /* !CONFIG_MMU */
+ 
++#include <linux/vs_memory.h>
+ #include <asm/pgalloc.h>
+ 
+ /*
+--- a/arch/arm/kernel/calls.S	2011-05-29 23:42:14.000000000 +0200
++++ a/arch/arm/kernel/calls.S	2011-06-10 13:03:02.000000000 +0200
+@@ -322,7 +322,7 @@
+ /* 310 */	CALL(sys_request_key)
+ 		CALL(sys_keyctl)
+ 		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
+-/* vserver */	CALL(sys_ni_syscall)
++		CALL(sys_vserver)
+ 		CALL(sys_ioprio_set)
+ /* 315 */	CALL(sys_ioprio_get)
+ 		CALL(sys_inotify_init)
+--- a/arch/arm/kernel/process.c	2009-12-03 20:01:50.000000000 +0100
++++ a/arch/arm/kernel/process.c	2011-06-10 13:03:02.000000000 +0200
+@@ -269,7 +269,8 @@ void __show_regs(struct pt_regs *regs)
+ void show_regs(struct pt_regs * regs)
+ {
+ 	printk("\n");
+-	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
++	printk("Pid: %d[#%u], comm: %20s\n",
++		task_pid_nr(current), current->xid, current->comm);
+ 	__show_regs(regs);
+ 	__backtrace();
+ }
+--- a/arch/arm/kernel/traps.c	2009-12-03 20:01:50.000000000 +0100
++++ a/arch/arm/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -234,8 +234,8 @@ static void __die(const char *str, int e
+ 	sysfs_printk_last_file();
+ 	print_modules();
+ 	__show_regs(regs);
+-	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+-		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
++	printk(KERN_EMERG "Process %.*s (pid: %d:#%u, stack limit = 0x%p)\n",
++		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), tsk->xid, thread + 1);
+ 
+ 	if (!user_mode(regs) || in_interrupt()) {
+ 		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
+--- a/arch/avr32/mm/fault.c	2009-09-10 15:25:20.000000000 +0200
++++ a/arch/avr32/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -216,7 +216,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: Killing process %s\n", tsk->comm);
++	printk("VM: Killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/cris/Kconfig	2009-06-11 17:11:56.000000000 +0200
++++ a/arch/cris/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -685,6 +685,8 @@ source "drivers/staging/Kconfig"
+ 
+ source "arch/cris/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/cris/mm/fault.c	2009-12-03 20:01:56.000000000 +0100
++++ a/arch/cris/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -245,7 +245,8 @@ do_page_fault(unsigned long address, str
+ 
+  out_of_memory:
+ 	up_read(&mm->mmap_sem);
+-	printk("VM: killing process %s\n", tsk->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if (user_mode(regs))
+ 		do_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/frv/kernel/kernel_thread.S	2008-12-25 00:26:37.000000000 +0100
++++ a/arch/frv/kernel/kernel_thread.S	2011-06-10 13:03:02.000000000 +0200
+@@ -37,7 +37,7 @@ kernel_thread:
+ 
+ 	# start by forking the current process, but with shared VM
+ 	setlos.p	#__NR_clone,gr7		; syscall number
+-	ori		gr10,#CLONE_VM,gr8	; first syscall arg	[clone_flags]
++	ori		gr10,#CLONE_KT,gr8	; first syscall arg	[clone_flags]
+ 	sethi.p		#0xe4e4,gr9		; second syscall arg	[newsp]
+ 	setlo		#0xe4e4,gr9
+ 	setlos.p	#0,gr10			; third syscall arg	[parent_tidptr]
+--- a/arch/frv/mm/fault.c	2009-09-10 15:25:22.000000000 +0200
++++ a/arch/frv/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -257,7 +257,8 @@ asmlinkage void do_page_fault(int datamm
+  */
+  out_of_memory:
+ 	up_read(&mm->mmap_sem);
+-	printk("VM: killing process %s\n", current->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	if (user_mode(__frame))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/h8300/Kconfig	2009-03-24 14:18:24.000000000 +0100
++++ a/arch/h8300/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -226,6 +226,8 @@ source "fs/Kconfig"
+ 
+ source "arch/h8300/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/ia64/Kconfig	2009-12-03 20:01:56.000000000 +0100
++++ a/arch/ia64/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -685,6 +685,8 @@ source "fs/Kconfig"
+ 
+ source "arch/ia64/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/ia64/ia32/ia32_entry.S	2009-06-11 17:11:57.000000000 +0200
++++ a/arch/ia64/ia32/ia32_entry.S	2011-06-10 13:03:02.000000000 +0200
+@@ -451,7 +451,7 @@ ia32_syscall_table:
+  	data8 sys_tgkill	/* 270 */
+  	data8 compat_sys_utimes
+  	data8 sys32_fadvise64_64
+- 	data8 sys_ni_syscall
++	data8 sys32_vserver
+   	data8 sys_ni_syscall
+  	data8 sys_ni_syscall	/* 275 */
+   	data8 sys_ni_syscall
+--- a/arch/ia64/include/asm/tlb.h	2009-09-10 15:25:22.000000000 +0200
++++ a/arch/ia64/include/asm/tlb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -40,6 +40,7 @@
+ #include <linux/mm.h>
+ #include <linux/pagemap.h>
+ #include <linux/swap.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/pgalloc.h>
+ #include <asm/processor.h>
+--- a/arch/ia64/kernel/entry.S	2009-09-10 15:25:22.000000000 +0200
++++ a/arch/ia64/kernel/entry.S	2011-06-10 13:03:02.000000000 +0200
+@@ -1753,7 +1753,7 @@ sys_call_table:
+ 	data8 sys_mq_notify
+ 	data8 sys_mq_getsetattr
+ 	data8 sys_kexec_load
+-	data8 sys_ni_syscall			// reserved for vserver
++	data8 sys_vserver
+ 	data8 sys_waitid			// 1270
+ 	data8 sys_add_key
+ 	data8 sys_request_key
+--- a/arch/ia64/kernel/perfmon.c	2009-09-10 15:25:22.000000000 +0200
++++ a/arch/ia64/kernel/perfmon.c	2011-06-10 13:03:02.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/completion.h>
+ #include <linux/tracehook.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/errno.h>
+ #include <asm/intrinsics.h>
+@@ -2372,7 +2373,7 @@ pfm_smpl_buffer_alloc(struct task_struct
+ 	 */
+ 	insert_vm_struct(mm, vma);
+ 
+-	mm->total_vm  += size >> PAGE_SHIFT;
++	vx_vmpages_add(mm, size >> PAGE_SHIFT);
+ 	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+ 							vma_pages(vma));
+ 	up_write(&task->mm->mmap_sem);
+--- a/arch/ia64/kernel/process.c	2009-12-03 20:01:56.000000000 +0100
++++ a/arch/ia64/kernel/process.c	2011-06-10 13:03:02.000000000 +0200
+@@ -110,8 +110,8 @@ show_regs (struct pt_regs *regs)
+ 	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
+ 
+ 	print_modules();
+-	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
+-			smp_processor_id(), current->comm);
++	printk("\nPid: %d[#%u], CPU %d, comm: %20s\n", task_pid_nr(current),
++			current->xid, smp_processor_id(), current->comm);
+ 	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s (%s)\n",
+ 	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
+ 	       init_utsname()->release);
+--- a/arch/ia64/kernel/ptrace.c	2009-09-10 15:25:22.000000000 +0200
++++ a/arch/ia64/kernel/ptrace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -22,6 +22,7 @@
+ #include <linux/regset.h>
+ #include <linux/elf.h>
+ #include <linux/tracehook.h>
++#include <linux/vs_base.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+--- a/arch/ia64/kernel/traps.c	2008-12-25 00:26:37.000000000 +0100
++++ a/arch/ia64/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re
+ 	put_cpu();
+ 
+ 	if (++die.lock_owner_depth < 3) {
+-		printk("%s[%d]: %s %ld [%d]\n",
+-		current->comm, task_pid_nr(current), str, err, ++die_counter);
++		printk("%s[%d[#%u]]: %s %ld [%d]\n",
++			current->comm, task_pid_nr(current), current->xid,
++			str, err, ++die_counter);
+ 		if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
+ 	            != NOTIFY_STOP)
+ 			show_regs(regs);
+@@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_
+ 			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
+ 				last.time = current_jiffies + 5 * HZ;
+ 				printk(KERN_WARNING
+-		       			"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
+-		       			current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
++					"%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n",
++					current->comm, task_pid_nr(current), current->xid,
++					regs->cr_iip + ia64_psr(regs)->ri, isr);
+ 			}
+ 		}
+ 	}
+--- a/arch/ia64/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
++++ a/arch/ia64/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -10,6 +10,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -281,7 +282,8 @@ ia64_do_page_fault (unsigned long addres
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
++	printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/m32r/kernel/traps.c	2009-12-03 20:01:57.000000000 +0100
++++ a/arch/m32r/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -196,8 +196,9 @@ static void show_registers(struct pt_reg
+ 	} else {
+ 		printk("SPI: %08lx\n", sp);
+ 	}
+-	printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
+-		current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
++	printk("Process %s (pid: %d[#%u], process nr: %d, stackpage=%08lx)",
++		current->comm, task_pid_nr(current), current->xid,
++		0xffff & i, 4096+(unsigned long)current);
+ 
+ 	/*
+ 	 * When in-kernel, we also print out the stack and code at the
+--- a/arch/m32r/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
++++ a/arch/m32r/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -276,7 +276,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", tsk->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if (error_code & ACE_USERMODE)
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/m68k/Kconfig	2009-12-03 20:01:57.000000000 +0100
++++ a/arch/m68k/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -622,6 +622,8 @@ source "fs/Kconfig"
+ 
+ source "arch/m68k/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/m68k/kernel/ptrace.c	2008-12-25 00:26:37.000000000 +0100
++++ a/arch/m68k/kernel/ptrace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+ #include <linux/signal.h>
++#include <linux/vs_base.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -269,6 +270,8 @@ long arch_ptrace(struct task_struct *chi
+ 		ret = ptrace_request(child, request, addr, data);
+ 		break;
+ 	}
++	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
++		goto out_tsk;
+ 
+ 	return ret;
+ out_eio:
+--- a/arch/m68k/kernel/traps.c	2009-09-10 15:25:23.000000000 +0200
++++ a/arch/m68k/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -906,8 +906,8 @@ void show_registers(struct pt_regs *regs
+ 	printk("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
+ 	       regs->d4, regs->d5, regs->a0, regs->a1);
+ 
+-	printk("Process %s (pid: %d, task=%p)\n",
+-		current->comm, task_pid_nr(current), current);
++	printk("Process %s (pid: %d[#%u], task=%p)\n",
++		current->comm, task_pid_nr(current), current->xid, current);
+ 	addr = (unsigned long)&fp->un;
+ 	printk("Frame format=%X ", regs->format);
+ 	switch (regs->format) {
+--- a/arch/m68k/mm/fault.c	2009-09-10 15:25:23.000000000 +0200
++++ a/arch/m68k/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -186,7 +186,8 @@ out_of_memory:
+ 		goto survive;
+ 	}
+ 
+-	printk("VM: killing process %s\n", current->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 
+--- a/arch/m68knommu/Kconfig	2009-12-03 20:01:57.000000000 +0100
++++ a/arch/m68knommu/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -727,6 +727,8 @@ source "fs/Kconfig"
+ 
+ source "arch/m68knommu/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/m68knommu/kernel/traps.c	2009-09-10 15:25:23.000000000 +0200
++++ a/arch/m68knommu/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -78,8 +78,9 @@ void die_if_kernel(char *str, struct pt_
+ 	printk(KERN_EMERG "d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
+ 	       fp->d4, fp->d5, fp->a0, fp->a1);
+ 
+-	printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
+-		current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
++	printk(KERN_EMERG "Process %s (pid: %d[#%u], stackpage=%08lx)\n",
++		current->comm, task_pid_nr(current), current->xid,
++		PAGE_SIZE+(unsigned long)current);
+ 	show_stack(NULL, (unsigned long *)(fp + 1));
+ 	add_taint(TAINT_DIE);
+ 	do_exit(SIGSEGV);
+--- a/arch/microblaze/mm/fault.c	2009-09-10 15:25:24.000000000 +0200
++++ a/arch/microblaze/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -279,7 +279,8 @@ out_of_memory:
+ 		goto survive;
+ 	}
+ 	up_read(&mm->mmap_sem);
+-	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
++	printk(KERN_WARNING "VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	if (user_mode(regs))
+ 		do_exit(SIGKILL);
+ 	bad_page_fault(regs, address, SIGKILL);
+--- a/arch/mips/Kconfig	2009-12-03 20:01:58.000000000 +0100
++++ a/arch/mips/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -2188,6 +2188,8 @@ source "fs/Kconfig"
+ 
+ source "arch/mips/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/mips/kernel/ptrace.c	2008-12-25 00:26:37.000000000 +0100
++++ a/arch/mips/kernel/ptrace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -25,6 +25,7 @@
+ #include <linux/security.h>
+ #include <linux/audit.h>
+ #include <linux/seccomp.h>
++#include <linux/vs_base.h>
+ 
+ #include <asm/byteorder.h>
+ #include <asm/cpu.h>
+@@ -259,6 +260,9 @@ long arch_ptrace(struct task_struct *chi
+ {
+ 	int ret;
+ 
++	if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
++		goto out;
++
+ 	switch (request) {
+ 	/* when I and D space are separate, these will need to be fixed. */
+ 	case PTRACE_PEEKTEXT: /* read word at location addr. */
+--- a/arch/mips/kernel/scall32-o32.S	2009-12-03 20:01:59.000000000 +0100
++++ a/arch/mips/kernel/scall32-o32.S	2011-06-10 13:03:02.000000000 +0200
+@@ -525,7 +525,7 @@ einval:	li	v0, -ENOSYS
+ 	sys	sys_mq_timedreceive	5
+ 	sys	sys_mq_notify		2	/* 4275 */
+ 	sys	sys_mq_getsetattr	3
+-	sys	sys_ni_syscall		0	/* sys_vserver */
++	sys	sys_vserver		3
+ 	sys	sys_waitid		5
+ 	sys	sys_ni_syscall		0	/* available, was setaltroot */
+ 	sys	sys_add_key		5	/* 4280 */
+--- a/arch/mips/kernel/scall64-64.S	2009-12-03 20:01:59.000000000 +0100
++++ a/arch/mips/kernel/scall64-64.S	2011-06-10 13:03:02.000000000 +0200
+@@ -362,7 +362,7 @@ sys_call_table:
+ 	PTR	sys_mq_timedreceive
+ 	PTR	sys_mq_notify
+ 	PTR	sys_mq_getsetattr		/* 5235 */
+-	PTR	sys_ni_syscall			/* sys_vserver */
++	PTR	sys_vserver
+ 	PTR	sys_waitid
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key
+--- a/arch/mips/kernel/scall64-n32.S	2009-12-03 20:01:59.000000000 +0100
++++ a/arch/mips/kernel/scall64-n32.S	2011-06-10 13:03:02.000000000 +0200
+@@ -360,7 +360,7 @@ EXPORT(sysn32_call_table)
+ 	PTR	compat_sys_mq_timedreceive
+ 	PTR	compat_sys_mq_notify
+ 	PTR	compat_sys_mq_getsetattr
+-	PTR	sys_ni_syscall			/* 6240, sys_vserver */
++	PTR	sys32_vserver			/* 6240 */
+ 	PTR	compat_sys_waitid
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key
+--- a/arch/mips/kernel/scall64-o32.S	2009-12-03 20:01:59.000000000 +0100
++++ a/arch/mips/kernel/scall64-o32.S	2011-06-10 13:03:02.000000000 +0200
+@@ -480,7 +480,7 @@ sys_call_table:
+ 	PTR	compat_sys_mq_timedreceive
+ 	PTR	compat_sys_mq_notify		/* 4275 */
+ 	PTR	compat_sys_mq_getsetattr
+-	PTR	sys_ni_syscall			/* sys_vserver */
++	PTR	sys32_vserver
+ 	PTR	sys_32_waitid
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key			/* 4280 */
+--- a/arch/mips/kernel/traps.c	2009-12-03 20:01:59.000000000 +0100
++++ a/arch/mips/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -335,9 +335,10 @@ void show_registers(const struct pt_regs
+ 
+ 	__show_regs(regs);
+ 	print_modules();
+-	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+-	       current->comm, current->pid, current_thread_info(), current,
+-	      field, current_thread_info()->tp_value);
++	printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p, tls=%0*lx)\n",
++		current->comm, task_pid_nr(current), current->xid,
++		current_thread_info(), current,
++		field, current_thread_info()->tp_value);
+ 	if (cpu_has_userlocal) {
+ 		unsigned long tls;
+ 
+--- a/arch/mn10300/mm/fault.c	2009-09-10 15:25:39.000000000 +0200
++++ a/arch/mn10300/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -339,7 +339,8 @@ no_context:
+ out_of_memory:
+ 	up_read(&mm->mmap_sem);
+ 	monitor_signal(regs);
+-	printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
++	printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+ 		do_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/parisc/Kconfig	2009-12-03 20:02:00.000000000 +0100
++++ a/arch/parisc/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -294,6 +294,8 @@ source "fs/Kconfig"
+ 
+ source "arch/parisc/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/parisc/kernel/syscall_table.S	2009-12-03 20:02:00.000000000 +0100
++++ a/arch/parisc/kernel/syscall_table.S	2011-06-10 13:03:02.000000000 +0200
+@@ -361,7 +361,7 @@
+ 	ENTRY_COMP(mbind)		/* 260 */
+ 	ENTRY_COMP(get_mempolicy)
+ 	ENTRY_COMP(set_mempolicy)
+-	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
++	ENTRY_DIFF(vserver)
+ 	ENTRY_SAME(add_key)
+ 	ENTRY_SAME(request_key)		/* 265 */
+ 	ENTRY_SAME(keyctl)
+--- a/arch/parisc/kernel/traps.c	2009-09-10 15:25:40.000000000 +0200
++++ a/arch/parisc/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -236,8 +236,9 @@ void die_if_kernel(char *str, struct pt_
+ 		if (err == 0)
+ 			return; /* STFU */
+ 
+-		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+-			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
++		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n",
++			current->comm, task_pid_nr(current), current->xid,
++			str, err, regs->iaoq[0]);
+ #ifdef PRINT_USER_FAULTS
+ 		/* XXX for debugging only */
+ 		show_regs(regs);
+@@ -270,8 +271,8 @@ void die_if_kernel(char *str, struct pt_
+ 		pdc_console_restart();
+ 	
+ 	if (err)
+-		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
+-			current->comm, task_pid_nr(current), str, err);
++		printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n",
++			current->comm, task_pid_nr(current), current->xid, str, err);
+ 
+ 	/* Wot's wrong wif bein' racy? */
+ 	if (current->thread.flags & PARISC_KERNEL_DEATH) {
+--- a/arch/parisc/mm/fault.c	2009-09-10 15:25:40.000000000 +0200
++++ a/arch/parisc/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -237,8 +237,9 @@ bad_area:
+ 
+ #ifdef PRINT_USER_FAULTS
+ 		printk(KERN_DEBUG "\n");
+-		printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
+-		    task_pid_nr(tsk), tsk->comm, code, address);
++		printk(KERN_DEBUG "do_page_fault() pid=%d:#%u "
++		    "command='%s' type=%lu address=0x%08lx\n",
++		    task_pid_nr(tsk), tsk->xid, tsk->comm, code, address);
+ 		if (vma) {
+ 			printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ 					vma->vm_start, vma->vm_end);
+@@ -264,7 +265,8 @@ no_context:
+ 
+   out_of_memory:
+ 	up_read(&mm->mmap_sem);
+-	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
++	printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n",
++		current->comm, current->pid, current->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/powerpc/Kconfig	2009-12-03 20:02:00.000000000 +0100
++++ a/arch/powerpc/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -943,6 +943,8 @@ source "lib/Kconfig"
+ 
+ source "arch/powerpc/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ config KEYS_COMPAT
+--- a/arch/powerpc/include/asm/unistd.h	2009-12-03 20:02:01.000000000 +0100
++++ a/arch/powerpc/include/asm/unistd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -275,7 +275,7 @@
+ #endif
+ #define __NR_rtas		255
+ #define __NR_sys_debug_setcontext 256
+-/* Number 257 is reserved for vserver */
++#define __NR_vserver		257
+ #define __NR_migrate_pages	258
+ #define __NR_mbind		259
+ #define __NR_get_mempolicy	260
+--- a/arch/powerpc/kernel/process.c	2011-05-29 23:42:15.000000000 +0200
++++ a/arch/powerpc/kernel/process.c	2011-06-10 13:03:02.000000000 +0200
+@@ -519,8 +519,9 @@ void show_regs(struct pt_regs * regs)
+ #else
+ 		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+ #endif
+-	printk("TASK = %p[%d] '%s' THREAD: %p",
+-	       current, task_pid_nr(current), current->comm, task_thread_info(current));
++	printk("TASK = %p[%d,#%u] '%s' THREAD: %p",
++	       current, task_pid_nr(current), current->xid,
++	       current->comm, task_thread_info(current));
+ 
+ #ifdef CONFIG_SMP
+ 	printk(" CPU: %d", raw_smp_processor_id());
+--- a/arch/powerpc/kernel/traps.c	2009-09-10 15:25:41.000000000 +0200
++++ a/arch/powerpc/kernel/traps.c	2011-06-10 13:03:02.000000000 +0200
+@@ -931,8 +931,9 @@ void nonrecoverable_exception(struct pt_
+ 
+ void trace_syscall(struct pt_regs *regs)
+ {
+-	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
+-	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
++	printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
++	       current, task_pid_nr(current), current->xid,
++	       regs->nip, regs->link, regs->gpr[0],
+ 	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
+ }
+ 
+--- a/arch/powerpc/kernel/vdso.c	2009-12-03 20:02:02.000000000 +0100
++++ a/arch/powerpc/kernel/vdso.c	2011-06-10 13:03:02.000000000 +0200
+@@ -23,6 +23,7 @@
+ #include <linux/security.h>
+ #include <linux/bootmem.h>
+ #include <linux/lmb.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+--- a/arch/powerpc/mm/fault.c	2009-12-03 20:02:02.000000000 +0100
++++ a/arch/powerpc/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -358,7 +358,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", current->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		current->comm, current->pid, current->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	return SIGKILL;
+--- a/arch/s390/Kconfig	2009-12-03 20:02:03.000000000 +0100
++++ a/arch/s390/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -616,6 +616,8 @@ source "fs/Kconfig"
+ 
+ source "arch/s390/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/s390/include/asm/tlb.h	2009-09-10 15:25:43.000000000 +0200
++++ a/arch/s390/include/asm/tlb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -23,6 +23,8 @@
+ 
+ #include <linux/mm.h>
+ #include <linux/swap.h>
++#include <linux/vs_memory.h>
++
+ #include <asm/processor.h>
+ #include <asm/pgalloc.h>
+ #include <asm/smp.h>
+--- a/arch/s390/include/asm/unistd.h	2009-12-03 20:02:03.000000000 +0100
++++ a/arch/s390/include/asm/unistd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -202,7 +202,7 @@
+ #define __NR_clock_gettime	(__NR_timer_create+6)
+ #define __NR_clock_getres	(__NR_timer_create+7)
+ #define __NR_clock_nanosleep	(__NR_timer_create+8)
+-/* Number 263 is reserved for vserver */
++#define __NR_vserver		263
+ #define __NR_statfs64		265
+ #define __NR_fstatfs64		266
+ #define __NR_remap_file_pages	267
+--- a/arch/s390/kernel/ptrace.c	2011-05-29 23:42:15.000000000 +0200
++++ a/arch/s390/kernel/ptrace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <linux/regset.h>
+ #include <linux/tracehook.h>
+ #include <linux/seccomp.h>
++#include <linux/vs_base.h>
+ #include <trace/syscall.h>
+ #include <asm/compat.h>
+ #include <asm/segment.h>
+--- a/arch/s390/kernel/syscalls.S	2009-12-03 20:02:03.000000000 +0100
++++ a/arch/s390/kernel/syscalls.S	2011-06-10 13:03:02.000000000 +0200
+@@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett
+ SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)	/* 260 */
+ SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
+ SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+-NI_SYSCALL							/* reserved for vserver */
++SYSCALL(sys_vserver,sys_vserver,sys32_vserver)
+ SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+ SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
+ SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
+--- a/arch/s390/lib/uaccess_pt.c	2009-09-10 15:25:43.000000000 +0200
++++ a/arch/s390/lib/uaccess_pt.c	2011-06-10 13:03:02.000000000 +0200
+@@ -90,7 +90,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", current->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	return ret;
+ 
+ out_sigbus:
+--- a/arch/sh/Kconfig	2009-12-03 20:02:03.000000000 +0100
++++ a/arch/sh/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -853,6 +853,8 @@ source "fs/Kconfig"
+ 
+ source "arch/sh/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/sh/kernel/irq.c	2009-12-03 20:02:10.000000000 +0100
++++ a/arch/sh/kernel/irq.c	2011-06-10 13:03:02.000000000 +0200
+@@ -12,6 +12,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/seq_file.h>
+ #include <linux/ftrace.h>
++// #include <linux/vs_context.h>
+ #include <asm/processor.h>
+ #include <asm/machvec.h>
+ #include <asm/uaccess.h>
+--- a/arch/sh/kernel/vsyscall/vsyscall.c	2009-03-24 14:18:42.000000000 +0100
++++ a/arch/sh/kernel/vsyscall/vsyscall.c	2011-06-10 13:03:02.000000000 +0200
+@@ -19,6 +19,7 @@
+ #include <linux/elf.h>
+ #include <linux/sched.h>
+ #include <linux/err.h>
++#include <linux/vs_memory.h>
+ 
+ /*
+  * Should the kernel map a VDSO page into processes and pass its
+--- a/arch/sh/mm/fault_32.c	2009-12-03 20:02:14.000000000 +0100
++++ a/arch/sh/mm/fault_32.c	2011-06-10 13:03:02.000000000 +0200
+@@ -292,7 +292,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", tsk->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/sh/mm/tlbflush_64.c	2009-12-03 20:02:14.000000000 +0100
++++ a/arch/sh/mm/tlbflush_64.c	2011-06-10 13:03:02.000000000 +0200
+@@ -306,7 +306,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", tsk->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		tsk->comm, task_pid_nr(tsk), tsk->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	goto no_context;
+--- a/arch/sparc/Kconfig	2009-12-03 20:02:14.000000000 +0100
++++ a/arch/sparc/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -550,6 +550,8 @@ source "fs/Kconfig"
+ 
+ source "arch/sparc/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/sparc/include/asm/tlb_64.h	2009-09-10 15:25:45.000000000 +0200
++++ a/arch/sparc/include/asm/tlb_64.h	2011-06-10 13:03:02.000000000 +0200
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/swap.h>
+ #include <linux/pagemap.h>
++#include <linux/vs_memory.h>
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+ #include <asm/mmu_context.h>
+--- a/arch/sparc/include/asm/unistd.h	2009-12-03 20:02:15.000000000 +0100
++++ a/arch/sparc/include/asm/unistd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -335,7 +335,7 @@
+ #define __NR_timer_getoverrun	264
+ #define __NR_timer_delete	265
+ #define __NR_timer_create	266
+-/* #define __NR_vserver		267 Reserved for VSERVER */
++#define __NR_vserver		267
+ #define __NR_io_setup		268
+ #define __NR_io_destroy		269
+ #define __NR_io_submit		270
+--- a/arch/sparc/kernel/systbls_32.S	2011-05-29 23:42:16.000000000 +0200
++++ a/arch/sparc/kernel/systbls_32.S	2011-06-10 13:03:02.000000000 +0200
+@@ -70,7 +70,7 @@ sys_call_table:
+ /*250*/	.long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+ /*255*/	.long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ /*260*/	.long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+-/*265*/	.long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
++/*265*/	.long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
+ /*270*/	.long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+ /*275*/	.long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+ /*280*/	.long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+--- a/arch/sparc/kernel/systbls_64.S	2011-05-29 23:42:16.000000000 +0200
++++ a/arch/sparc/kernel/systbls_64.S	2011-06-10 13:03:02.000000000 +0200
+@@ -71,7 +71,7 @@ sys_call_table32:
+ /*250*/	.word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+ 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+ /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
+-	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
++	.word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
+ /*270*/	.word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
+ 	.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+ /*280*/	.word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
+@@ -146,7 +146,7 @@ sys_call_table:
+ /*250*/	.word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+ 	.word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+ /*260*/	.word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+-	.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
++	.word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
+ /*270*/	.word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+ 	.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+ /*280*/	.word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+--- a/arch/um/Kconfig.rest	2009-06-11 17:12:19.000000000 +0200
++++ a/arch/um/Kconfig.rest	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,8 @@ source "drivers/connector/Kconfig"
+ 
+ source "fs/Kconfig"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/um/include/asm/tlb.h	2009-09-10 15:25:46.000000000 +0200
++++ a/arch/um/include/asm/tlb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/pagemap.h>
+ #include <linux/swap.h>
++#include <linux/vs_memory.h>
+ #include <asm/percpu.h>
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+--- a/arch/x86/Kconfig	2011-05-29 23:42:16.000000000 +0200
++++ a/arch/x86/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -2100,6 +2100,8 @@ source "fs/Kconfig"
+ 
+ source "arch/x86/Kconfig.debug"
+ 
++source "kernel/vserver/Kconfig"
++
+ source "security/Kconfig"
+ 
+ source "crypto/Kconfig"
+--- a/arch/x86/ia32/ia32entry.S	2011-05-29 23:42:16.000000000 +0200
++++ a/arch/x86/ia32/ia32entry.S	2011-06-10 13:03:02.000000000 +0200
+@@ -783,7 +783,7 @@ ia32_sys_call_table:
+ 	.quad sys_tgkill		/* 270 */
+ 	.quad compat_sys_utimes
+ 	.quad sys32_fadvise64_64
+-	.quad quiet_ni_syscall	/* sys_vserver */
++	.quad sys32_vserver
+ 	.quad sys_mbind
+ 	.quad compat_sys_get_mempolicy	/* 275 */
+ 	.quad sys_set_mempolicy
+--- a/arch/x86/include/asm/unistd_64.h	2009-12-03 20:02:16.000000000 +0100
++++ a/arch/x86/include/asm/unistd_64.h	2011-06-10 13:03:02.000000000 +0200
+@@ -535,7 +535,7 @@ __SYSCALL(__NR_tgkill, sys_tgkill)
+ #define __NR_utimes				235
+ __SYSCALL(__NR_utimes, sys_utimes)
+ #define __NR_vserver				236
+-__SYSCALL(__NR_vserver, sys_ni_syscall)
++__SYSCALL(__NR_vserver, sys_vserver)
+ #define __NR_mbind				237
+ __SYSCALL(__NR_mbind, sys_mbind)
+ #define __NR_set_mempolicy			238
+--- a/arch/x86/kernel/syscall_table_32.S	2011-05-29 23:42:16.000000000 +0200
++++ a/arch/x86/kernel/syscall_table_32.S	2011-06-10 13:03:02.000000000 +0200
+@@ -272,7 +272,7 @@ ENTRY(sys_call_table)
+ 	.long sys_tgkill	/* 270 */
+ 	.long sys_utimes
+  	.long sys_fadvise64_64
+-	.long sys_ni_syscall	/* sys_vserver */
++	.long sys_vserver
+ 	.long sys_mbind
+ 	.long sys_get_mempolicy
+ 	.long sys_set_mempolicy
+--- a/arch/xtensa/mm/fault.c	2009-09-10 15:25:48.000000000 +0200
++++ a/arch/xtensa/mm/fault.c	2011-06-10 13:03:02.000000000 +0200
+@@ -151,7 +151,8 @@ out_of_memory:
+ 		down_read(&mm->mmap_sem);
+ 		goto survive;
+ 	}
+-	printk("VM: killing process %s\n", current->comm);
++	printk("VM: killing process %s(%d:#%u)\n",
++		current->comm, task_pid_nr(current), current->xid);
+ 	if (user_mode(regs))
+ 		do_group_exit(SIGKILL);
+ 	bad_page_fault(regs, address, SIGKILL);
+--- a/drivers/block/Kconfig	2009-09-10 15:25:49.000000000 +0200
++++ a/drivers/block/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -271,6 +271,13 @@ config BLK_DEV_CRYPTOLOOP
+ 
+ source "drivers/block/drbd/Kconfig"
+ 
++config BLK_DEV_VROOT
++	tristate "Virtual Root device support"
++	depends on QUOTACTL
++	---help---
++	  Saying Y here will allow you to use quota/fs ioctls on a shared
++	  partition within a virtual server without compromising security.
++
+ config BLK_DEV_NBD
+ 	tristate "Network block device support"
+ 	depends on NET
+--- a/drivers/block/Makefile	2009-09-10 15:25:49.000000000 +0200
++++ a/drivers/block/Makefile	2011-06-10 13:03:02.000000000 +0200
+@@ -34,6 +34,7 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
+ obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
+ obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+ obj-$(CONFIG_BLK_DEV_HD)	+= hd.o
++obj-$(CONFIG_BLK_DEV_VROOT)	+= vroot.o
+ 
+ obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
+ 
+--- a/drivers/block/loop.c	2011-05-29 23:42:17.000000000 +0200
++++ a/drivers/block/loop.c	2011-06-10 13:03:02.000000000 +0200
+@@ -74,6 +74,7 @@
+ #include <linux/gfp.h>
+ #include <linux/kthread.h>
+ #include <linux/splice.h>
++#include <linux/vs_context.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -814,6 +815,7 @@ static int loop_set_fd(struct loop_devic
+ 	lo->lo_blocksize = lo_blocksize;
+ 	lo->lo_device = bdev;
+ 	lo->lo_flags = lo_flags;
++	lo->lo_xid = vx_current_xid();
+ 	lo->lo_backing_file = file;
+ 	lo->transfer = transfer_none;
+ 	lo->ioctl = NULL;
+@@ -939,6 +941,7 @@ static int loop_clr_fd(struct loop_devic
+ 	lo->lo_encrypt_key_size = 0;
+ 	lo->lo_flags = 0;
+ 	lo->lo_thread = NULL;
++	lo->lo_xid = 0;
+ 	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+ 	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+ 	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+@@ -973,7 +976,7 @@ loop_set_status(struct loop_device *lo, 
+ 
+ 	if (lo->lo_encrypt_key_size &&
+ 	    lo->lo_key_owner != uid &&
+-	    !capable(CAP_SYS_ADMIN))
++	    !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP))
+ 		return -EPERM;
+ 	if (lo->lo_state != Lo_bound)
+ 		return -ENXIO;
+@@ -1057,7 +1060,8 @@ loop_get_status(struct loop_device *lo, 
+ 	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
+ 	info->lo_encrypt_type =
+ 		lo->lo_encryption ? lo->lo_encryption->number : 0;
+-	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
++	if (lo->lo_encrypt_key_size &&
++		vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) {
+ 		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
+ 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
+ 		       lo->lo_encrypt_key_size);
+@@ -1401,6 +1405,9 @@ static int lo_open(struct block_device *
+ {
+ 	struct loop_device *lo = bdev->bd_disk->private_data;
+ 
++	if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P))
++		return -EACCES;
++
+ 	mutex_lock(&lo->lo_ctl_mutex);
+ 	lo->lo_refcnt++;
+ 	mutex_unlock(&lo->lo_ctl_mutex);
+--- a/drivers/block/vroot.c	1970-01-01 01:00:00.000000000 +0100
++++ a/drivers/block/vroot.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,281 @@
++/*
++ *  linux/drivers/block/vroot.c
++ *
++ *  written by Herbert Pötzl, 9/11/2002
++ *  ported to 2.6.10 by Herbert Pötzl, 30/12/2004
++ *
++ *  based on the loop.c code by Theodore Ts'o.
++ *
++ * Copyright (C) 2002-2007 by Herbert Pötzl.
++ * Redistribution of this file is permitted under the
++ * GNU General Public License.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/file.h>
++#include <linux/major.h>
++#include <linux/blkdev.h>
++
++#include <linux/vroot.h>
++#include <linux/vs_context.h>
++
++
++static int max_vroot = 8;
++
++static struct vroot_device *vroot_dev;
++static struct gendisk **disks;
++
++
++static int vroot_set_dev(
++	struct vroot_device *vr,
++	struct block_device *bdev,
++	unsigned int arg)
++{
++	struct block_device *real_bdev;
++	struct file *file;
++	struct inode *inode;
++	int error;
++
++	error = -EBUSY;
++	if (vr->vr_state != Vr_unbound)
++		goto out;
++
++	error = -EBADF;
++	file = fget(arg);
++	if (!file)
++		goto out;
++
++	error = -EINVAL;
++	inode = file->f_dentry->d_inode;
++
++
++	if (S_ISBLK(inode->i_mode)) {
++		real_bdev = inode->i_bdev;
++		vr->vr_device = real_bdev;
++		__iget(real_bdev->bd_inode);
++	} else
++		goto out_fput;
++
++	vxdprintk(VXD_CBIT(misc, 0),
++		"vroot[%d]_set_dev: dev=" VXF_DEV,
++		vr->vr_number, VXD_DEV(real_bdev));
++
++	vr->vr_state = Vr_bound;
++	error = 0;
++
++ out_fput:
++	fput(file);
++ out:
++	return error;
++}
++
++static int vroot_clr_dev(
++	struct vroot_device *vr,
++	struct block_device *bdev)
++{
++	struct block_device *real_bdev;
++
++	if (vr->vr_state != Vr_bound)
++		return -ENXIO;
++	if (vr->vr_refcnt > 1)	/* we needed one fd for the ioctl */
++		return -EBUSY;
++
++	real_bdev = vr->vr_device;
++
++	vxdprintk(VXD_CBIT(misc, 0),
++		"vroot[%d]_clr_dev: dev=" VXF_DEV,
++		vr->vr_number, VXD_DEV(real_bdev));
++
++	bdput(real_bdev);
++	vr->vr_state = Vr_unbound;
++	vr->vr_device = NULL;
++	return 0;
++}
++
++
++static int vr_ioctl(struct block_device *bdev, fmode_t mode,
++	unsigned int cmd, unsigned long arg)
++{
++	struct vroot_device *vr = bdev->bd_disk->private_data;
++	int err;
++
++	down(&vr->vr_ctl_mutex);
++	switch (cmd) {
++	case VROOT_SET_DEV:
++		err = vroot_set_dev(vr, bdev, arg);
++		break;
++	case VROOT_CLR_DEV:
++		err = vroot_clr_dev(vr, bdev);
++		break;
++	default:
++		err = -EINVAL;
++		break;
++	}
++	up(&vr->vr_ctl_mutex);
++	return err;
++}
++
++static int vr_open(struct block_device *bdev, fmode_t mode)
++{
++	struct vroot_device *vr = bdev->bd_disk->private_data;
++
++	down(&vr->vr_ctl_mutex);
++	vr->vr_refcnt++;
++	up(&vr->vr_ctl_mutex);
++	return 0;
++}
++
++static int vr_release(struct gendisk *disk, fmode_t mode)
++{
++	struct vroot_device *vr = disk->private_data;
++
++	down(&vr->vr_ctl_mutex);
++	--vr->vr_refcnt;
++	up(&vr->vr_ctl_mutex);
++	return 0;
++}
++
++static struct block_device_operations vr_fops = {
++	.owner =	THIS_MODULE,
++	.open =		vr_open,
++	.release =	vr_release,
++	.ioctl =	vr_ioctl,
++};
++
++struct block_device *__vroot_get_real_bdev(struct block_device *bdev)
++{
++	struct inode *inode = bdev->bd_inode;
++	struct vroot_device *vr;
++	struct block_device *real_bdev;
++	int minor = iminor(inode);
++
++	vr = &vroot_dev[minor];
++	real_bdev = vr->vr_device;
++
++	vxdprintk(VXD_CBIT(misc, 0),
++		"vroot[%d]_get_real_bdev: dev=" VXF_DEV,
++		vr->vr_number, VXD_DEV(real_bdev));
++
++	if (vr->vr_state != Vr_bound)
++		return ERR_PTR(-ENXIO);
++
++	__iget(real_bdev->bd_inode);
++	return real_bdev;
++}
++
++/*
++ * And now the modules code and kernel interface.
++ */
++
++module_param(max_vroot, int, 0);
++
++MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR);
++
++MODULE_AUTHOR ("Herbert Pötzl");
++MODULE_DESCRIPTION ("Virtual Root Device Mapper");
++
++
++int __init vroot_init(void)
++{
++	int err, i;
++
++	if (max_vroot < 1 || max_vroot > 256) {
++		max_vroot = MAX_VROOT_DEFAULT;
++		printk(KERN_WARNING "vroot: invalid max_vroot "
++			"(must be between 1 and 256), "
++			"using default (%d)\n", max_vroot);
++	}
++
++	if (register_blkdev(VROOT_MAJOR, "vroot"))
++		return -EIO;
++
++	err = -ENOMEM;
++	vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL);
++	if (!vroot_dev)
++		goto out_mem1;
++	memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device));
++
++	disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL);
++	if (!disks)
++		goto out_mem2;
++
++	for (i = 0; i < max_vroot; i++) {
++		disks[i] = alloc_disk(1);
++		if (!disks[i])
++			goto out_mem3;
++		disks[i]->queue = blk_alloc_queue(GFP_KERNEL);
++		if (!disks[i]->queue)
++			goto out_mem3;
++	}
++
++	for (i = 0; i < max_vroot; i++) {
++		struct vroot_device *vr = &vroot_dev[i];
++		struct gendisk *disk = disks[i];
++
++		memset(vr, 0, sizeof(*vr));
++		init_MUTEX(&vr->vr_ctl_mutex);
++		vr->vr_number = i;
++		disk->major = VROOT_MAJOR;
++		disk->first_minor = i;
++		disk->fops = &vr_fops;
++		sprintf(disk->disk_name, "vroot%d", i);
++		disk->private_data = vr;
++	}
++
++	err = register_vroot_grb(&__vroot_get_real_bdev);
++	if (err)
++		goto out_mem3;
++
++	for (i = 0; i < max_vroot; i++)
++		add_disk(disks[i]);
++	printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot);
++	return 0;
++
++out_mem3:
++	while (i--)
++		put_disk(disks[i]);
++	kfree(disks);
++out_mem2:
++	kfree(vroot_dev);
++out_mem1:
++	unregister_blkdev(VROOT_MAJOR, "vroot");
++	printk(KERN_ERR "vroot: ran out of memory\n");
++	return err;
++}
++
++void vroot_exit(void)
++{
++	int i;
++
++	if (unregister_vroot_grb(&__vroot_get_real_bdev))
++		printk(KERN_WARNING "vroot: cannot unregister grb\n");
++
++	for (i = 0; i < max_vroot; i++) {
++		del_gendisk(disks[i]);
++		put_disk(disks[i]);
++	}
++	unregister_blkdev(VROOT_MAJOR, "vroot");
++
++	kfree(disks);
++	kfree(vroot_dev);
++}
++
++module_init(vroot_init);
++module_exit(vroot_exit);
++
++#ifndef MODULE
++
++static int __init max_vroot_setup(char *str)
++{
++	max_vroot = simple_strtol(str, NULL, 0);
++	return 1;
++}
++
++__setup("max_vroot=", max_vroot_setup);
++
++#endif
++
+--- a/drivers/char/sysrq.c	2009-12-03 20:02:20.000000000 +0100
++++ a/drivers/char/sysrq.c	2011-06-10 13:03:02.000000000 +0200
+@@ -38,6 +38,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/hrtimer.h>
+ #include <linux/oom.h>
++#include <linux/vserver/debug.h>
+ 
+ #include <asm/ptrace.h>
+ #include <asm/irq_regs.h>
+@@ -391,6 +392,21 @@ static struct sysrq_key_op sysrq_unrt_op
+ 	.enable_mask	= SYSRQ_ENABLE_RTNICE,
+ };
+ 
++
++#ifdef CONFIG_VSERVER_DEBUG
++static void sysrq_handle_vxinfo(int key, struct tty_struct *tty)
++{
++	dump_vx_info_inactive((key == 'x')?0:1);
++}
++
++static struct sysrq_key_op sysrq_showvxinfo_op = {
++	.handler	= sysrq_handle_vxinfo,
++	.help_msg	= "conteXt",
++	.action_msg	= "Show Context Info",
++	.enable_mask	= SYSRQ_ENABLE_DUMP,
++};
++#endif
++
+ /* Key Operations table and lock */
+ static DEFINE_SPINLOCK(sysrq_key_table_lock);
+ 
+@@ -445,7 +461,11 @@ static struct sysrq_key_op *sysrq_key_ta
+ 	NULL,				/* v */
+ 	&sysrq_showstate_blocked_op,	/* w */
+ 	/* x: May be registered on ppc/powerpc for xmon */
++#ifdef CONFIG_VSERVER_DEBUG
++	&sysrq_showvxinfo_op,		/* x */
++#else
+ 	NULL,				/* x */
++#endif
+ 	/* y: May be registered on sparc64 for global register dump */
+ 	NULL,				/* y */
+ 	&sysrq_ftrace_dump_op,		/* z */
+@@ -460,6 +480,8 @@ static int sysrq_key_table_key2index(int
+ 		retval = key - '0';
+ 	else if ((key >= 'a') && (key <= 'z'))
+ 		retval = key + 10 - 'a';
++	else if ((key >= 'A') && (key <= 'Z'))
++		retval = key + 10 - 'A';
+ 	else
+ 		retval = -1;
+ 	return retval;
+--- a/drivers/char/tty_io.c	2011-05-29 23:42:17.000000000 +0200
++++ a/drivers/char/tty_io.c	2011-06-10 13:03:02.000000000 +0200
+@@ -106,6 +106,7 @@
+ 
+ #include <linux/kmod.h>
+ #include <linux/nsproxy.h>
++#include <linux/vs_pid.h>
+ 
+ #undef TTY_DEBUG_HANGUP
+ 
+@@ -1970,7 +1971,8 @@ static int tiocsti(struct tty_struct *tt
+ 	char ch, mbz = 0;
+ 	struct tty_ldisc *ld;
+ 
+-	if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN))
++	if (((current->signal->tty != tty) &&
++		!vx_capable(CAP_SYS_ADMIN, VXC_TIOCSTI)))
+ 		return -EPERM;
+ 	if (get_user(ch, p))
+ 		return -EFAULT;
+@@ -2258,6 +2260,7 @@ static int tiocspgrp(struct tty_struct *
+ 		return -ENOTTY;
+ 	if (get_user(pgrp_nr, p))
+ 		return -EFAULT;
++	pgrp_nr = vx_rmap_pid(pgrp_nr);
+ 	if (pgrp_nr < 0)
+ 		return -EINVAL;
+ 	rcu_read_lock();
+--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c	2009-12-03 20:02:23.000000000 +0100
++++ a/drivers/infiniband/hw/ipath/ipath_user_pages.c	2011-06-10 13:03:02.000000000 +0200
+@@ -34,6 +34,7 @@
+ #include <linux/mm.h>
+ #include <linux/device.h>
+ #include <linux/sched.h>
++#include <linux/vs_memory.h>
+ 
+ #include "ipath_kernel.h"
+ 
+@@ -62,7 +63,8 @@ static int __get_user_pages(unsigned lon
+ 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
+ 		PAGE_SHIFT;
+ 
+-	if (num_pages > lock_limit) {
++	if (num_pages > lock_limit ||
++		!vx_vmlocked_avail(current->mm, num_pages)) {
+ 		ret = -ENOMEM;
+ 		goto bail;
+ 	}
+@@ -79,7 +81,7 @@ static int __get_user_pages(unsigned lon
+ 			goto bail_release;
+ 	}
+ 
+-	current->mm->locked_vm += num_pages;
++	vx_vmlocked_add(current->mm, num_pages);
+ 
+ 	ret = 0;
+ 	goto bail;
+@@ -178,7 +180,7 @@ void ipath_release_user_pages(struct pag
+ 
+ 	__ipath_release_user_pages(p, num_pages, 1);
+ 
+-	current->mm->locked_vm -= num_pages;
++	vx_vmlocked_sub(current->mm, num_pages);
+ 
+ 	up_write(&current->mm->mmap_sem);
+ }
+@@ -195,7 +197,7 @@ static void user_pages_account(struct wo
+ 		container_of(_work, struct ipath_user_pages_work, work);
+ 
+ 	down_write(&work->mm->mmap_sem);
+-	work->mm->locked_vm -= work->num_pages;
++	vx_vmlocked_sub(work->mm, work->num_pages);
+ 	up_write(&work->mm->mmap_sem);
+ 	mmput(work->mm);
+ 	kfree(work);
+--- a/drivers/md/dm-ioctl.c	2011-05-29 23:42:19.000000000 +0200
++++ a/drivers/md/dm-ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/dm-ioctl.h>
+ #include <linux/hdreg.h>
+ #include <linux/compat.h>
++#include <linux/vs_context.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -106,7 +107,8 @@ static struct hash_cell *__get_name_cell
+ 	unsigned int h = hash_str(str);
+ 
+ 	list_for_each_entry (hc, _name_buckets + h, name_list)
+-		if (!strcmp(hc->name, str)) {
++		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
++			!strcmp(hc->name, str)) {
+ 			dm_get(hc->md);
+ 			return hc;
+ 		}
+@@ -120,7 +122,8 @@ static struct hash_cell *__get_uuid_cell
+ 	unsigned int h = hash_str(str);
+ 
+ 	list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
+-		if (!strcmp(hc->uuid, str)) {
++		if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
++			!strcmp(hc->uuid, str)) {
+ 			dm_get(hc->md);
+ 			return hc;
+ 		}
+@@ -369,6 +372,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl 
+ 
+ static int remove_all(struct dm_ioctl *param, size_t param_size)
+ {
++	if (!vx_check(0, VS_ADMIN))
++		return -EPERM;
++
+ 	dm_hash_remove_all(1);
+ 	param->data_size = 0;
+ 	return 0;
+@@ -416,6 +422,8 @@ static int list_devices(struct dm_ioctl 
+ 	 */
+ 	for (i = 0; i < NUM_BUCKETS; i++) {
+ 		list_for_each_entry (hc, _name_buckets + i, name_list) {
++			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
++				continue;
+ 			needed += sizeof(struct dm_name_list);
+ 			needed += strlen(hc->name) + 1;
+ 			needed += ALIGN_MASK;
+@@ -439,6 +447,8 @@ static int list_devices(struct dm_ioctl 
+ 	 */
+ 	for (i = 0; i < NUM_BUCKETS; i++) {
+ 		list_for_each_entry (hc, _name_buckets + i, name_list) {
++			if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
++				continue;
+ 			if (old_nl)
+ 				old_nl->next = (uint32_t) ((void *) nl -
+ 							   (void *) old_nl);
+@@ -629,10 +639,11 @@ static struct hash_cell *__find_device_h
+ 	if (!md)
+ 		goto out;
+ 
+-	mdptr = dm_get_mdptr(md);
++	if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT))
++		mdptr = dm_get_mdptr(md);
++
+ 	if (!mdptr)
+ 		dm_put(md);
+-
+ out:
+ 	return mdptr;
+ }
+@@ -1462,8 +1473,8 @@ static int ctl_ioctl(uint command, struc
+ 	ioctl_fn fn = NULL;
+ 	size_t param_size;
+ 
+-	/* only root can play with this */
+-	if (!capable(CAP_SYS_ADMIN))
++	/* only root and certain contexts can play with this */
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER))
+ 		return -EACCES;
+ 
+ 	if (_IOC_TYPE(command) != DM_IOCTL)
+--- a/drivers/md/dm.c	2011-05-29 23:42:19.000000000 +0200
++++ a/drivers/md/dm.c	2011-06-10 13:03:02.000000000 +0200
+@@ -19,6 +19,7 @@
+ #include <linux/slab.h>
+ #include <linux/idr.h>
+ #include <linux/hdreg.h>
++#include <linux/vs_base.h>
+ 
+ #include <trace/events/block.h>
+ 
+@@ -119,6 +120,7 @@ struct mapped_device {
+ 	rwlock_t map_lock;
+ 	atomic_t holders;
+ 	atomic_t open_count;
++	xid_t xid;
+ 
+ 	unsigned long flags;
+ 
+@@ -323,6 +325,7 @@ static void __exit dm_exit(void)
+ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
+ {
+ 	struct mapped_device *md;
++	int ret = -ENXIO;
+ 
+ 	spin_lock(&_minor_lock);
+ 
+@@ -331,18 +334,19 @@ static int dm_blk_open(struct block_devi
+ 		goto out;
+ 
+ 	if (test_bit(DMF_FREEING, &md->flags) ||
+-	    test_bit(DMF_DELETING, &md->flags)) {
+-		md = NULL;
++	    test_bit(DMF_DELETING, &md->flags))
++		goto out;
++
++	ret = -EACCES;
++	if (!vx_check(md->xid, VS_IDENT|VS_HOSTID))
+ 		goto out;
+-	}
+ 
+ 	dm_get(md);
+ 	atomic_inc(&md->open_count);
+-
++	ret = 0;
+ out:
+ 	spin_unlock(&_minor_lock);
+-
+-	return md ? 0 : -ENXIO;
++	return ret;
+ }
+ 
+ static int dm_blk_close(struct gendisk *disk, fmode_t mode)
+@@ -553,6 +557,14 @@ int dm_set_geometry(struct mapped_device
+ 	return 0;
+ }
+ 
++/*
++ * Get the xid associated with a dm device
++ */
++xid_t dm_get_xid(struct mapped_device *md)
++{
++	return md->xid;
++}
++
+ /*-----------------------------------------------------------------
+  * CRUD START:
+  *   A more elegant soln is in the works that uses the queue
+@@ -1775,6 +1787,7 @@ static struct mapped_device *alloc_dev(i
+ 	INIT_LIST_HEAD(&md->uevent_list);
+ 	spin_lock_init(&md->uevent_lock);
+ 
++	md->xid = vx_current_xid();
+ 	md->queue = blk_init_queue(dm_request_fn, NULL);
+ 	if (!md->queue)
+ 		goto bad_queue;
+--- a/drivers/md/dm.h	2009-09-10 15:25:55.000000000 +0200
++++ a/drivers/md/dm.h	2011-06-10 13:03:02.000000000 +0200
+@@ -41,6 +41,8 @@ struct dm_dev_internal {
+ struct dm_table;
+ struct dm_md_mempools;
+ 
++xid_t dm_get_xid(struct mapped_device *md);
++
+ /*-----------------------------------------------------------------
+  * Internal table functions.
+  *---------------------------------------------------------------*/
+--- a/drivers/net/tun.c	2011-05-29 23:42:20.000000000 +0200
++++ a/drivers/net/tun.c	2011-06-10 13:03:02.000000000 +0200
+@@ -61,6 +61,7 @@
+ #include <linux/crc32.h>
+ #include <linux/nsproxy.h>
+ #include <linux/virtio_net.h>
++#include <linux/vs_network.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+@@ -102,6 +103,7 @@ struct tun_struct {
+ 	unsigned int 		flags;
+ 	uid_t			owner;
+ 	gid_t			group;
++	nid_t			nid;
+ 
+ 	struct net_device	*dev;
+ 	struct fasync_struct	*fasync;
+@@ -816,6 +818,7 @@ static void tun_setup(struct net_device 
+ 
+ 	tun->owner = -1;
+ 	tun->group = -1;
++	tun->nid = current->nid;
+ 
+ 	dev->ethtool_ops = &tun_ethtool_ops;
+ 	dev->destructor = tun_free_netdev;
+@@ -932,7 +935,7 @@ static int tun_set_iff(struct net *net, 
+ 
+ 		if (((tun->owner != -1 && cred->euid != tun->owner) ||
+ 		     (tun->group != -1 && !in_egroup_p(tun->group))) &&
+-		    !capable(CAP_NET_ADMIN))
++		!cap_raised(current_cap(), CAP_NET_ADMIN))
+ 			return -EPERM;
+ 		err = security_tun_dev_attach(tun->socket.sk);
+ 		if (err < 0)
+@@ -946,7 +949,7 @@ static int tun_set_iff(struct net *net, 
+ 		char *name;
+ 		unsigned long flags = 0;
+ 
+-		if (!capable(CAP_NET_ADMIN))
++		if (!nx_capable(CAP_NET_ADMIN, NXC_TUN_CREATE))
+ 			return -EPERM;
+ 		err = security_tun_dev_create();
+ 		if (err < 0)
+@@ -1014,6 +1017,9 @@ static int tun_set_iff(struct net *net, 
+ 
+ 		sk->sk_destruct = tun_sock_destruct;
+ 
++		if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P))
++			return -EPERM;
++
+ 		err = tun_attach(tun, file);
+ 		if (err < 0)
+ 			goto failed;
+@@ -1203,6 +1209,16 @@ static long tun_chr_ioctl(struct file *f
+ 		DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
+ 		break;
+ 
++	case TUNSETNID:
++		if (!capable(CAP_CONTEXT))
++			return -EPERM;
++
++		/* Set nid owner of the device */
++		tun->nid = (nid_t) arg;
++
++		DBG(KERN_INFO "%s: nid owner set to %u\n", tun->dev->name, tun->nid);
++		break;
++
+ 	case TUNSETLINK:
+ 		/* Only allow setting the type when the interface is down */
+ 		if (tun->dev->flags & IFF_UP) {
+--- a/fs/attr.c	2009-12-03 20:02:51.000000000 +0100
++++ a/fs/attr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,9 @@
+ #include <linux/fcntl.h>
+ #include <linux/quotaops.h>
+ #include <linux/security.h>
++#include <linux/proc_fs.h>
++#include <linux/devpts_fs.h>
++#include <linux/vs_tag.h>
+ 
+ /* Taken over from the old code... */
+ 
+@@ -55,6 +58,10 @@ int inode_change_ok(const struct inode *
+ 		if (!is_owner_or_cap(inode))
+ 			goto error;
+ 	}
++
++	if (dx_permission(inode, MAY_WRITE))
++		goto error;
++
+ fine:
+ 	retval = 0;
+ error:
+@@ -120,6 +127,8 @@ int inode_setattr(struct inode * inode, 
+ 		inode->i_uid = attr->ia_uid;
+ 	if (ia_valid & ATTR_GID)
+ 		inode->i_gid = attr->ia_gid;
++	if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++		inode->i_tag = attr->ia_tag;
+ 	if (ia_valid & ATTR_ATIME)
+ 		inode->i_atime = timespec_trunc(attr->ia_atime,
+ 						inode->i_sb->s_time_gran);
+@@ -214,7 +223,8 @@ int notify_change(struct dentry * dentry
+ 		error = inode_change_ok(inode, attr);
+ 		if (!error) {
+ 			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+-			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
++			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
++			    (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag))
+ 				error = vfs_dq_transfer(inode, attr) ?
+ 					-EDQUOT : 0;
+ 			if (!error)
+--- a/fs/binfmt_aout.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/binfmt_aout.c	2011-06-10 13:03:02.000000000 +0200
+@@ -24,6 +24,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/personality.h>
+ #include <linux/init.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+--- a/fs/binfmt_elf.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/binfmt_elf.c	2011-06-10 13:03:02.000000000 +0200
+@@ -31,6 +31,7 @@
+ #include <linux/random.h>
+ #include <linux/elf.h>
+ #include <linux/utsname.h>
++#include <linux/vs_memory.h>
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/page.h>
+--- a/fs/binfmt_flat.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/binfmt_flat.c	2011-06-10 13:03:02.000000000 +0200
+@@ -35,6 +35,7 @@
+ #include <linux/init.h>
+ #include <linux/flat.h>
+ #include <linux/syscalls.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/byteorder.h>
+ #include <asm/system.h>
+--- a/fs/binfmt_som.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/binfmt_som.c	2011-06-10 13:03:02.000000000 +0200
+@@ -28,6 +28,7 @@
+ #include <linux/shm.h>
+ #include <linux/personality.h>
+ #include <linux/init.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+--- a/fs/block_dev.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/block_dev.c	2011-06-10 13:03:02.000000000 +0200
+@@ -26,6 +26,7 @@
+ #include <linux/namei.h>
+ #include <linux/log2.h>
+ #include <linux/kmemleak.h>
++#include <linux/vs_device.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
+ 
+@@ -557,6 +558,7 @@ struct block_device *bdget(dev_t dev)
+ 		bdev->bd_invalidated = 0;
+ 		inode->i_mode = S_IFBLK;
+ 		inode->i_rdev = dev;
++		inode->i_mdev = dev;
+ 		inode->i_bdev = bdev;
+ 		inode->i_data.a_ops = &def_blk_aops;
+ 		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
+@@ -603,6 +605,11 @@ EXPORT_SYMBOL(bdput);
+ static struct block_device *bd_acquire(struct inode *inode)
+ {
+ 	struct block_device *bdev;
++	dev_t mdev;
++
++	if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN))
++		return NULL;
++	inode->i_mdev = mdev;
+ 
+ 	spin_lock(&bdev_lock);
+ 	bdev = inode->i_bdev;
+@@ -613,7 +620,7 @@ static struct block_device *bd_acquire(s
+ 	}
+ 	spin_unlock(&bdev_lock);
+ 
+-	bdev = bdget(inode->i_rdev);
++	bdev = bdget(mdev);
+ 	if (bdev) {
+ 		spin_lock(&bdev_lock);
+ 		if (!inode->i_bdev) {
+--- a/fs/btrfs/ctree.h	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/btrfs/ctree.h	2011-06-10 13:03:02.000000000 +0200
+@@ -547,11 +547,14 @@ struct btrfs_inode_item {
+ 	/* modification sequence number for NFS */
+ 	__le64 sequence;
+ 
++	__le16 tag;
+ 	/*
+ 	 * a little future expansion, for more than this we can
+ 	 * just grow the inode item and version it
+ 	 */
+-	__le64 reserved[4];
++	__le16 reserved16;
++	__le32 reserved32;
++	__le64 reserved[3];
+ 	struct btrfs_timespec atime;
+ 	struct btrfs_timespec ctime;
+ 	struct btrfs_timespec mtime;
+@@ -1162,6 +1165,8 @@ struct btrfs_root {
+ #define BTRFS_MOUNT_NOSSD		(1 << 9)
+ #define BTRFS_MOUNT_DISCARD		(1 << 10)
+ 
++#define BTRFS_MOUNT_TAGGED		(1 << 24)
++
+ #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
+ #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
+ #define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
+@@ -1181,6 +1186,10 @@ struct btrfs_root {
+ #define BTRFS_INODE_NOATIME		(1 << 9)
+ #define BTRFS_INODE_DIRSYNC		(1 << 10)
+ 
++#define BTRFS_INODE_IXUNLINK		(1 << 24)
++#define BTRFS_INODE_BARRIER		(1 << 25)
++#define BTRFS_INODE_COW			(1 << 26)
++
+ 
+ #define BTRFS_INODE_ROOT_ITEM_INIT	(1 << 31)
+ 
+@@ -1385,6 +1394,7 @@ BTRFS_SETGET_FUNCS(inode_block_group, st
+ BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
+ BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
+ BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
++BTRFS_SETGET_FUNCS(inode_tag, struct btrfs_inode_item, tag, 16);
+ BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
+ BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
+ BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
+@@ -2360,6 +2370,7 @@ extern const struct dentry_operations bt
+ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ void btrfs_update_iflags(struct inode *inode);
+ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
++int btrfs_sync_flags(struct inode *inode, int, int);
+ 
+ /* file.c */
+ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync);
+--- a/fs/btrfs/disk-io.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/btrfs/disk-io.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1728,6 +1728,9 @@ struct btrfs_root *open_ctree(struct sup
+ 		goto fail_iput;
+ 	}
+ 
++	if (btrfs_test_opt(tree_root, TAGGED))
++		sb->s_flags |= MS_TAGGED;
++
+ 	features = btrfs_super_incompat_flags(disk_super) &
+ 		~BTRFS_FEATURE_INCOMPAT_SUPP;
+ 	if (features) {
+--- a/fs/btrfs/inode.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/btrfs/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,8 @@
+ #include <linux/xattr.h>
+ #include <linux/posix_acl.h>
+ #include <linux/falloc.h>
++#include <linux/vs_tag.h>
++
+ #include "compat.h"
+ #include "ctree.h"
+ #include "disk-io.h"
+@@ -2263,6 +2265,8 @@ static void btrfs_read_locked_inode(stru
+ 	int maybe_acls;
+ 	u64 alloc_group_block;
+ 	u32 rdev;
++	uid_t uid;
++	gid_t gid;
+ 	int ret;
+ 
+ 	path = btrfs_alloc_path();
+@@ -2279,8 +2283,13 @@ static void btrfs_read_locked_inode(stru
+ 
+ 	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
+ 	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
+-	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
+-	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
++
++	uid = btrfs_inode_uid(leaf, inode_item);
++	gid = btrfs_inode_gid(leaf, inode_item);
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++		btrfs_inode_tag(leaf, inode_item));
+ 	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
+ 
+ 	tspec = btrfs_inode_atime(inode_item);
+@@ -2362,8 +2371,15 @@ static void fill_inode_item(struct btrfs
+ 			    struct btrfs_inode_item *item,
+ 			    struct inode *inode)
+ {
+-	btrfs_set_inode_uid(leaf, item, inode->i_uid);
+-	btrfs_set_inode_gid(leaf, item, inode->i_gid);
++	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
++
++	btrfs_set_inode_uid(leaf, item, uid);
++	btrfs_set_inode_gid(leaf, item, gid);
++#ifdef CONFIG_TAGGING_INTERN
++	btrfs_set_inode_tag(leaf, item, inode->i_tag);
++#endif
++
+ 	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
+ 	btrfs_set_inode_mode(leaf, item, inode->i_mode);
+ 	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+@@ -4153,6 +4169,7 @@ static struct inode *btrfs_new_inode(str
+ 	} else
+ 		inode->i_gid = current_fsgid();
+ 
++	inode->i_tag = dx_current_fstag(root->fs_info->sb);
+ 	inode->i_mode = mode;
+ 	inode->i_ino = objectid;
+ 	inode_set_bytes(inode, 0);
+@@ -5954,6 +5971,7 @@ static const struct inode_operations btr
+ 	.listxattr	= btrfs_listxattr,
+ 	.removexattr	= btrfs_removexattr,
+ 	.permission	= btrfs_permission,
++	.sync_flags	= btrfs_sync_flags,
+ };
+ static const struct inode_operations btrfs_dir_ro_inode_operations = {
+ 	.lookup		= btrfs_lookup,
+@@ -6029,6 +6047,7 @@ static const struct inode_operations btr
+ 	.permission	= btrfs_permission,
+ 	.fallocate	= btrfs_fallocate,
+ 	.fiemap		= btrfs_fiemap,
++	.sync_flags	= btrfs_sync_flags,
+ };
+ static const struct inode_operations btrfs_special_inode_operations = {
+ 	.getattr	= btrfs_getattr,
+--- a/fs/btrfs/ioctl.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/btrfs/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -67,10 +67,13 @@ static unsigned int btrfs_flags_to_ioctl
+ {
+ 	unsigned int iflags = 0;
+ 
+-	if (flags & BTRFS_INODE_SYNC)
+-		iflags |= FS_SYNC_FL;
+ 	if (flags & BTRFS_INODE_IMMUTABLE)
+ 		iflags |= FS_IMMUTABLE_FL;
++	if (flags & BTRFS_INODE_IXUNLINK)
++		iflags |= FS_IXUNLINK_FL;
++
++	if (flags & BTRFS_INODE_SYNC)
++		iflags |= FS_SYNC_FL;
+ 	if (flags & BTRFS_INODE_APPEND)
+ 		iflags |= FS_APPEND_FL;
+ 	if (flags & BTRFS_INODE_NODUMP)
+@@ -80,28 +83,78 @@ static unsigned int btrfs_flags_to_ioctl
+ 	if (flags & BTRFS_INODE_DIRSYNC)
+ 		iflags |= FS_DIRSYNC_FL;
+ 
++	if (flags & BTRFS_INODE_BARRIER)
++		iflags |= FS_BARRIER_FL;
++	if (flags & BTRFS_INODE_COW)
++		iflags |= FS_COW_FL;
+ 	return iflags;
+ }
+ 
+ /*
+- * Update inode->i_flags based on the btrfs internal flags.
++ * Update inode->i_(v)flags based on the btrfs internal flags.
+  */
+ void btrfs_update_iflags(struct inode *inode)
+ {
+ 	struct btrfs_inode *ip = BTRFS_I(inode);
+ 
+-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+ 
+-	if (ip->flags & BTRFS_INODE_SYNC)
+-		inode->i_flags |= S_SYNC;
+ 	if (ip->flags & BTRFS_INODE_IMMUTABLE)
+ 		inode->i_flags |= S_IMMUTABLE;
++	if (ip->flags & BTRFS_INODE_IXUNLINK)
++		inode->i_flags |= S_IXUNLINK;
++
++	if (ip->flags & BTRFS_INODE_SYNC)
++		inode->i_flags |= S_SYNC;
+ 	if (ip->flags & BTRFS_INODE_APPEND)
+ 		inode->i_flags |= S_APPEND;
+ 	if (ip->flags & BTRFS_INODE_NOATIME)
+ 		inode->i_flags |= S_NOATIME;
+ 	if (ip->flags & BTRFS_INODE_DIRSYNC)
+ 		inode->i_flags |= S_DIRSYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (ip->flags & BTRFS_INODE_BARRIER)
++		inode->i_vflags |= V_BARRIER;
++	if (ip->flags & BTRFS_INODE_COW)
++		inode->i_vflags |= V_COW;
++}
++
++/*
++ * Update btrfs internal flags from inode->i_(v)flags.
++ */
++void btrfs_update_flags(struct inode *inode)
++{
++	struct btrfs_inode *ip = BTRFS_I(inode);
++
++	unsigned int flags = inode->i_flags;
++	unsigned int vflags = inode->i_vflags;
++
++	ip->flags &= ~(BTRFS_INODE_SYNC | BTRFS_INODE_APPEND |
++			BTRFS_INODE_IMMUTABLE | BTRFS_INODE_IXUNLINK |
++			BTRFS_INODE_NOATIME | BTRFS_INODE_DIRSYNC |
++			BTRFS_INODE_BARRIER | BTRFS_INODE_COW);
++
++	if (flags & S_IMMUTABLE)
++		ip->flags |= BTRFS_INODE_IMMUTABLE;
++	if (flags & S_IXUNLINK)
++		ip->flags |= BTRFS_INODE_IXUNLINK;
++
++	if (flags & S_SYNC)
++		ip->flags |= BTRFS_INODE_SYNC;
++	if (flags & S_APPEND)
++		ip->flags |= BTRFS_INODE_APPEND;
++	if (flags & S_NOATIME)
++		ip->flags |= BTRFS_INODE_NOATIME;
++	if (flags & S_DIRSYNC)
++		ip->flags |= BTRFS_INODE_DIRSYNC;
++
++	if (vflags & V_BARRIER)
++		ip->flags |= BTRFS_INODE_BARRIER;
++	if (vflags & V_COW)
++		ip->flags |= BTRFS_INODE_COW;
+ }
+ 
+ /*
+@@ -119,7 +172,7 @@ void btrfs_inherit_iflags(struct inode *
+ 	flags = BTRFS_I(dir)->flags;
+ 
+ 	if (S_ISREG(inode->i_mode))
+-		flags &= ~BTRFS_INODE_DIRSYNC;
++		flags &= ~(BTRFS_INODE_DIRSYNC | BTRFS_INODE_BARRIER);
+ 	else if (!S_ISDIR(inode->i_mode))
+ 		flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);
+ 
+@@ -127,6 +180,30 @@ void btrfs_inherit_iflags(struct inode *
+ 	btrfs_update_iflags(inode);
+ }
+ 
++int btrfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	struct btrfs_inode *ip = BTRFS_I(inode);
++	struct btrfs_root *root = ip->root;
++	struct btrfs_trans_handle *trans;
++	int ret;
++
++	trans = btrfs_join_transaction(root, 1);
++	BUG_ON(!trans);
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	btrfs_update_flags(inode);
++
++	ret = btrfs_update_inode(trans, root, inode);
++	BUG_ON(ret);
++
++	btrfs_update_iflags(inode);
++	inode->i_ctime = CURRENT_TIME;
++	btrfs_end_transaction(trans, root);
++
++	return 0;
++}
++
+ static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
+ {
+ 	struct btrfs_inode *ip = BTRFS_I(file->f_path.dentry->d_inode);
+@@ -149,6 +226,7 @@ static int btrfs_ioctl_setflags(struct f
+ 	if (copy_from_user(&flags, arg, sizeof(flags)))
+ 		return -EFAULT;
+ 
++	/* maybe add FS_IXUNLINK_FL ? */
+ 	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
+ 		      FS_NOATIME_FL | FS_NODUMP_FL | \
+ 		      FS_SYNC_FL | FS_DIRSYNC_FL))
+@@ -161,7 +239,8 @@ static int btrfs_ioctl_setflags(struct f
+ 
+ 	flags = btrfs_mask_flags(inode->i_mode, flags);
+ 	oldflags = btrfs_flags_to_ioctl(ip->flags);
+-	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
++	if ((flags ^ oldflags) & (FS_APPEND_FL |
++		FS_IMMUTABLE_FL | FS_IXUNLINK_FL)) {
+ 		if (!capable(CAP_LINUX_IMMUTABLE)) {
+ 			ret = -EPERM;
+ 			goto out_unlock;
+@@ -172,14 +251,19 @@ static int btrfs_ioctl_setflags(struct f
+ 	if (ret)
+ 		goto out_unlock;
+ 
+-	if (flags & FS_SYNC_FL)
+-		ip->flags |= BTRFS_INODE_SYNC;
+-	else
+-		ip->flags &= ~BTRFS_INODE_SYNC;
+ 	if (flags & FS_IMMUTABLE_FL)
+ 		ip->flags |= BTRFS_INODE_IMMUTABLE;
+ 	else
+ 		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
++	if (flags & FS_IXUNLINK_FL)
++		ip->flags |= BTRFS_INODE_IXUNLINK;
++	else
++		ip->flags &= ~BTRFS_INODE_IXUNLINK;
++
++	if (flags & FS_SYNC_FL)
++		ip->flags |= BTRFS_INODE_SYNC;
++	else
++		ip->flags &= ~BTRFS_INODE_SYNC;
+ 	if (flags & FS_APPEND_FL)
+ 		ip->flags |= BTRFS_INODE_APPEND;
+ 	else
+--- a/fs/btrfs/super.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/btrfs/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -67,7 +67,7 @@ enum {
+ 	Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
+ 	Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
+ 	Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
+-	Opt_discard, Opt_err,
++	Opt_tag, Opt_notag, Opt_tagid, Opt_discard, Opt_err,
+ };
+ 
+ static match_table_t tokens = {
+@@ -90,6 +90,9 @@ static match_table_t tokens = {
+ 	{Opt_flushoncommit, "flushoncommit"},
+ 	{Opt_ratio, "metadata_ratio=%d"},
+ 	{Opt_discard, "discard"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_err, NULL},
+ };
+ 
+@@ -264,6 +267,22 @@ int btrfs_parse_options(struct btrfs_roo
+ 		case Opt_discard:
+ 			btrfs_set_opt(info->mount_opt, DISCARD);
+ 			break;
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			printk(KERN_INFO "btrfs: use tagging\n");
++			btrfs_set_opt(info->mount_opt, TAGGED);
++			break;
++		case Opt_notag:
++			printk(KERN_INFO "btrfs: disabled tagging\n");
++			btrfs_clear_opt(info->mount_opt, TAGGED);
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			btrfs_set_opt(info->mount_opt, TAGGED);
++			break;
++#endif
+ 		case Opt_err:
+ 			printk(KERN_INFO "btrfs: unrecognized mount option "
+ 			       "'%s'\n", p);
+@@ -585,6 +604,12 @@ static int btrfs_remount(struct super_bl
+ 	if (ret)
+ 		return -EINVAL;
+ 
++	if (btrfs_test_opt(root, TAGGED) && !(sb->s_flags & MS_TAGGED)) {
++		printk("btrfs: %s: tagging not permitted on remount.\n",
++			sb->s_id);
++		return -EINVAL;
++	}
++
+ 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+ 		return 0;
+ 
+--- a/fs/char_dev.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/char_dev.c	2011-06-10 13:03:02.000000000 +0200
+@@ -20,6 +20,8 @@
+ #include <linux/cdev.h>
+ #include <linux/mutex.h>
+ #include <linux/backing-dev.h>
++#include <linux/vs_context.h>
++#include <linux/vs_device.h>
+ 
+ #include "internal.h"
+ 
+@@ -370,14 +372,21 @@ static int chrdev_open(struct inode *ino
+ 	struct cdev *p;
+ 	struct cdev *new = NULL;
+ 	int ret = 0;
++	dev_t mdev;
++
++	if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN))
++		return -EPERM;
++	inode->i_mdev = mdev;
+ 
+ 	spin_lock(&cdev_lock);
+ 	p = inode->i_cdev;
+ 	if (!p) {
+ 		struct kobject *kobj;
+ 		int idx;
++
+ 		spin_unlock(&cdev_lock);
+-		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
++
++		kobj = kobj_lookup(cdev_map, mdev, &idx);
+ 		if (!kobj)
+ 			return -ENXIO;
+ 		new = container_of(kobj, struct cdev, kobj);
+--- a/fs/dcache.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/dcache.c	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/fs_struct.h>
+ #include <linux/hardirq.h>
++#include <linux/vs_limit.h>
+ #include "internal.h"
+ 
+ int sysctl_vfs_cache_pressure __read_mostly = 100;
+@@ -230,6 +231,8 @@ repeat:
+ 		return;
+ 	}
+ 
++	vx_dentry_dec(dentry);
++
+ 	/*
+ 	 * AV: ->d_delete() is _NOT_ allowed to block now.
+ 	 */
+@@ -321,6 +324,7 @@ static inline struct dentry * __dget_loc
+ {
+ 	atomic_inc(&dentry->d_count);
+ 	dentry_lru_del_init(dentry);
++	vx_dentry_inc(dentry);
+ 	return dentry;
+ }
+ 
+@@ -919,6 +923,9 @@ struct dentry *d_alloc(struct dentry * p
+ 	struct dentry *dentry;
+ 	char *dname;
+ 
++	if (!vx_dentry_avail(1))
++		return NULL;
++
+ 	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
+ 	if (!dentry)
+ 		return NULL;
+@@ -964,6 +971,7 @@ struct dentry *d_alloc(struct dentry * p
+ 	if (parent)
+ 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
+ 	dentry_stat.nr_dentry++;
++	vx_dentry_inc(dentry);
+ 	spin_unlock(&dcache_lock);
+ 
+ 	return dentry;
+@@ -1410,6 +1418,7 @@ struct dentry * __d_lookup(struct dentry
+ 		}
+ 
+ 		atomic_inc(&dentry->d_count);
++		vx_dentry_inc(dentry);
+ 		found = dentry;
+ 		spin_unlock(&dentry->d_lock);
+ 		break;
+--- a/fs/devpts/inode.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/devpts/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -24,6 +24,7 @@
+ #include <linux/parser.h>
+ #include <linux/fsnotify.h>
+ #include <linux/seq_file.h>
++#include <linux/vs_base.h>
+ 
+ #define DEVPTS_DEFAULT_MODE 0600
+ /*
+@@ -35,6 +36,20 @@
+ #define DEVPTS_DEFAULT_PTMX_MODE 0000
+ #define PTMX_MINOR	2
+ 
++static int devpts_permission(struct inode *inode, int mask)
++{
++	int ret = -EACCES;
++
++	/* devpts is xid tagged */
++	if (vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT))
++		ret = generic_permission(inode, mask, NULL);
++	return ret;
++}
++
++static struct inode_operations devpts_file_inode_operations = {
++	.permission     = devpts_permission,
++};
++
+ extern int pty_limit;			/* Config limit on Unix98 ptys */
+ static DEFINE_MUTEX(allocated_ptys_lock);
+ 
+@@ -262,6 +277,25 @@ static int devpts_show_options(struct se
+ 	return 0;
+ }
+ 
++static int devpts_filter(struct dentry *de)
++{
++	/* devpts is xid tagged */
++	return vx_check((xid_t)de->d_inode->i_tag, VS_WATCH_P | VS_IDENT);
++}
++
++static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir)
++{
++	return dcache_readdir_filter(filp, dirent, filldir, devpts_filter);
++}
++
++static struct file_operations devpts_dir_operations = {
++	.open		= dcache_dir_open,
++	.release	= dcache_dir_close,
++	.llseek		= dcache_dir_lseek,
++	.read		= generic_read_dir,
++	.readdir	= devpts_readdir,
++};
++
+ static const struct super_operations devpts_sops = {
+ 	.statfs		= simple_statfs,
+ 	.remount_fs	= devpts_remount,
+@@ -301,12 +335,15 @@ devpts_fill_super(struct super_block *s,
+ 	inode = new_inode(s);
+ 	if (!inode)
+ 		goto free_fsi;
++
+ 	inode->i_ino = 1;
+ 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ 	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
+ 	inode->i_op = &simple_dir_inode_operations;
+-	inode->i_fop = &simple_dir_operations;
++	inode->i_fop = &devpts_dir_operations;
+ 	inode->i_nlink = 2;
++	/* devpts is xid tagged */
++	inode->i_tag = (tag_t)vx_current_xid();
+ 
+ 	s->s_root = d_alloc_root(inode);
+ 	if (s->s_root)
+@@ -497,6 +534,9 @@ int devpts_pty_new(struct inode *ptmx_in
+ 	inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
+ 	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ 	init_special_inode(inode, S_IFCHR|opts->mode, device);
++	/* devpts is xid tagged */
++	inode->i_tag = (tag_t)vx_current_xid();
++	inode->i_op = &devpts_file_inode_operations;
+ 	inode->i_private = tty;
+ 	tty->driver_data = inode;
+ 
+--- a/fs/exec.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/exec.c	2011-06-10 13:03:02.000000000 +0200
+@@ -273,7 +273,9 @@ static int __bprm_mm_init(struct linux_b
+ 	if (err)
+ 		goto err;
+ 
+-	mm->stack_vm = mm->total_vm = 1;
++	mm->total_vm = 0;
++	vx_vmpages_inc(mm);
++	mm->stack_vm = 1;
+ 	up_write(&mm->mmap_sem);
+ 	bprm->p = vma->vm_end - sizeof(void *);
+ 	return 0;
+@@ -1537,7 +1539,7 @@ static int format_corename(char *corenam
+ 			/* UNIX time of coredump */
+ 			case 't': {
+ 				struct timeval tv;
+-				do_gettimeofday(&tv);
++				vx_gettimeofday(&tv);
+ 				rc = snprintf(out_ptr, out_end - out_ptr,
+ 					      "%lu", tv.tv_sec);
+ 				if (rc > out_end - out_ptr)
+--- a/fs/ext2/balloc.c	2009-06-11 17:13:03.000000000 +0200
++++ a/fs/ext2/balloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -701,7 +701,6 @@ ext2_try_to_allocate(struct super_block 
+ 			start = 0;
+ 		end = EXT2_BLOCKS_PER_GROUP(sb);
+ 	}
+-
+ 	BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
+ 
+ repeat:
+--- a/fs/ext2/ext2.h	2009-09-10 15:26:21.000000000 +0200
++++ a/fs/ext2/ext2.h	2011-06-10 13:03:02.000000000 +0200
+@@ -131,6 +131,7 @@ extern int ext2_fiemap(struct inode *ino
+ int __ext2_write_begin(struct file *file, struct address_space *mapping,
+ 		loff_t pos, unsigned len, unsigned flags,
+ 		struct page **pagep, void **fsdata);
++extern int ext2_sync_flags(struct inode *, int, int);
+ 
+ /* ioctl.c */
+ extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
+--- a/fs/ext2/file.c	2009-12-03 20:02:51.000000000 +0100
++++ a/fs/ext2/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -87,4 +87,5 @@ const struct inode_operations ext2_file_
+ 	.setattr	= ext2_setattr,
+ 	.check_acl	= ext2_check_acl,
+ 	.fiemap		= ext2_fiemap,
++	.sync_flags	= ext2_sync_flags,
+ };
+--- a/fs/ext2/ialloc.c	2009-06-11 17:13:03.000000000 +0200
++++ a/fs/ext2/ialloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -17,6 +17,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/random.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "xattr.h"
+ #include "acl.h"
+@@ -560,6 +561,7 @@ got:
+ 	} else
+ 		inode->i_gid = current_fsgid();
+ 	inode->i_mode = mode;
++	inode->i_tag = dx_current_fstag(sb);
+ 
+ 	inode->i_ino = ino;
+ 	inode->i_blocks = 0;
+--- a/fs/ext2/inode.c	2009-12-03 20:02:51.000000000 +0100
++++ a/fs/ext2/inode.c	2011-06-10 18:47:35.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/mpage.h>
+ #include <linux/fiemap.h>
+ #include <linux/namei.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "acl.h"
+ #include "xip.h"
+@@ -1040,7 +1041,7 @@ void ext2_truncate(struct inode *inode)
+ 		return;
+ 	if (ext2_inode_is_fast_symlink(inode))
+ 		return;
+-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ 		return;
+ 
+ 	blocksize = inode->i_sb->s_blocksize;
+@@ -1178,36 +1179,61 @@ void ext2_set_inode_flags(struct inode *
+ {
+ 	unsigned int flags = EXT2_I(inode)->i_flags;
+ 
+-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++
++	if (flags & EXT2_IMMUTABLE_FL)
++		inode->i_flags |= S_IMMUTABLE;
++	if (flags & EXT2_IXUNLINK_FL)
++		inode->i_flags |= S_IXUNLINK;
++
+ 	if (flags & EXT2_SYNC_FL)
+ 		inode->i_flags |= S_SYNC;
+ 	if (flags & EXT2_APPEND_FL)
+ 		inode->i_flags |= S_APPEND;
+-	if (flags & EXT2_IMMUTABLE_FL)
+-		inode->i_flags |= S_IMMUTABLE;
+ 	if (flags & EXT2_NOATIME_FL)
+ 		inode->i_flags |= S_NOATIME;
+ 	if (flags & EXT2_DIRSYNC_FL)
+ 		inode->i_flags |= S_DIRSYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (flags & EXT2_BARRIER_FL)
++		inode->i_vflags |= V_BARRIER;
++	if (flags & EXT2_COW_FL)
++		inode->i_vflags |= V_COW;
+ }
+ 
+ /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
+ void ext2_get_inode_flags(struct ext2_inode_info *ei)
+ {
+ 	unsigned int flags = ei->vfs_inode.i_flags;
++	unsigned int vflags = ei->vfs_inode.i_vflags;
++
++	ei->i_flags &= ~(EXT2_SYNC_FL | EXT2_APPEND_FL |
++			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL |
++			EXT2_NOATIME_FL | EXT2_DIRSYNC_FL |
++			EXT2_BARRIER_FL | EXT2_COW_FL);
++
++	if (flags & S_IMMUTABLE)
++		ei->i_flags |= EXT2_IMMUTABLE_FL;
++	if (flags & S_IXUNLINK)
++		ei->i_flags |= EXT2_IXUNLINK_FL;
+ 
+-	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
+-			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
+ 	if (flags & S_SYNC)
+ 		ei->i_flags |= EXT2_SYNC_FL;
+ 	if (flags & S_APPEND)
+ 		ei->i_flags |= EXT2_APPEND_FL;
+-	if (flags & S_IMMUTABLE)
+-		ei->i_flags |= EXT2_IMMUTABLE_FL;
+ 	if (flags & S_NOATIME)
+ 		ei->i_flags |= EXT2_NOATIME_FL;
+ 	if (flags & S_DIRSYNC)
+ 		ei->i_flags |= EXT2_DIRSYNC_FL;
++
++	if (vflags & V_BARRIER)
++		ei->i_flags |= EXT2_BARRIER_FL;
++	if (vflags & V_COW)
++		ei->i_flags |= EXT2_COW_FL;
+ }
+ 
+ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
+@@ -1217,6 +1243,8 @@ struct inode *ext2_iget (struct super_bl
+ 	struct ext2_inode *raw_inode;
+ 	struct inode *inode;
+ 	long ret = -EIO;
++	uid_t uid;
++	gid_t gid;
+ 	int n;
+ 
+ 	inode = iget_locked(sb, ino);
+@@ -1235,12 +1263,17 @@ struct inode *ext2_iget (struct super_bl
+ 	}
+ 
+ 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
++	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
++	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ 	if (!(test_opt (inode->i_sb, NO_UID32))) {
+-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
++		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
++		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ 	}
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++		le16_to_cpu(raw_inode->i_raw_tag));
++
+ 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+ 	inode->i_size = le32_to_cpu(raw_inode->i_size);
+ 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+@@ -1338,8 +1371,8 @@ int ext2_write_inode(struct inode *inode
+ 	struct ext2_inode_info *ei = EXT2_I(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	ino_t ino = inode->i_ino;
+-	uid_t uid = inode->i_uid;
+-	gid_t gid = inode->i_gid;
++	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+ 	struct buffer_head * bh;
+ 	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
+ 	int n;
+@@ -1375,6 +1408,9 @@ int ext2_write_inode(struct inode *inode
+ 		raw_inode->i_uid_high = 0;
+ 		raw_inode->i_gid_high = 0;
+ 	}
++#ifdef CONFIG_TAGGING_INTERN
++	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+ 	raw_inode->i_size = cpu_to_le32(inode->i_size);
+ 	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+@@ -1456,7 +1492,8 @@ int ext2_setattr(struct dentry *dentry, 
+ 	if (error)
+ 		return error;
+ 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
+-	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
++	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
++	    (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
+ 		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
+ 		if (error)
+ 			return error;
+--- a/fs/ext2/ioctl.c	2009-09-10 15:26:21.000000000 +0200
++++ a/fs/ext2/ioctl.c	2011-06-10 18:47:17.000000000 +0200
+@@ -17,6 +17,16 @@
+ #include <asm/uaccess.h>
+ 
+ 
++int ext2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	ext2_get_inode_flags(EXT2_I(inode));
++	inode->i_ctime = CURRENT_TIME_SEC;
++	mark_inode_dirty(inode);
++	return 0;
++}
++
+ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
+@@ -51,6 +61,11 @@ long ext2_ioctl(struct file *filp, unsig
+ 
+ 		flags = ext2_mask_flags(inode->i_mode, flags);
+ 
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -EACCES;
++		}
++
+ 		mutex_lock(&inode->i_mutex);
+ 		/* Is it quota file? Do not allow user to mess with it */
+ 		if (IS_NOQUOTA(inode)) {
+@@ -66,7 +81,9 @@ long ext2_ioctl(struct file *filp, unsig
+ 		 *
+ 		 * This test looks nicer. Thanks to Pauline Middelink
+ 		 */
+-		if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
++		if ((oldflags & EXT2_IMMUTABLE_FL) ||
++			((flags ^ oldflags) & (EXT2_APPEND_FL |
++			EXT2_IMMUTABLE_FL | EXT2_IXUNLINK_FL))) {
+ 			if (!capable(CAP_LINUX_IMMUTABLE)) {
+ 				mutex_unlock(&inode->i_mutex);
+ 				ret = -EPERM;
+@@ -74,7 +91,7 @@ long ext2_ioctl(struct file *filp, unsig
+ 			}
+ 		}
+ 
+-		flags = flags & EXT2_FL_USER_MODIFIABLE;
++		flags &= EXT2_FL_USER_MODIFIABLE;
+ 		flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
+ 		ei->i_flags = flags;
+ 		mutex_unlock(&inode->i_mutex);
+--- a/fs/ext2/namei.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext2/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -31,6 +31,7 @@
+  */
+ 
+ #include <linux/pagemap.h>
++#include <linux/vs_tag.h>
+ #include "ext2.h"
+ #include "xattr.h"
+ #include "acl.h"
+@@ -74,6 +75,7 @@ static struct dentry *ext2_lookup(struct
+ 				return ERR_PTR(-EIO);
+ 			} else {
+ 				return ERR_CAST(inode);
++		dx_propagate_tag(nd, inode);
+ 			}
+ 		}
+ 	}
+@@ -396,6 +398,7 @@ const struct inode_operations ext2_dir_i
+ #endif
+ 	.setattr	= ext2_setattr,
+ 	.check_acl	= ext2_check_acl,
++	.sync_flags	= ext2_sync_flags,
+ };
+ 
+ const struct inode_operations ext2_special_inode_operations = {
+--- a/fs/ext2/super.c	2009-09-10 15:26:21.000000000 +0200
++++ a/fs/ext2/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -382,7 +382,8 @@ enum {
+ 	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
+ 	Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
+ 	Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
+-	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
++	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation,
++	Opt_tag, Opt_notag, Opt_tagid
+ };
+ 
+ static const match_table_t tokens = {
+@@ -410,6 +411,9 @@ static const match_table_t tokens = {
+ 	{Opt_acl, "acl"},
+ 	{Opt_noacl, "noacl"},
+ 	{Opt_xip, "xip"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_grpquota, "grpquota"},
+ 	{Opt_ignore, "noquota"},
+ 	{Opt_quota, "quota"},
+@@ -480,6 +484,20 @@ static int parse_options (char * options
+ 		case Opt_nouid32:
+ 			set_opt (sbi->s_mount_opt, NO_UID32);
+ 			break;
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++		case Opt_notag:
++			clear_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
+ 		case Opt_nocheck:
+ 			clear_opt (sbi->s_mount_opt, CHECK);
+ 			break;
+@@ -829,6 +847,8 @@ static int ext2_fill_super(struct super_
+ 	if (!parse_options ((char *) data, sbi))
+ 		goto failed_mount;
+ 
++	if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
+ 		 MS_POSIXACL : 0);
+@@ -1175,6 +1195,14 @@ static int ext2_remount (struct super_bl
+ 		goto restore_opts;
+ 	}
+ 
++	if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) &&
++		!(sb->s_flags & MS_TAGGED)) {
++		printk("EXT2-fs: %s: tagging not permitted on remount.\n",
++		       sb->s_id);
++		err = -EINVAL;
++		goto restore_opts;
++	}
++
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ 
+--- a/fs/ext3/file.c	2009-12-03 20:02:51.000000000 +0100
++++ a/fs/ext3/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -80,5 +80,6 @@ const struct inode_operations ext3_file_
+ #endif
+ 	.check_acl	= ext3_check_acl,
+ 	.fiemap		= ext3_fiemap,
++	.sync_flags	= ext3_sync_flags,
+ };
+ 
+--- a/fs/ext3/ialloc.c	2009-09-10 15:26:21.000000000 +0200
++++ a/fs/ext3/ialloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -23,6 +23,7 @@
+ #include <linux/buffer_head.h>
+ #include <linux/random.h>
+ #include <linux/bitops.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/byteorder.h>
+ 
+@@ -548,6 +549,7 @@ got:
+ 	} else
+ 		inode->i_gid = current_fsgid();
+ 	inode->i_mode = mode;
++	inode->i_tag = dx_current_fstag(sb);
+ 
+ 	inode->i_ino = ino;
+ 	/* This is the optimal IO size (for stat), not the fs block size */
+--- a/fs/ext3/inode.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext3/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -38,6 +38,7 @@
+ #include <linux/bio.h>
+ #include <linux/fiemap.h>
+ #include <linux/namei.h>
++#include <linux/vs_tag.h>
+ #include "xattr.h"
+ #include "acl.h"
+ 
+@@ -2343,7 +2344,7 @@ static void ext3_free_branches(handle_t 
+ 
+ int ext3_can_truncate(struct inode *inode)
+ {
+-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ 		return 0;
+ 	if (S_ISREG(inode->i_mode))
+ 		return 1;
+@@ -2728,36 +2729,60 @@ void ext3_set_inode_flags(struct inode *
+ {
+ 	unsigned int flags = EXT3_I(inode)->i_flags;
+ 
+-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++	if (flags & EXT3_IMMUTABLE_FL)
++		inode->i_flags |= S_IMMUTABLE;
++	if (flags & EXT3_IXUNLINK_FL)
++		inode->i_flags |= S_IXUNLINK;
++
+ 	if (flags & EXT3_SYNC_FL)
+ 		inode->i_flags |= S_SYNC;
+ 	if (flags & EXT3_APPEND_FL)
+ 		inode->i_flags |= S_APPEND;
+-	if (flags & EXT3_IMMUTABLE_FL)
+-		inode->i_flags |= S_IMMUTABLE;
+ 	if (flags & EXT3_NOATIME_FL)
+ 		inode->i_flags |= S_NOATIME;
+ 	if (flags & EXT3_DIRSYNC_FL)
+ 		inode->i_flags |= S_DIRSYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (flags & EXT3_BARRIER_FL)
++		inode->i_vflags |= V_BARRIER;
++	if (flags & EXT3_COW_FL)
++		inode->i_vflags |= V_COW;
+ }
+ 
+ /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
+ void ext3_get_inode_flags(struct ext3_inode_info *ei)
+ {
+ 	unsigned int flags = ei->vfs_inode.i_flags;
++	unsigned int vflags = ei->vfs_inode.i_vflags;
++
++	ei->i_flags &= ~(EXT3_SYNC_FL | EXT3_APPEND_FL |
++			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL |
++			EXT3_NOATIME_FL | EXT3_DIRSYNC_FL |
++			EXT3_BARRIER_FL | EXT3_COW_FL);
++
++	if (flags & S_IMMUTABLE)
++		ei->i_flags |= EXT3_IMMUTABLE_FL;
++	if (flags & S_IXUNLINK)
++		ei->i_flags |= EXT3_IXUNLINK_FL;
+ 
+-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
+-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
+ 	if (flags & S_SYNC)
+ 		ei->i_flags |= EXT3_SYNC_FL;
+ 	if (flags & S_APPEND)
+ 		ei->i_flags |= EXT3_APPEND_FL;
+-	if (flags & S_IMMUTABLE)
+-		ei->i_flags |= EXT3_IMMUTABLE_FL;
+ 	if (flags & S_NOATIME)
+ 		ei->i_flags |= EXT3_NOATIME_FL;
+ 	if (flags & S_DIRSYNC)
+ 		ei->i_flags |= EXT3_DIRSYNC_FL;
++
++	if (vflags & V_BARRIER)
++		ei->i_flags |= EXT3_BARRIER_FL;
++	if (vflags & V_COW)
++		ei->i_flags |= EXT3_COW_FL;
+ }
+ 
+ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
+@@ -2771,6 +2796,8 @@ struct inode *ext3_iget(struct super_blo
+ 	transaction_t *transaction;
+ 	long ret;
+ 	int block;
++	uid_t uid;
++	gid_t gid;
+ 
+ 	inode = iget_locked(sb, ino);
+ 	if (!inode)
+@@ -2787,12 +2814,17 @@ struct inode *ext3_iget(struct super_blo
+ 	bh = iloc.bh;
+ 	raw_inode = ext3_raw_inode(&iloc);
+ 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
++	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
++	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ 	if(!(test_opt (inode->i_sb, NO_UID32))) {
+-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
++		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
++		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ 	}
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++		le16_to_cpu(raw_inode->i_raw_tag));
++
+ 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+ 	inode->i_size = le32_to_cpu(raw_inode->i_size);
+ 	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+@@ -2947,6 +2979,8 @@ static int ext3_do_update_inode(handle_t
+ 	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
+ 	struct ext3_inode_info *ei = EXT3_I(inode);
+ 	struct buffer_head *bh = iloc->bh;
++	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+ 	int err = 0, rc, block;
+ 
+ again:
+@@ -2961,29 +2995,32 @@ again:
+ 	ext3_get_inode_flags(ei);
+ 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+ 	if(!(test_opt(inode->i_sb, NO_UID32))) {
+-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
++		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
++		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
+ /*
+  * Fix up interoperability with old kernels. Otherwise, old inodes get
+  * re-used with the upper 16 bits of the uid/gid intact
+  */
+ 		if(!ei->i_dtime) {
+ 			raw_inode->i_uid_high =
+-				cpu_to_le16(high_16_bits(inode->i_uid));
++				cpu_to_le16(high_16_bits(uid));
+ 			raw_inode->i_gid_high =
+-				cpu_to_le16(high_16_bits(inode->i_gid));
++				cpu_to_le16(high_16_bits(gid));
+ 		} else {
+ 			raw_inode->i_uid_high = 0;
+ 			raw_inode->i_gid_high = 0;
+ 		}
+ 	} else {
+ 		raw_inode->i_uid_low =
+-			cpu_to_le16(fs_high2lowuid(inode->i_uid));
++			cpu_to_le16(fs_high2lowuid(uid));
+ 		raw_inode->i_gid_low =
+-			cpu_to_le16(fs_high2lowgid(inode->i_gid));
++			cpu_to_le16(fs_high2lowgid(gid));
+ 		raw_inode->i_uid_high = 0;
+ 		raw_inode->i_gid_high = 0;
+ 	}
++#ifdef CONFIG_TAGGING_INTERN
++	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+ 	raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+ 	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+@@ -3141,7 +3178,8 @@ int ext3_setattr(struct dentry *dentry, 
+ 		return error;
+ 
+ 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+-		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
++		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
++		(ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ 		handle_t *handle;
+ 
+ 		/* (user+group)*(old+new) structure, inode write (sb,
+@@ -3163,6 +3201,8 @@ int ext3_setattr(struct dentry *dentry, 
+ 			inode->i_uid = attr->ia_uid;
+ 		if (attr->ia_valid & ATTR_GID)
+ 			inode->i_gid = attr->ia_gid;
++		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++			inode->i_tag = attr->ia_tag;
+ 		error = ext3_mark_inode_dirty(handle, inode);
+ 		ext3_journal_stop(handle);
+ 	}
+--- a/fs/ext3/ioctl.c	2009-06-11 17:13:03.000000000 +0200
++++ a/fs/ext3/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -8,6 +8,7 @@
+  */
+ 
+ #include <linux/fs.h>
++#include <linux/mount.h>
+ #include <linux/jbd.h>
+ #include <linux/capability.h>
+ #include <linux/ext3_fs.h>
+@@ -17,6 +18,34 @@
+ #include <linux/compat.h>
+ #include <asm/uaccess.h>
+ 
++
++int ext3_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	handle_t *handle = NULL;
++	struct ext3_iloc iloc;
++	int err;
++
++	handle = ext3_journal_start(inode, 1);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	if (IS_SYNC(inode))
++		handle->h_sync = 1;
++	err = ext3_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		goto flags_err;
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	ext3_get_inode_flags(EXT3_I(inode));
++	inode->i_ctime = CURRENT_TIME_SEC;
++
++	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
++flags_err:
++	ext3_journal_stop(handle);
++	return err;
++}
++
+ long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
+@@ -50,6 +79,11 @@ long ext3_ioctl(struct file *filp, unsig
+ 
+ 		flags = ext3_mask_flags(inode->i_mode, flags);
+ 
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -EACCES;
++		}
++
+ 		mutex_lock(&inode->i_mutex);
+ 
+ 		/* Is it quota file? Do not allow user to mess with it */
+@@ -68,7 +102,9 @@ long ext3_ioctl(struct file *filp, unsig
+ 		 *
+ 		 * This test looks nicer. Thanks to Pauline Middelink
+ 		 */
+-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
++		if ((oldflags & EXT3_IMMUTABLE_FL) ||
++			((flags ^ oldflags) & (EXT3_APPEND_FL |
++			EXT3_IMMUTABLE_FL | EXT3_IXUNLINK_FL))) {
+ 			if (!capable(CAP_LINUX_IMMUTABLE))
+ 				goto flags_out;
+ 		}
+@@ -93,7 +129,7 @@ long ext3_ioctl(struct file *filp, unsig
+ 		if (err)
+ 			goto flags_err;
+ 
+-		flags = flags & EXT3_FL_USER_MODIFIABLE;
++		flags &= EXT3_FL_USER_MODIFIABLE;
+ 		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
+ 		ei->i_flags = flags;
+ 
+--- a/fs/ext3/namei.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext3/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <linux/quotaops.h>
+ #include <linux/buffer_head.h>
+ #include <linux/bio.h>
++#include <linux/vs_tag.h>
+ 
+ #include "namei.h"
+ #include "xattr.h"
+@@ -912,6 +913,7 @@ restart:
+ 				if (bh)
+ 					ll_rw_block(READ_META, 1, &bh);
+ 			}
++		dx_propagate_tag(nd, inode);
+ 		}
+ 		if ((bh = bh_use[ra_ptr++]) == NULL)
+ 			goto next;
+@@ -2446,6 +2448,7 @@ const struct inode_operations ext3_dir_i
+ 	.removexattr	= generic_removexattr,
+ #endif
+ 	.check_acl	= ext3_check_acl,
++	.sync_flags	= ext3_sync_flags,
+ };
+ 
+ const struct inode_operations ext3_special_inode_operations = {
+--- a/fs/ext3/super.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext3/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -789,7 +789,7 @@ enum {
+ 	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+ 	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+ 	Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
+-	Opt_grpquota
++	Opt_grpquota, Opt_tag, Opt_notag, Opt_tagid
+ };
+ 
+ static const match_table_t tokens = {
+@@ -842,6 +842,9 @@ static const match_table_t tokens = {
+ 	{Opt_usrquota, "usrquota"},
+ 	{Opt_barrier, "barrier=%u"},
+ 	{Opt_resize, "resize"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_err, NULL},
+ };
+ 
+@@ -934,6 +937,20 @@ static int parse_options (char *options,
+ 		case Opt_nouid32:
+ 			set_opt (sbi->s_mount_opt, NO_UID32);
+ 			break;
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++		case Opt_notag:
++			clear_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
+ 		case Opt_nocheck:
+ 			clear_opt (sbi->s_mount_opt, CHECK);
+ 			break;
+@@ -1665,6 +1682,9 @@ static int ext3_fill_super (struct super
+ 			    NULL, 0))
+ 		goto failed_mount;
+ 
++	if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;
++
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ 
+@@ -2534,6 +2554,14 @@ static int ext3_remount (struct super_bl
+ 	if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+ 		ext3_abort(sb, __func__, "Abort forced by user");
+ 
++	if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) &&
++		!(sb->s_flags & MS_TAGGED)) {
++		printk("EXT3-fs: %s: tagging not permitted on remount.\n",
++			sb->s_id);
++		err = -EINVAL;
++		goto restore_opts;
++	}
++
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ 
+--- a/fs/ext4/ext4.h	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/ext4.h	2011-06-10 13:03:02.000000000 +0200
+@@ -289,8 +289,12 @@ struct flex_groups {
+ #define EXT4_EXTENTS_FL			0x00080000 /* Inode uses extents */
+ #define EXT4_EA_INODE_FL	        0x00200000 /* Inode used for large EA */
+ #define EXT4_EOFBLOCKS_FL		0x00400000 /* Blocks allocated beyond EOF */
++#define EXT4_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
+ #define EXT4_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
+ 
++#define EXT4_BARRIER_FL			0x04000000 /* Barrier for chroot() */
++#define EXT4_COW_FL			0x20000000 /* Copy on Write marker */
++
+ #define EXT4_FL_USER_VISIBLE		0x004BDFFF /* User visible flags */
+ #define EXT4_FL_USER_MODIFIABLE		0x004B80FF /* User modifiable flags */
+ 
+@@ -552,7 +556,8 @@ struct ext4_inode {
+ 			__le16	l_i_file_acl_high;
+ 			__le16	l_i_uid_high;	/* these 2 fields */
+ 			__le16	l_i_gid_high;	/* were reserved2[0] */
+-			__u32	l_i_reserved2;
++			__le16	l_i_tag;	/* Context Tag */
++			__u16	l_i_reserved2;
+ 		} linux2;
+ 		struct {
+ 			__le16	h_i_reserved1;	/* Obsoleted fragment number/size which are removed in ext4 */
+@@ -666,6 +671,7 @@ do {									       \
+ #define i_gid_low	i_gid
+ #define i_uid_high	osd2.linux2.l_i_uid_high
+ #define i_gid_high	osd2.linux2.l_i_gid_high
++#define i_raw_tag	osd2.linux2.l_i_tag
+ #define i_reserved2	osd2.linux2.l_i_reserved2
+ 
+ #elif defined(__GNU__)
+@@ -840,6 +846,7 @@ struct ext4_inode_info {
+ #define EXT4_MOUNT_QUOTA		0x80000 /* Some quota option set */
+ #define EXT4_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
+ #define EXT4_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
++#define EXT4_MOUNT_TAGGED		0x400000 /* Enable Context Tags */
+ #define EXT4_MOUNT_JOURNAL_CHECKSUM	0x800000 /* Journal checksums */
+ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT	0x1000000 /* Journal Async Commit */
+ #define EXT4_MOUNT_I_VERSION            0x2000000 /* i_version support */
+@@ -1865,6 +1872,7 @@ extern int ext4_get_blocks(handle_t *han
+ 			   struct buffer_head *bh, int flags);
+ extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 			__u64 start, __u64 len);
++extern int ext4_sync_flags(struct inode *, int, int);
+ /* move_extent.c */
+ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
+ 			     __u64 start_orig, __u64 start_donor,
+--- a/fs/ext4/file.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -161,5 +161,6 @@ const struct inode_operations ext4_file_
+ 	.check_acl	= ext4_check_acl,
+ 	.fallocate	= ext4_fallocate,
+ 	.fiemap		= ext4_fiemap,
++	.sync_flags	= ext4_sync_flags,
+ };
+ 
+--- a/fs/ext4/ialloc.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/ialloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -22,6 +22,7 @@
+ #include <linux/random.h>
+ #include <linux/bitops.h>
+ #include <linux/blkdev.h>
++#include <linux/vs_tag.h>
+ #include <asm/byteorder.h>
+ 
+ #include "ext4.h"
+@@ -988,6 +989,7 @@ got:
+ 	} else
+ 		inode->i_gid = current_fsgid();
+ 	inode->i_mode = mode;
++	inode->i_tag = dx_current_fstag(sb);
+ 
+ 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+ 	/* This is the optimal IO size (for stat), not the fs block size */
+--- a/fs/ext4/inode.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -38,6 +38,7 @@
+ #include <linux/uio.h>
+ #include <linux/bio.h>
+ #include <linux/workqueue.h>
++#include <linux/vs_tag.h>
+ 
+ #include "ext4_jbd2.h"
+ #include "xattr.h"
+@@ -4446,7 +4447,7 @@ static void ext4_free_branches(handle_t 
+ 
+ int ext4_can_truncate(struct inode *inode)
+ {
+-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ 		return 0;
+ 	if (S_ISREG(inode->i_mode))
+ 		return 1;
+@@ -4799,41 +4800,64 @@ void ext4_set_inode_flags(struct inode *
+ {
+ 	unsigned int flags = EXT4_I(inode)->i_flags;
+ 
+-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
++
++	if (flags & EXT4_IMMUTABLE_FL)
++		inode->i_flags |= S_IMMUTABLE;
++	if (flags & EXT4_IXUNLINK_FL)
++		inode->i_flags |= S_IXUNLINK;
++
+ 	if (flags & EXT4_SYNC_FL)
+ 		inode->i_flags |= S_SYNC;
+ 	if (flags & EXT4_APPEND_FL)
+ 		inode->i_flags |= S_APPEND;
+-	if (flags & EXT4_IMMUTABLE_FL)
+-		inode->i_flags |= S_IMMUTABLE;
+ 	if (flags & EXT4_NOATIME_FL)
+ 		inode->i_flags |= S_NOATIME;
+ 	if (flags & EXT4_DIRSYNC_FL)
+ 		inode->i_flags |= S_DIRSYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (flags & EXT4_BARRIER_FL)
++		inode->i_vflags |= V_BARRIER;
++	if (flags & EXT4_COW_FL)
++		inode->i_vflags |= V_COW;
+ }
+ 
+ /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
+ void ext4_get_inode_flags(struct ext4_inode_info *ei)
+ {
+-	unsigned int vfs_fl;
++	unsigned int vfs_fl, vflags;
+ 	unsigned long old_fl, new_fl;
+ 
+ 	do {
+ 		vfs_fl = ei->vfs_inode.i_flags;
++		vflags = ei->vfs_inode.i_vflags;
+ 		old_fl = ei->i_flags;
+-		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
+-				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
+-				EXT4_DIRSYNC_FL);
++		new_fl = old_fl & ~(EXT4_SYNC_FL | EXT4_APPEND_FL |
++				EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL |
++				EXT4_NOATIME_FL | EXT4_DIRSYNC_FL |
++				EXT4_BARRIER_FL | EXT4_COW_FL);
++
++		if (vfs_fl & S_IMMUTABLE)
++			new_fl |= EXT4_IMMUTABLE_FL;
++		if (vfs_fl & S_IXUNLINK)
++			new_fl |= EXT4_IXUNLINK_FL;
++
+ 		if (vfs_fl & S_SYNC)
+ 			new_fl |= EXT4_SYNC_FL;
+ 		if (vfs_fl & S_APPEND)
+ 			new_fl |= EXT4_APPEND_FL;
+-		if (vfs_fl & S_IMMUTABLE)
+-			new_fl |= EXT4_IMMUTABLE_FL;
+ 		if (vfs_fl & S_NOATIME)
+ 			new_fl |= EXT4_NOATIME_FL;
+ 		if (vfs_fl & S_DIRSYNC)
+ 			new_fl |= EXT4_DIRSYNC_FL;
++
++		if (vflags & V_BARRIER)
++			new_fl |= EXT4_BARRIER_FL;
++		if (vflags & V_COW)
++			new_fl |= EXT4_COW_FL;
+ 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
+ }
+ 
+@@ -4869,6 +4893,8 @@ struct inode *ext4_iget(struct super_blo
+ 	journal_t *journal = EXT4_SB(sb)->s_journal;
+ 	long ret;
+ 	int block;
++	uid_t uid;
++	gid_t gid;
+ 
+ 	inode = iget_locked(sb, ino);
+ 	if (!inode)
+@@ -4884,12 +4910,16 @@ struct inode *ext4_iget(struct super_blo
+ 		goto bad_inode;
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+-	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+-	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
++	uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
++	gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ 	if (!(test_opt(inode->i_sb, NO_UID32))) {
+-		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+-		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
++		uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
++		gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ 	}
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++		le16_to_cpu(raw_inode->i_raw_tag));
+ 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+ 
+ 	ei->i_state_flags = 0;
+@@ -5111,6 +5141,8 @@ static int ext4_do_update_inode(handle_t
+ 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
+ 	struct ext4_inode_info *ei = EXT4_I(inode);
+ 	struct buffer_head *bh = iloc->bh;
++	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+ 	int err = 0, rc, block;
+ 
+ 	/* For fields not not tracking in the in-memory inode,
+@@ -5121,29 +5153,32 @@ static int ext4_do_update_inode(handle_t
+ 	ext4_get_inode_flags(ei);
+ 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+ 	if (!(test_opt(inode->i_sb, NO_UID32))) {
+-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
++		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
++		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
+ /*
+  * Fix up interoperability with old kernels. Otherwise, old inodes get
+  * re-used with the upper 16 bits of the uid/gid intact
+  */
+ 		if (!ei->i_dtime) {
+ 			raw_inode->i_uid_high =
+-				cpu_to_le16(high_16_bits(inode->i_uid));
++				cpu_to_le16(high_16_bits(uid));
+ 			raw_inode->i_gid_high =
+-				cpu_to_le16(high_16_bits(inode->i_gid));
++				cpu_to_le16(high_16_bits(gid));
+ 		} else {
+ 			raw_inode->i_uid_high = 0;
+ 			raw_inode->i_gid_high = 0;
+ 		}
+ 	} else {
+ 		raw_inode->i_uid_low =
+-			cpu_to_le16(fs_high2lowuid(inode->i_uid));
++			cpu_to_le16(fs_high2lowuid(uid));
+ 		raw_inode->i_gid_low =
+-			cpu_to_le16(fs_high2lowgid(inode->i_gid));
++			cpu_to_le16(fs_high2lowgid(gid));
+ 		raw_inode->i_uid_high = 0;
+ 		raw_inode->i_gid_high = 0;
+ 	}
++#ifdef CONFIG_TAGGING_INTERN
++	raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
++#endif
+ 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+ 
+ 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+@@ -5329,7 +5364,8 @@ int ext4_setattr(struct dentry *dentry, 
+ 		return error;
+ 
+ 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+-		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
++		(ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
++		(ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ 		handle_t *handle;
+ 
+ 		/* (user+group)*(old+new) structure, inode write (sb,
+@@ -5351,6 +5387,8 @@ int ext4_setattr(struct dentry *dentry, 
+ 			inode->i_uid = attr->ia_uid;
+ 		if (attr->ia_valid & ATTR_GID)
+ 			inode->i_gid = attr->ia_gid;
++		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++			inode->i_tag = attr->ia_tag;
+ 		error = ext4_mark_inode_dirty(handle, inode);
+ 		ext4_journal_stop(handle);
+ 	}
+--- a/fs/ext4/ioctl.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,10 +14,39 @@
+ #include <linux/compat.h>
+ #include <linux/mount.h>
+ #include <linux/file.h>
++#include <linux/vs_tag.h>
+ #include <asm/uaccess.h>
+ #include "ext4_jbd2.h"
+ #include "ext4.h"
+ 
++
++int ext4_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	handle_t *handle = NULL;
++	struct ext4_iloc iloc;
++	int err;
++
++	handle = ext4_journal_start(inode, 1);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++
++	if (IS_SYNC(inode))
++		ext4_handle_sync(handle);
++	err = ext4_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		goto flags_err;
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	ext4_get_inode_flags(EXT4_I(inode));
++	inode->i_ctime = ext4_current_time(inode);
++
++	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
++flags_err:
++	ext4_journal_stop(handle);
++	return err;
++}
++
+ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
+@@ -50,6 +79,11 @@ long ext4_ioctl(struct file *filp, unsig
+ 
+ 		flags = ext4_mask_flags(inode->i_mode, flags);
+ 
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -EACCES;
++		}
++
+ 		err = -EPERM;
+ 		mutex_lock(&inode->i_mutex);
+ 		/* Is it quota file? Do not allow user to mess with it */
+@@ -67,7 +101,9 @@ long ext4_ioctl(struct file *filp, unsig
+ 		 *
+ 		 * This test looks nicer. Thanks to Pauline Middelink
+ 		 */
+-		if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
++		if ((oldflags & EXT4_IMMUTABLE_FL) ||
++			((flags ^ oldflags) & (EXT4_APPEND_FL |
++			EXT4_IMMUTABLE_FL | EXT4_IXUNLINK_FL))) {
+ 			if (!capable(CAP_LINUX_IMMUTABLE))
+ 				goto flags_out;
+ 		}
+--- a/fs/ext4/namei.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -34,6 +34,7 @@
+ #include <linux/quotaops.h>
+ #include <linux/buffer_head.h>
+ #include <linux/bio.h>
++#include <linux/vs_tag.h>
+ #include "ext4.h"
+ #include "ext4_jbd2.h"
+ 
+@@ -941,6 +942,7 @@ restart:
+ 				if (bh)
+ 					ll_rw_block(READ_META, 1, &bh);
+ 			}
++		dx_propagate_tag(nd, inode);
+ 		}
+ 		if ((bh = bh_use[ra_ptr++]) == NULL)
+ 			goto next;
+@@ -2543,6 +2545,7 @@ const struct inode_operations ext4_dir_i
+ #endif
+ 	.check_acl	= ext4_check_acl,
+ 	.fiemap         = ext4_fiemap,
++	.sync_flags	= ext4_sync_flags,
+ };
+ 
+ const struct inode_operations ext4_special_inode_operations = {
+--- a/fs/ext4/super.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/ext4/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1100,6 +1100,7 @@ enum {
+ 	Opt_block_validity, Opt_noblock_validity,
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+ 	Opt_discard, Opt_nodiscard,
++	Opt_tag, Opt_notag, Opt_tagid
+ };
+ 
+ static const match_table_t tokens = {
+@@ -1167,6 +1168,9 @@ static const match_table_t tokens = {
+ 	{Opt_noauto_da_alloc, "noauto_da_alloc"},
+ 	{Opt_discard, "discard"},
+ 	{Opt_nodiscard, "nodiscard"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_err, NULL},
+ };
+ 
+@@ -1269,6 +1273,20 @@ static int parse_options(char *options, 
+ 		case Opt_nouid32:
+ 			set_opt(sbi->s_mount_opt, NO_UID32);
+ 			break;
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++		case Opt_notag:
++			clear_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			set_opt (sbi->s_mount_opt, TAGGED);
++			break;
++#endif
+ 		case Opt_debug:
+ 			set_opt(sbi->s_mount_opt, DEBUG);
+ 			break;
+@@ -2471,6 +2489,9 @@ static int ext4_fill_super(struct super_
+ 			   &journal_ioprio, NULL, 0))
+ 		goto failed_mount;
+ 
++	if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;
++
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ 
+@@ -3522,6 +3543,14 @@ static int ext4_remount(struct super_blo
+ 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
+ 		ext4_abort(sb, __func__, "Abort forced by user");
+ 
++	if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) &&
++		!(sb->s_flags & MS_TAGGED)) {
++		printk("EXT4-fs: %s: tagging not permitted on remount.\n",
++			sb->s_id);
++		err = -EINVAL;
++		goto restore_opts;
++	}
++
+ 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ 		((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ 
+--- a/fs/fcntl.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/fcntl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -19,6 +19,7 @@
+ #include <linux/signal.h>
+ #include <linux/rcupdate.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_limit.h>
+ 
+ #include <asm/poll.h>
+ #include <asm/siginfo.h>
+@@ -102,6 +103,8 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldf
+ 
+ 	if (tofree)
+ 		filp_close(tofree, files);
++	else
++		vx_openfd_inc(newfd);	/* fd was unused */
+ 
+ 	return newfd;
+ 
+@@ -426,6 +429,8 @@ SYSCALL_DEFINE3(fcntl, unsigned int, fd,
+ 	filp = fget(fd);
+ 	if (!filp)
+ 		goto out;
++	if (!vx_files_avail(1))
++		goto out;
+ 
+ 	err = security_file_fcntl(filp, cmd, arg);
+ 	if (err) {
+--- a/fs/file.c	2009-12-03 20:02:51.000000000 +0100
++++ a/fs/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/rcupdate.h>
+ #include <linux/workqueue.h>
++#include <linux/vs_limit.h>
+ 
+ struct fdtable_defer {
+ 	spinlock_t lock;
+@@ -368,6 +369,8 @@ struct files_struct *dup_fd(struct files
+ 		struct file *f = *old_fds++;
+ 		if (f) {
+ 			get_file(f);
++			/* TODO: sum it first for check and performance */
++			vx_openfd_inc(open_files - i);
+ 		} else {
+ 			/*
+ 			 * The fd may be claimed in the fd bitmap but not yet
+@@ -476,6 +479,7 @@ repeat:
+ 	else
+ 		FD_CLR(fd, fdt->close_on_exec);
+ 	error = fd;
++	vx_openfd_inc(fd);
+ #if 1
+ 	/* Sanity check */
+ 	if (rcu_dereference(fdt->fd[fd]) != NULL) {
+--- a/fs/file_table.c	2011-05-29 23:42:26.000000000 +0200
++++ a/fs/file_table.c	2011-06-10 13:03:02.000000000 +0200
+@@ -22,6 +22,8 @@
+ #include <linux/fsnotify.h>
+ #include <linux/sysctl.h>
+ #include <linux/percpu_counter.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
+ 
+ #include <asm/atomic.h>
+ 
+@@ -131,6 +133,8 @@ struct file *get_empty_filp(void)
+ 	spin_lock_init(&f->f_lock);
+ 	eventpoll_init_file(f);
+ 	/* f->f_version: 0 */
++	f->f_xid = vx_current_xid();
++	vx_files_inc(f);
+ 	return f;
+ 
+ over:
+@@ -285,6 +289,8 @@ void __fput(struct file *file)
+ 		cdev_put(inode->i_cdev);
+ 	fops_put(file->f_op);
+ 	put_pid(file->f_owner.pid);
++	vx_files_dec(file);
++	file->f_xid = 0;
+ 	file_kill(file);
+ 	if (file->f_mode & FMODE_WRITE)
+ 		drop_file_write_access(file);
+@@ -352,6 +358,8 @@ void put_filp(struct file *file)
+ {
+ 	if (atomic_long_dec_and_test(&file->f_count)) {
+ 		security_file_free(file);
++		vx_files_dec(file);
++		file->f_xid = 0;
+ 		file_kill(file);
+ 		file_free(file);
+ 	}
+--- a/fs/fs_struct.c	2009-06-11 17:13:04.000000000 +0200
++++ a/fs/fs_struct.c	2011-06-10 13:03:02.000000000 +0200
+@@ -4,6 +4,7 @@
+ #include <linux/path.h>
+ #include <linux/slab.h>
+ #include <linux/fs_struct.h>
++#include <linux/vserver/global.h>
+ 
+ /*
+  * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
+@@ -77,6 +78,7 @@ void free_fs_struct(struct fs_struct *fs
+ {
+ 	path_put(&fs->root);
+ 	path_put(&fs->pwd);
++	atomic_dec(&vs_global_fs);
+ 	kmem_cache_free(fs_cachep, fs);
+ }
+ 
+@@ -112,6 +114,7 @@ struct fs_struct *copy_fs_struct(struct 
+ 		fs->pwd = old->pwd;
+ 		path_get(&old->pwd);
+ 		read_unlock(&old->lock);
++		atomic_inc(&vs_global_fs);
+ 	}
+ 	return fs;
+ }
+--- a/fs/gfs2/file.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/gfs2/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -132,6 +132,9 @@ static const u32 fsflags_to_gfs2[32] = {
+ 	[7] = GFS2_DIF_NOATIME,
+ 	[12] = GFS2_DIF_EXHASH,
+ 	[14] = GFS2_DIF_INHERIT_JDATA,
++	[27] = GFS2_DIF_IXUNLINK,
++	[26] = GFS2_DIF_BARRIER,
++	[29] = GFS2_DIF_COW,
+ };
+ 
+ static const u32 gfs2_to_fsflags[32] = {
+@@ -141,6 +144,9 @@ static const u32 gfs2_to_fsflags[32] = {
+ 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
+ 	[gfs2fl_ExHash] = FS_INDEX_FL,
+ 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
++	[gfs2fl_IXUnlink] = FS_IXUNLINK_FL,
++	[gfs2fl_Barrier] = FS_BARRIER_FL,
++	[gfs2fl_Cow] = FS_COW_FL,
+ };
+ 
+ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
+@@ -171,10 +177,16 @@ void gfs2_set_inode_flags(struct inode *
+ {
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+ 	unsigned int flags = inode->i_flags;
++	unsigned int vflags = inode->i_vflags;
++
++	flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+ 
+-	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
+ 		flags |= S_IMMUTABLE;
++	if (ip->i_diskflags & GFS2_DIF_IXUNLINK)
++		flags |= S_IXUNLINK;
++
+ 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
+ 		flags |= S_APPEND;
+ 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
+@@ -182,6 +194,43 @@ void gfs2_set_inode_flags(struct inode *
+ 	if (ip->i_diskflags & GFS2_DIF_SYNC)
+ 		flags |= S_SYNC;
+ 	inode->i_flags = flags;
++
++	vflags &= ~(V_BARRIER | V_COW);
++
++	if (ip->i_diskflags & GFS2_DIF_BARRIER)
++		vflags |= V_BARRIER;
++	if (ip->i_diskflags & GFS2_DIF_COW)
++		vflags |= V_COW;
++	inode->i_vflags = vflags;
++}
++
++void gfs2_get_inode_flags(struct inode *inode)
++{
++	struct gfs2_inode *ip = GFS2_I(inode);
++	unsigned int flags = inode->i_flags;
++	unsigned int vflags = inode->i_vflags;
++
++	ip->i_diskflags &= ~(GFS2_DIF_APPENDONLY |
++			GFS2_DIF_NOATIME | GFS2_DIF_SYNC |
++			GFS2_DIF_IMMUTABLE | GFS2_DIF_IXUNLINK |
++			GFS2_DIF_BARRIER | GFS2_DIF_COW);
++
++	if (flags & S_IMMUTABLE)
++		ip->i_diskflags |= GFS2_DIF_IMMUTABLE;
++	if (flags & S_IXUNLINK)
++		ip->i_diskflags |= GFS2_DIF_IXUNLINK;
++
++	if (flags & S_APPEND)
++		ip->i_diskflags |= GFS2_DIF_APPENDONLY;
++	if (flags & S_NOATIME)
++		ip->i_diskflags |= GFS2_DIF_NOATIME;
++	if (flags & S_SYNC)
++		ip->i_diskflags |= GFS2_DIF_SYNC;
++
++	if (vflags & V_BARRIER)
++		ip->i_diskflags |= GFS2_DIF_BARRIER;
++	if (vflags & V_COW)
++		ip->i_diskflags |= GFS2_DIF_COW;
+ }
+ 
+ /* Flags that can be set by user space */
+@@ -293,6 +342,37 @@ static int gfs2_set_flags(struct file *f
+ 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
+ }
+ 
++int gfs2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	struct gfs2_inode *ip = GFS2_I(inode);
++	struct gfs2_sbd *sdp = GFS2_SB(inode);
++	struct buffer_head *bh;
++	struct gfs2_holder gh;
++	int error;
++
++	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
++	if (error)
++		return error;
++	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
++	if (error)
++		goto out;
++	error = gfs2_meta_inode_buffer(ip, &bh);
++	if (error)
++		goto out_trans_end;
++	gfs2_trans_add_bh(ip->i_gl, bh, 1);
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	gfs2_get_inode_flags(inode);
++	gfs2_dinode_out(ip, bh->b_data);
++	brelse(bh);
++	gfs2_set_aops(inode);
++out_trans_end:
++	gfs2_trans_end(sdp);
++out:
++	gfs2_glock_dq_uninit(&gh);
++	return error;
++}
++
+ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	switch(cmd) {
+--- a/fs/gfs2/inode.h	2009-09-10 15:26:22.000000000 +0200
++++ a/fs/gfs2/inode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -109,6 +109,7 @@ extern const struct file_operations gfs2
+ extern const struct file_operations gfs2_dir_fops_nolock;
+ 
+ extern void gfs2_set_inode_flags(struct inode *inode);
++extern int gfs2_sync_flags(struct inode *inode, int flags, int vflags);
+  
+ #ifdef CONFIG_GFS2_FS_LOCKING_DLM
+ extern const struct file_operations gfs2_file_fops;
+--- a/fs/gfs2/ops_inode.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/gfs2/ops_inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1400,6 +1400,7 @@ const struct inode_operations gfs2_file_
+ 	.listxattr = gfs2_listxattr,
+ 	.removexattr = gfs2_removexattr,
+ 	.fiemap = gfs2_fiemap,
++	.sync_flags = gfs2_sync_flags,
+ };
+ 
+ const struct inode_operations gfs2_dir_iops = {
+@@ -1420,6 +1421,7 @@ const struct inode_operations gfs2_dir_i
+ 	.listxattr = gfs2_listxattr,
+ 	.removexattr = gfs2_removexattr,
+ 	.fiemap = gfs2_fiemap,
++	.sync_flags = gfs2_sync_flags,
+ };
+ 
+ const struct inode_operations gfs2_symlink_iops = {
+--- a/fs/hfsplus/ioctl.c	2008-12-25 00:26:37.000000000 +0100
++++ a/fs/hfsplus/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -17,6 +17,7 @@
+ #include <linux/mount.h>
+ #include <linux/sched.h>
+ #include <linux/xattr.h>
++#include <linux/mount.h>
+ #include <asm/uaccess.h>
+ #include "hfsplus_fs.h"
+ 
+--- a/fs/inode.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -133,6 +133,9 @@ int inode_init_always(struct super_block
+ 	struct address_space *const mapping = &inode->i_data;
+ 
+ 	inode->i_sb = sb;
++
++	/* essential because of inode slab reuse */
++	inode->i_tag = 0;
+ 	inode->i_blkbits = sb->s_blocksize_bits;
+ 	inode->i_flags = 0;
+ 	atomic_set(&inode->i_count, 1);
+@@ -153,6 +156,7 @@ int inode_init_always(struct super_block
+ 	inode->i_bdev = NULL;
+ 	inode->i_cdev = NULL;
+ 	inode->i_rdev = 0;
++	inode->i_mdev = 0;
+ 	inode->dirtied_when = 0;
+ 
+ 	if (security_inode_alloc(inode))
+@@ -307,6 +311,8 @@ void __iget(struct inode *inode)
+ 	inodes_stat.nr_unused--;
+ }
+ 
++EXPORT_SYMBOL_GPL(__iget);
++
+ /**
+  * clear_inode - clear an inode
+  * @inode: inode to clear
+@@ -1611,9 +1617,11 @@ void init_special_inode(struct inode *in
+ 	if (S_ISCHR(mode)) {
+ 		inode->i_fop = &def_chr_fops;
+ 		inode->i_rdev = rdev;
++		inode->i_mdev = rdev;
+ 	} else if (S_ISBLK(mode)) {
+ 		inode->i_fop = &def_blk_fops;
+ 		inode->i_rdev = rdev;
++		inode->i_mdev = rdev;
+ 	} else if (S_ISFIFO(mode))
+ 		inode->i_fop = &def_fifo_fops;
+ 	else if (S_ISSOCK(mode))
+--- a/fs/ioctl.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -16,6 +16,9 @@
+ #include <linux/writeback.h>
+ #include <linux/buffer_head.h>
+ #include <linux/falloc.h>
++#include <linux/proc_fs.h>
++#include <linux/vserver/inode.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/ioctls.h>
+ 
+--- a/fs/ioprio.c	2009-03-24 14:22:26.000000000 +0100
++++ a/fs/ioprio.c	2011-06-10 13:03:02.000000000 +0200
+@@ -26,6 +26,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/security.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_base.h>
+ 
+ int set_task_ioprio(struct task_struct *task, int ioprio)
+ {
+@@ -123,6 +124,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which, 
+ 			else
+ 				pgrp = find_vpid(who);
+ 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++					continue;
+ 				ret = set_task_ioprio(p, ioprio);
+ 				if (ret)
+ 					break;
+@@ -212,6 +215,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, 
+ 			else
+ 				pgrp = find_vpid(who);
+ 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++					continue;
+ 				tmpio = get_task_ioprio(p);
+ 				if (tmpio < 0)
+ 					continue;
+--- a/fs/jfs/acl.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/jfs/acl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -216,7 +216,8 @@ int jfs_setattr(struct dentry *dentry, s
+ 		return rc;
+ 
+ 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
+-	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
++	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
++	    (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
+ 		if (vfs_dq_transfer(inode, iattr))
+ 			return -EDQUOT;
+ 	}
+--- a/fs/jfs/file.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/jfs/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -98,6 +98,7 @@ const struct inode_operations jfs_file_i
+ 	.setattr	= jfs_setattr,
+ 	.check_acl	= jfs_check_acl,
+ #endif
++	.sync_flags	= jfs_sync_flags,
+ };
+ 
+ const struct file_operations jfs_file_operations = {
+--- a/fs/jfs/ioctl.c	2008-12-25 00:26:37.000000000 +0100
++++ a/fs/jfs/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -11,6 +11,7 @@
+ #include <linux/mount.h>
+ #include <linux/time.h>
+ #include <linux/sched.h>
++#include <linux/mount.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+ 
+@@ -52,6 +53,16 @@ static long jfs_map_ext2(unsigned long f
+ }
+ 
+ 
++int jfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	jfs_get_inode_flags(JFS_IP(inode));
++	inode->i_ctime = CURRENT_TIME_SEC;
++	mark_inode_dirty(inode);
++	return 0;
++}
++
+ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_dentry->d_inode;
+@@ -85,6 +96,11 @@ long jfs_ioctl(struct file *filp, unsign
+ 		if (!S_ISDIR(inode->i_mode))
+ 			flags &= ~JFS_DIRSYNC_FL;
+ 
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -EACCES;
++		}
++
+ 		/* Is it quota file? Do not allow user to mess with it */
+ 		if (IS_NOQUOTA(inode)) {
+ 			err = -EPERM;
+@@ -102,8 +118,8 @@ long jfs_ioctl(struct file *filp, unsign
+ 		 * the relevant capability.
+ 		 */
+ 		if ((oldflags & JFS_IMMUTABLE_FL) ||
+-			((flags ^ oldflags) &
+-			(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
++			((flags ^ oldflags) & (JFS_APPEND_FL |
++			JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL))) {
+ 			if (!capable(CAP_LINUX_IMMUTABLE)) {
+ 				mutex_unlock(&inode->i_mutex);
+ 				err = -EPERM;
+@@ -111,7 +127,7 @@ long jfs_ioctl(struct file *filp, unsign
+ 			}
+ 		}
+ 
+-		flags = flags & JFS_FL_USER_MODIFIABLE;
++		flags &= JFS_FL_USER_MODIFIABLE;
+ 		flags |= oldflags & ~JFS_FL_USER_MODIFIABLE;
+ 		jfs_inode->mode2 = flags;
+ 
+--- a/fs/jfs/jfs_dinode.h	2008-12-25 00:26:37.000000000 +0100
++++ a/fs/jfs/jfs_dinode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -161,9 +161,13 @@ struct dinode {
+ 
+ #define JFS_APPEND_FL		0x01000000 /* writes to file may only append */
+ #define JFS_IMMUTABLE_FL	0x02000000 /* Immutable file */
++#define JFS_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
+ 
+-#define JFS_FL_USER_VISIBLE	0x03F80000
+-#define JFS_FL_USER_MODIFIABLE	0x03F80000
++#define JFS_BARRIER_FL		0x04000000 /* Barrier for chroot() */
++#define JFS_COW_FL		0x20000000 /* Copy on Write marker */
++
++#define JFS_FL_USER_VISIBLE	0x07F80000
++#define JFS_FL_USER_MODIFIABLE	0x07F80000
+ #define JFS_FL_INHERIT		0x03C80000
+ 
+ /* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
+--- a/fs/jfs/jfs_filsys.h	2008-12-25 00:26:37.000000000 +0100
++++ a/fs/jfs/jfs_filsys.h	2011-06-10 13:03:02.000000000 +0200
+@@ -263,6 +263,7 @@
+ #define JFS_NAME_MAX	255
+ #define JFS_PATH_MAX	BPSIZE
+ 
++#define JFS_TAGGED		0x00800000	/* Context Tagging */
+ 
+ /*
+  *	file system state (superblock state)
+--- a/fs/jfs/jfs_imap.c	2009-09-10 15:26:22.000000000 +0200
++++ a/fs/jfs/jfs_imap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -45,6 +45,7 @@
+ #include <linux/buffer_head.h>
+ #include <linux/pagemap.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ 
+ #include "jfs_incore.h"
+ #include "jfs_inode.h"
+@@ -3059,6 +3060,8 @@ static int copy_from_dinode(struct dinod
+ {
+ 	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
+ 	struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
++	uid_t uid;
++	gid_t gid;
+ 
+ 	jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
+ 	jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
+@@ -3079,14 +3082,18 @@ static int copy_from_dinode(struct dinod
+ 	}
+ 	ip->i_nlink = le32_to_cpu(dip->di_nlink);
+ 
+-	jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
++	uid = le32_to_cpu(dip->di_uid);
++	gid = le32_to_cpu(dip->di_gid);
++	ip->i_tag = INOTAG_TAG(DX_TAG(ip), uid, gid, 0);
++
++	jfs_ip->saved_uid = INOTAG_UID(DX_TAG(ip), uid, gid);
+ 	if (sbi->uid == -1)
+ 		ip->i_uid = jfs_ip->saved_uid;
+ 	else {
+ 		ip->i_uid = sbi->uid;
+ 	}
+ 
+-	jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
++	jfs_ip->saved_gid = INOTAG_GID(DX_TAG(ip), uid, gid);
+ 	if (sbi->gid == -1)
+ 		ip->i_gid = jfs_ip->saved_gid;
+ 	else {
+@@ -3151,14 +3158,12 @@ static void copy_to_dinode(struct dinode
+ 	dip->di_size = cpu_to_le64(ip->i_size);
+ 	dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
+ 	dip->di_nlink = cpu_to_le32(ip->i_nlink);
+-	if (sbi->uid == -1)
+-		dip->di_uid = cpu_to_le32(ip->i_uid);
+-	else
+-		dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
+-	if (sbi->gid == -1)
+-		dip->di_gid = cpu_to_le32(ip->i_gid);
+-	else
+-		dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
++
++	dip->di_uid = cpu_to_le32(TAGINO_UID(DX_TAG(ip),
++		(sbi->uid == -1) ? ip->i_uid : jfs_ip->saved_uid, ip->i_tag));
++	dip->di_gid = cpu_to_le32(TAGINO_GID(DX_TAG(ip),
++		(sbi->gid == -1) ? ip->i_gid : jfs_ip->saved_gid, ip->i_tag));
++
+ 	jfs_get_inode_flags(jfs_ip);
+ 	/*
+ 	 * mode2 is only needed for storing the higher order bits.
+--- a/fs/jfs/jfs_inode.c	2009-06-11 17:13:05.000000000 +0200
++++ a/fs/jfs/jfs_inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ 
+ #include <linux/fs.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ #include "jfs_incore.h"
+ #include "jfs_inode.h"
+ #include "jfs_filsys.h"
+@@ -30,29 +31,46 @@ void jfs_set_inode_flags(struct inode *i
+ {
+ 	unsigned int flags = JFS_IP(inode)->mode2;
+ 
+-	inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
+-		S_NOATIME | S_DIRSYNC | S_SYNC);
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
++		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+ 
+ 	if (flags & JFS_IMMUTABLE_FL)
+ 		inode->i_flags |= S_IMMUTABLE;
++	if (flags & JFS_IXUNLINK_FL)
++		inode->i_flags |= S_IXUNLINK;
++
++	if (flags & JFS_SYNC_FL)
++		inode->i_flags |= S_SYNC;
+ 	if (flags & JFS_APPEND_FL)
+ 		inode->i_flags |= S_APPEND;
+ 	if (flags & JFS_NOATIME_FL)
+ 		inode->i_flags |= S_NOATIME;
+ 	if (flags & JFS_DIRSYNC_FL)
+ 		inode->i_flags |= S_DIRSYNC;
+-	if (flags & JFS_SYNC_FL)
+-		inode->i_flags |= S_SYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (flags & JFS_BARRIER_FL)
++		inode->i_vflags |= V_BARRIER;
++	if (flags & JFS_COW_FL)
++		inode->i_vflags |= V_COW;
+ }
+ 
+ void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
+ {
+ 	unsigned int flags = jfs_ip->vfs_inode.i_flags;
++	unsigned int vflags = jfs_ip->vfs_inode.i_vflags;
++
++	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_IXUNLINK_FL |
++			   JFS_APPEND_FL | JFS_NOATIME_FL |
++			   JFS_DIRSYNC_FL | JFS_SYNC_FL |
++			   JFS_BARRIER_FL | JFS_COW_FL);
+ 
+-	jfs_ip->mode2 &= ~(JFS_IMMUTABLE_FL | JFS_APPEND_FL | JFS_NOATIME_FL |
+-			   JFS_DIRSYNC_FL | JFS_SYNC_FL);
+ 	if (flags & S_IMMUTABLE)
+ 		jfs_ip->mode2 |= JFS_IMMUTABLE_FL;
++	if (flags & S_IXUNLINK)
++		jfs_ip->mode2 |= JFS_IXUNLINK_FL;
++
+ 	if (flags & S_APPEND)
+ 		jfs_ip->mode2 |= JFS_APPEND_FL;
+ 	if (flags & S_NOATIME)
+@@ -61,6 +79,11 @@ void jfs_get_inode_flags(struct jfs_inod
+ 		jfs_ip->mode2 |= JFS_DIRSYNC_FL;
+ 	if (flags & S_SYNC)
+ 		jfs_ip->mode2 |= JFS_SYNC_FL;
++
++	if (vflags & V_BARRIER)
++		jfs_ip->mode2 |= JFS_BARRIER_FL;
++	if (vflags & V_COW)
++		jfs_ip->mode2 |= JFS_COW_FL;
+ }
+ 
+ /*
+@@ -105,6 +128,7 @@ struct inode *ialloc(struct inode *paren
+ 			mode |= S_ISGID;
+ 	} else
+ 		inode->i_gid = current_fsgid();
++	inode->i_tag = dx_current_fstag(sb);
+ 
+ 	/*
+ 	 * New inodes need to save sane values on disk when
+--- a/fs/jfs/jfs_inode.h	2009-06-11 17:13:05.000000000 +0200
++++ a/fs/jfs/jfs_inode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s
+ extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ 	int fh_len, int fh_type);
+ extern void jfs_set_inode_flags(struct inode *);
++extern int jfs_sync_flags(struct inode *, int, int);
+ extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+ 
+ extern const struct address_space_operations jfs_aops;
+--- a/fs/jfs/namei.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/jfs/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -21,6 +21,7 @@
+ #include <linux/ctype.h>
+ #include <linux/quotaops.h>
+ #include <linux/exportfs.h>
++#include <linux/vs_tag.h>
+ #include "jfs_incore.h"
+ #include "jfs_superblock.h"
+ #include "jfs_inode.h"
+@@ -1476,6 +1477,7 @@ static struct dentry *jfs_lookup(struct 
+ 		return ERR_CAST(ip);
+ 	}
+ 
++	dx_propagate_tag(nd, ip);
+ 	dentry = d_splice_alias(ip, dentry);
+ 
+ 	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
+@@ -1545,6 +1547,7 @@ const struct inode_operations jfs_dir_in
+ 	.setattr	= jfs_setattr,
+ 	.check_acl	= jfs_check_acl,
+ #endif
++	.sync_flags	= jfs_sync_flags,
+ };
+ 
+ const struct file_operations jfs_dir_operations = {
+--- a/fs/jfs/super.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/jfs/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -192,7 +192,8 @@ static void jfs_put_super(struct super_b
+ enum {
+ 	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
+ 	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
+-	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
++	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
++	Opt_tag, Opt_notag, Opt_tagid
+ };
+ 
+ static const match_table_t tokens = {
+@@ -202,6 +203,10 @@ static const match_table_t tokens = {
+ 	{Opt_resize, "resize=%u"},
+ 	{Opt_resize_nosize, "resize"},
+ 	{Opt_errors, "errors=%s"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
++	{Opt_tag, "tagxid"},
+ 	{Opt_ignore, "noquota"},
+ 	{Opt_ignore, "quota"},
+ 	{Opt_usrquota, "usrquota"},
+@@ -336,6 +341,20 @@ static int parse_options(char *options, 
+ 			}
+ 			break;
+ 		}
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			*flag |= JFS_TAGGED;
++			break;
++		case Opt_notag:
++			*flag &= JFS_TAGGED;
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			*flag |= JFS_TAGGED;
++			break;
++#endif
+ 		default:
+ 			printk("jfs: Unrecognized mount option \"%s\" "
+ 					" or missing value\n", p);
+@@ -366,6 +385,12 @@ static int jfs_remount(struct super_bloc
+ 	if (!parse_options(data, sb, &newLVSize, &flag)) {
+ 		return -EINVAL;
+ 	}
++	if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
++		printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
++			sb->s_id);
++		return -EINVAL;
++	}
++
+ 	lock_kernel();
+ 	if (newLVSize) {
+ 		if (sb->s_flags & MS_RDONLY) {
+@@ -449,6 +474,9 @@ static int jfs_fill_super(struct super_b
+ #ifdef CONFIG_JFS_POSIX_ACL
+ 	sb->s_flags |= MS_POSIXACL;
+ #endif
++	/* map mount option tagxid */
++	if (sbi->flag & JFS_TAGGED)
++		sb->s_flags |= MS_TAGGED;
+ 
+ 	if (newLVSize) {
+ 		printk(KERN_ERR "resize option for remount only\n");
+--- a/fs/libfs.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/libfs.c	2011-06-10 13:03:02.000000000 +0200
+@@ -127,7 +127,8 @@ static inline unsigned char dt_type(stru
+  * both impossible due to the lock on directory.
+  */
+ 
+-int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
++static inline int do_dcache_readdir_filter(struct file *filp,
++	void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry))
+ {
+ 	struct dentry *dentry = filp->f_path.dentry;
+ 	struct dentry *cursor = filp->private_data;
+@@ -160,6 +161,8 @@ int dcache_readdir(struct file * filp, v
+ 				next = list_entry(p, struct dentry, d_u.d_child);
+ 				if (d_unhashed(next) || !next->d_inode)
+ 					continue;
++				if (filter && !filter(next))
++					continue;
+ 
+ 				spin_unlock(&dcache_lock);
+ 				if (filldir(dirent, next->d_name.name, 
+@@ -178,6 +181,18 @@ int dcache_readdir(struct file * filp, v
+ 	return 0;
+ }
+ 
++int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
++{
++	return do_dcache_readdir_filter(filp, dirent, filldir, NULL);
++}
++
++int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir,
++	int (*filter)(struct dentry *))
++{
++	return do_dcache_readdir_filter(filp, dirent, filldir, filter);
++}
++
++
+ ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
+ {
+ 	return -EISDIR;
+@@ -842,6 +857,7 @@ EXPORT_SYMBOL(dcache_dir_close);
+ EXPORT_SYMBOL(dcache_dir_lseek);
+ EXPORT_SYMBOL(dcache_dir_open);
+ EXPORT_SYMBOL(dcache_readdir);
++EXPORT_SYMBOL(dcache_readdir_filter);
+ EXPORT_SYMBOL(generic_read_dir);
+ EXPORT_SYMBOL(get_sb_pseudo);
+ EXPORT_SYMBOL(simple_write_begin);
+--- a/fs/locks.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/locks.c	2011-06-10 13:03:02.000000000 +0200
+@@ -127,6 +127,8 @@
+ #include <linux/time.h>
+ #include <linux/rcupdate.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -148,6 +150,8 @@ static struct kmem_cache *filelock_cache
+ /* Allocate an empty lock structure. */
+ static struct file_lock *locks_alloc_lock(void)
+ {
++	if (!vx_locks_avail(1))
++		return NULL;
+ 	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
+ }
+ 
+@@ -174,6 +178,7 @@ static void locks_free_lock(struct file_
+ 	BUG_ON(!list_empty(&fl->fl_block));
+ 	BUG_ON(!list_empty(&fl->fl_link));
+ 
++	vx_locks_dec(fl);
+ 	locks_release_private(fl);
+ 	kmem_cache_free(filelock_cache, fl);
+ }
+@@ -194,6 +199,7 @@ void locks_init_lock(struct file_lock *f
+ 	fl->fl_start = fl->fl_end = 0;
+ 	fl->fl_ops = NULL;
+ 	fl->fl_lmops = NULL;
++	fl->fl_xid = -1;
+ }
+ 
+ EXPORT_SYMBOL(locks_init_lock);
+@@ -248,6 +254,7 @@ void locks_copy_lock(struct file_lock *n
+ 	new->fl_file = fl->fl_file;
+ 	new->fl_ops = fl->fl_ops;
+ 	new->fl_lmops = fl->fl_lmops;
++	new->fl_xid = fl->fl_xid;
+ 
+ 	locks_copy_private(new, fl);
+ }
+@@ -286,6 +293,11 @@ static int flock_make_lock(struct file *
+ 	fl->fl_flags = FL_FLOCK;
+ 	fl->fl_type = type;
+ 	fl->fl_end = OFFSET_MAX;
++
++	vxd_assert(filp->f_xid == vx_current_xid(),
++		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++	fl->fl_xid = filp->f_xid;
++	vx_locks_inc(fl);
+ 	
+ 	*lock = fl;
+ 	return 0;
+@@ -451,6 +463,7 @@ static int lease_init(struct file *filp,
+ 
+ 	fl->fl_owner = current->files;
+ 	fl->fl_pid = current->tgid;
++	fl->fl_xid = vx_current_xid();
+ 
+ 	fl->fl_file = filp;
+ 	fl->fl_flags = FL_LEASE;
+@@ -470,6 +483,11 @@ static struct file_lock *lease_alloc(str
+ 	if (fl == NULL)
+ 		return ERR_PTR(error);
+ 
++	fl->fl_xid = vx_current_xid();
++	if (filp)
++		vxd_assert(filp->f_xid == fl->fl_xid,
++			"f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
++	vx_locks_inc(fl);
+ 	error = lease_init(filp, type, fl);
+ 	if (error) {
+ 		locks_free_lock(fl);
+@@ -770,6 +788,7 @@ static int flock_lock_file(struct file *
+ 	if (found)
+ 		cond_resched();
+ 
++	new_fl->fl_xid = -1;
+ find_conflict:
+ 	for_each_lock(inode, before) {
+ 		struct file_lock *fl = *before;
+@@ -790,6 +809,7 @@ find_conflict:
+ 		goto out;
+ 	locks_copy_lock(new_fl, request);
+ 	locks_insert_lock(before, new_fl);
++	vx_locks_inc(new_fl);
+ 	new_fl = NULL;
+ 	error = 0;
+ 
+@@ -800,7 +820,8 @@ out:
+ 	return error;
+ }
+ 
+-static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
++static int __posix_lock_file(struct inode *inode, struct file_lock *request,
++	struct file_lock *conflock, xid_t xid)
+ {
+ 	struct file_lock *fl;
+ 	struct file_lock *new_fl = NULL;
+@@ -810,6 +831,8 @@ static int __posix_lock_file(struct inod
+ 	struct file_lock **before;
+ 	int error, added = 0;
+ 
++	vxd_assert(xid == vx_current_xid(),
++		"xid(%d) == current(%d)", xid, vx_current_xid());
+ 	/*
+ 	 * We may need two file_lock structures for this operation,
+ 	 * so we get them in advance to avoid races.
+@@ -820,7 +843,11 @@ static int __posix_lock_file(struct inod
+ 	    (request->fl_type != F_UNLCK ||
+ 	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+ 		new_fl = locks_alloc_lock();
++		new_fl->fl_xid = xid;
++		vx_locks_inc(new_fl);
+ 		new_fl2 = locks_alloc_lock();
++		new_fl2->fl_xid = xid;
++		vx_locks_inc(new_fl2);
+ 	}
+ 
+ 	lock_kernel();
+@@ -1019,7 +1046,8 @@ static int __posix_lock_file(struct inod
+ int posix_lock_file(struct file *filp, struct file_lock *fl,
+ 			struct file_lock *conflock)
+ {
+-	return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
++	return __posix_lock_file(filp->f_path.dentry->d_inode,
++		fl, conflock, filp->f_xid);
+ }
+ EXPORT_SYMBOL(posix_lock_file);
+ 
+@@ -1109,7 +1137,7 @@ int locks_mandatory_area(int read_write,
+ 	fl.fl_end = offset + count - 1;
+ 
+ 	for (;;) {
+-		error = __posix_lock_file(inode, &fl, NULL);
++		error = __posix_lock_file(inode, &fl, NULL, filp->f_xid);
+ 		if (error != FILE_LOCK_DEFERRED)
+ 			break;
+ 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
+@@ -1424,6 +1452,7 @@ int generic_setlease(struct file *filp, 
+ 
+ 	locks_copy_lock(new_fl, lease);
+ 	locks_insert_lock(before, new_fl);
++	vx_locks_inc(new_fl);
+ 
+ 	*flp = new_fl;
+ 	return 0;
+@@ -1779,6 +1808,11 @@ int fcntl_setlk(unsigned int fd, struct 
+ 	if (file_lock == NULL)
+ 		return -ENOLCK;
+ 
++	vxd_assert(filp->f_xid == vx_current_xid(),
++		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++	file_lock->fl_xid = filp->f_xid;
++	vx_locks_inc(file_lock);
++
+ 	/*
+ 	 * This might block, so we do it before checking the inode.
+ 	 */
+@@ -1897,6 +1931,11 @@ int fcntl_setlk64(unsigned int fd, struc
+ 	if (file_lock == NULL)
+ 		return -ENOLCK;
+ 
++	vxd_assert(filp->f_xid == vx_current_xid(),
++		"f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
++	file_lock->fl_xid = filp->f_xid;
++	vx_locks_inc(file_lock);
++
+ 	/*
+ 	 * This might block, so we do it before checking the inode.
+ 	 */
+@@ -2162,8 +2201,11 @@ static int locks_show(struct seq_file *f
+ 
+ 	lock_get_status(f, fl, (long)f->private, "");
+ 
+-	list_for_each_entry(bfl, &fl->fl_block, fl_block)
++	list_for_each_entry(bfl, &fl->fl_block, fl_block) {
++		if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT))
++			continue;
+ 		lock_get_status(f, bfl, (long)f->private, " ->");
++	}
+ 
+ 	f->private++;
+ 	return 0;
+--- a/fs/namei.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/namei.c	2011-06-10 18:44:08.000000000 +0200
+@@ -33,6 +33,14 @@
+ #include <linux/fcntl.h>
+ #include <linux/device_cgroup.h>
+ #include <linux/fs_struct.h>
++#include <linux/proc_fs.h>
++#include <linux/vserver/inode.h>
++#include <linux/vs_base.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_cowbl.h>
++#include <linux/vs_device.h>
++#include <linux/vs_context.h>
++#include <linux/pid_namespace.h>
+ #include <asm/uaccess.h>
+ 
+ #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
+@@ -169,6 +177,77 @@ void putname(const char *name)
+ EXPORT_SYMBOL(putname);
+ #endif
+ 
++static inline int dx_barrier(const struct inode *inode)
++{
++	if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN | VS_WATCH)) {
++		vxwprintk_task(1, "did hit the barrier.");
++		return 1;
++	}
++	return 0;
++}
++
++static int __dx_permission(const struct inode *inode, int mask)
++{
++	if (dx_barrier(inode))
++		return -EACCES;
++
++	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
++		/* devpts is xid tagged */
++		if (S_ISDIR(inode->i_mode) ||
++		    vx_check((xid_t)inode->i_tag, VS_IDENT | VS_WATCH_P))
++			return 0;
++	}
++	else if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
++		struct proc_dir_entry *de = PDE(inode);
++
++		if (de && !vx_hide_check(0, de->vx_flags))
++			goto out;
++
++		if ((mask & (MAY_WRITE | MAY_APPEND))) {
++			struct pid *pid;
++			struct task_struct *tsk;
++
++			if (vx_check(0, VS_ADMIN | VS_WATCH_P) ||
++			    vx_flags(VXF_STATE_SETUP, 0))
++				return 0;
++
++			pid = PROC_I(inode)->pid;
++			if (!pid)
++				goto out;
++
++			tsk = pid_task(pid, PIDTYPE_PID);
++			vxdprintk(VXD_CBIT(tag, 0), "accessing %p[#%u]",
++				  tsk, (tsk ? vx_task_xid(tsk) : 0));
++			if (tsk && vx_check(vx_task_xid(tsk), VS_IDENT | VS_WATCH_P))
++				return 0;
++		}
++		else {
++			/* FIXME: Should we block some entries here? */
++			return 0;
++		}
++	}
++	else {
++		if (dx_notagcheck(inode->i_sb) ||
++		    dx_check(inode->i_tag, DX_HOSTID | DX_ADMIN | DX_WATCH |
++			     DX_IDENT))
++			return 0;
++	}
++
++out:
++	return -EACCES;
++}
++
++int dx_permission(const struct inode *inode, int mask)
++{
++	int ret = __dx_permission(inode, mask);
++	if (unlikely(ret)) {
++		vxwprintk_task(1, "denied %x access to %s:%p[#%d,%lu]",
++			mask, inode->i_sb->s_id, inode, inode->i_tag,
++			inode->i_ino);
++	}
++	return ret;
++}
++
+ /*
+  * This does basic POSIX ACL permission checking
+  */
+@@ -269,10 +348,14 @@ int inode_permission(struct inode *inode
+ 		/*
+ 		 * Nobody gets write access to an immutable file.
+ 		 */
+-		if (IS_IMMUTABLE(inode))
++		if (IS_IMMUTABLE(inode) && !IS_COW(inode))
+ 			return -EACCES;
+ 	}
+ 
++	retval = dx_permission(inode, mask);
++	if (retval)
++		return retval;
++
+ 	if (inode->i_op->permission)
+ 		retval = inode->i_op->permission(inode, mask);
+ 	else
+@@ -448,6 +531,9 @@ static int exec_permission_lite(struct i
+ {
+ 	int ret;
+ 
++	if (dx_barrier(inode))
++		return -EACCES;
++
+ 	if (inode->i_op->permission) {
+ 		ret = inode->i_op->permission(inode, MAY_EXEC);
+ 		if (!ret)
+@@ -763,7 +849,8 @@ static __always_inline void follow_dotdo
+ 
+ 		if (nd->path.dentry == nd->root.dentry &&
+ 		    nd->path.mnt == nd->root.mnt) {
+-			break;
++			/* for sane '/' avoid follow_mount() */
++			return;
+ 		}
+ 		spin_lock(&dcache_lock);
+ 		if (nd->path.dentry != nd->path.mnt->mnt_root) {
+@@ -799,16 +886,30 @@ static int do_lookup(struct nameidata *n
+ {
+ 	struct vfsmount *mnt = nd->path.mnt;
+ 	struct dentry *dentry = __d_lookup(nd->path.dentry, name);
++	struct inode *inode;
+ 
+ 	if (!dentry)
+ 		goto need_lookup;
+ 	if (dentry->d_op && dentry->d_op->d_revalidate)
+ 		goto need_revalidate;
++	inode = dentry->d_inode;
++	if (!inode)
++		goto done;
++
++	if (__dx_permission(inode, MAY_ACCESS))
++		goto hidden;
++
+ done:
+ 	path->mnt = mnt;
+ 	path->dentry = dentry;
+ 	__follow_mount(path);
+ 	return 0;
++hidden:
++	vxwprintk_task(1, "did lookup hidden %s:%p[#%d,%lu] »%s/%.*s«.",
++		inode->i_sb->s_id, inode, inode->i_tag, inode->i_ino,
++		vxd_path(&nd->path), name->len, name->name);
++	dput(dentry);
++	return -ENOENT;
+ 
+ need_lookup:
+ 	dentry = real_lookup(nd->path.dentry, name, nd);
+@@ -1400,7 +1501,7 @@ static int may_delete(struct inode *dir,
+ 	if (IS_APPEND(dir))
+ 		return -EPERM;
+ 	if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
+-	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
++		IS_IXORUNLINK(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
+ 		return -EPERM;
+ 	if (isdir) {
+ 		if (!S_ISDIR(victim->d_inode->i_mode))
+@@ -1540,6 +1641,14 @@ int may_open(struct path *path, int acc_
+ 		break;
+ 	}
+ 
++#ifdef	CONFIG_VSERVER_COWBL
++	if (IS_COW(inode) && (flag & FMODE_WRITE)) {
++		if (IS_COW_LINK(inode))
++			return -EMLINK;
++		inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
++		mark_inode_dirty(inode);
++	}
++#endif
+ 	error = inode_permission(inode, acc_mode);
+ 	if (error)
+ 		return error;
+@@ -1688,7 +1797,11 @@ struct file *do_filp_open(int dfd, const
+ 	int count = 0;
+ 	int will_write;
+ 	int flag = open_to_namei_flags(open_flag);
+-
++#ifdef	CONFIG_VSERVER_COWBL
++	int rflag = flag;
++	int rmode = mode;
++restart:
++#endif
+ 	if (!acc_mode)
+ 		acc_mode = MAY_OPEN | ACC_MODE(flag);
+ 
+@@ -1836,6 +1949,25 @@ ok:
+ 			goto exit;
+ 	}
+ 	error = may_open(&nd.path, acc_mode, flag);
++#ifdef	CONFIG_VSERVER_COWBL
++	if (error == -EMLINK) {
++		struct dentry *dentry;
++		dentry = cow_break_link(pathname);
++		if (IS_ERR(dentry)) {
++			error = PTR_ERR(dentry);
++			goto exit_cow;
++		}
++		dput(dentry);
++		if (will_write)
++			mnt_drop_write(nd.path.mnt);
++		release_open_intent(&nd);
++		path_put(&nd.path);
++		flag = rflag;
++		mode = rmode;
++		goto restart;
++	}
++exit_cow:
++#endif
+ 	if (error) {
+ 		if (will_write)
+ 			mnt_drop_write(nd.path.mnt);
+@@ -1998,9 +2130,17 @@ int vfs_mknod(struct inode *dir, struct 
+ 	if (error)
+ 		return error;
+ 
+-	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
++	if (!(S_ISCHR(mode) || S_ISBLK(mode)))
++		goto okay;
++
++	if (!capable(CAP_MKNOD))
+ 		return -EPERM;
+ 
++	if (S_ISCHR(mode) && !vs_chrdev_perm(dev, DATTR_CREATE))
++		return -EPERM;
++	if (S_ISBLK(mode) && !vs_blkdev_perm(dev, DATTR_CREATE))
++		return -EPERM;
++okay:
+ 	if (!dir->i_op->mknod)
+ 		return -EPERM;
+ 
+@@ -2467,7 +2607,7 @@ int vfs_link(struct dentry *old_dentry, 
+ 	/*
+ 	 * A link to an append-only or immutable file cannot be created.
+ 	 */
+-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
++	if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
+ 		return -EPERM;
+ 	if (!dir->i_op->link)
+ 		return -EPERM;
+@@ -2840,6 +2980,219 @@ int vfs_follow_link(struct nameidata *nd
+ 	return __vfs_follow_link(nd, link);
+ }
+ 
++
++#ifdef	CONFIG_VSERVER_COWBL
++
++#include <linux/file.h>
++
++static inline
++long do_cow_splice(struct file *in, struct file *out, size_t len)
++{
++	loff_t ppos = 0;
++
++	return do_splice_direct(in, &ppos, out, len, 0);
++}
++
++struct dentry *cow_break_link(const char *pathname)
++{
++	int ret, mode, pathlen, redo = 0;
++	struct nameidata old_nd, dir_nd;
++	struct path old_path, new_path;
++	struct dentry *dir, *res = NULL;
++	struct file *old_file;
++	struct file *new_file;
++	char *to, *path, pad='\251';
++	loff_t size;
++
++	vxdprintk(VXD_CBIT(misc, 1), "cow_break_link(»%s«)", pathname);
++	path = kmalloc(PATH_MAX, GFP_KERNEL);
++	ret = -ENOMEM;
++	if (!path)
++		goto out;
++
++	/* old_nd will have refs to dentry and mnt */
++	ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
++	vxdprintk(VXD_CBIT(misc, 2), "path_lookup(old): %d", ret);
++	if (ret < 0)
++		goto out_free_path;
++
++	old_path = old_nd.path;
++	mode = old_path.dentry->d_inode->i_mode;
++
++	to = d_path(&old_path, path, PATH_MAX-2);
++	pathlen = strlen(to);
++	vxdprintk(VXD_CBIT(misc, 2), "old path »%s« [»%.*s«:%d]", to,
++		old_path.dentry->d_name.len, old_path.dentry->d_name.name,
++		old_path.dentry->d_name.len);
++
++	to[pathlen + 1] = 0;
++retry:
++	to[pathlen] = pad--;
++	ret = -EMLINK;
++	if (pad <= '\240')
++		goto out_rel_old;
++
++	vxdprintk(VXD_CBIT(misc, 1), "temp copy »%s«", to);
++	/* dir_nd will have refs to dentry and mnt */
++	ret = path_lookup(to,
++		LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd);
++	vxdprintk(VXD_CBIT(misc, 2),
++		"path_lookup(new): %d", ret);
++	if (ret < 0)
++		goto retry;
++
++	/* this puppy downs the inode mutex */
++	new_path.dentry = lookup_create(&dir_nd, 0);
++	if (!new_path.dentry || IS_ERR(new_path.dentry)) {
++		vxdprintk(VXD_CBIT(misc, 2),
++			"lookup_create(new): %p", new_path.dentry);
++		mutex_unlock(&dir_nd.path.dentry->d_inode->i_mutex);
++		path_put(&dir_nd.path);
++		goto retry;
++	}
++	vxdprintk(VXD_CBIT(misc, 2),
++		"lookup_create(new): %p [»%.*s«:%d]", new_path.dentry,
++		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
++		new_path.dentry->d_name.len);
++	dir = dir_nd.path.dentry;
++
++	ret = vfs_create(dir_nd.path.dentry->d_inode, new_path.dentry, mode, &dir_nd);
++	vxdprintk(VXD_CBIT(misc, 2),
++		"vfs_create(new): %d", ret);
++	if (ret == -EEXIST) {
++		mutex_unlock(&dir->d_inode->i_mutex);
++		dput(new_path.dentry);
++		path_put(&dir_nd.path);
++		goto retry;
++	}
++	else if (ret < 0)
++		goto out_unlock_new;
++
++	/* drop out early, ret passes ENOENT */
++	ret = -ENOENT;
++	if ((redo = d_unhashed(old_path.dentry)))
++		goto out_unlock_new;
++
++	new_path.mnt = dir_nd.path.mnt;
++	dget(old_path.dentry);
++	mntget(old_path.mnt);
++	/* this one cleans up the dentry/mnt in case of failure */
++	old_file = dentry_open(old_path.dentry, old_path.mnt,
++		O_RDONLY, current_cred());
++	vxdprintk(VXD_CBIT(misc, 2),
++		"dentry_open(old): %p", old_file);
++	if (!old_file || IS_ERR(old_file)) {
++		res = IS_ERR(old_file) ? (void *) old_file : res;
++		goto out_unlock_new;
++	}
++
++	dget(new_path.dentry);
++	mntget(new_path.mnt);
++	/* this one cleans up the dentry/mnt in case of failure */
++	new_file = dentry_open(new_path.dentry, new_path.mnt,
++		O_WRONLY, current_cred());
++	vxdprintk(VXD_CBIT(misc, 2),
++		"dentry_open(new): %p", new_file);
++
++	ret = IS_ERR(new_file) ? PTR_ERR(new_file) : -ENOENT;
++	if (!new_file || IS_ERR(new_file))
++		goto out_fput_old;
++
++	size = i_size_read(old_file->f_dentry->d_inode);
++	ret = do_cow_splice(old_file, new_file, size);
++	vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
++	if (ret < 0) {
++		goto out_fput_both;
++	} else if (ret < size) {
++		ret = -ENOSPC;
++		goto out_fput_both;
++	} else {
++		struct inode *old_inode = old_path.dentry->d_inode;
++		struct inode *new_inode = new_path.dentry->d_inode;
++		struct iattr attr = {
++			.ia_uid = old_inode->i_uid,
++			.ia_gid = old_inode->i_gid,
++			.ia_valid = ATTR_UID | ATTR_GID
++			};
++
++		ret = inode_setattr(new_inode, &attr);
++		if (ret)
++			goto out_fput_both;
++	}
++
++	mutex_lock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
++
++	/* drop out late */
++	ret = -ENOENT;
++	if ((redo = d_unhashed(old_path.dentry)))
++		goto out_unlock;
++
++	vxdprintk(VXD_CBIT(misc, 2),
++		"vfs_rename: [»%*s«:%d] -> [»%*s«:%d]",
++		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
++		new_path.dentry->d_name.len,
++		old_path.dentry->d_name.len, old_path.dentry->d_name.name,
++		old_path.dentry->d_name.len);
++	ret = vfs_rename(dir_nd.path.dentry->d_inode, new_path.dentry,
++		old_nd.path.dentry->d_parent->d_inode, old_path.dentry);
++	vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
++	res = new_path.dentry;
++
++out_unlock:
++	mutex_unlock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
++
++out_fput_both:
++	vxdprintk(VXD_CBIT(misc, 3),
++		"fput(new_file=%p[#%ld])", new_file,
++		atomic_long_read(&new_file->f_count));
++	fput(new_file);
++
++out_fput_old:
++	vxdprintk(VXD_CBIT(misc, 3),
++		"fput(old_file=%p[#%ld])", old_file,
++		atomic_long_read(&old_file->f_count));
++	fput(old_file);
++
++out_unlock_new:
++	mutex_unlock(&dir->d_inode->i_mutex);
++	if (!ret)
++		goto out_redo;
++
++	/* error path cleanup */
++	vfs_unlink(dir->d_inode, new_path.dentry);
++	dput(new_path.dentry);
++
++out_redo:
++	if (!redo)
++		goto out_rel_both;
++	/* lookup dentry once again */
++	path_put(&old_nd.path);
++	ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
++	if (ret)
++		goto out_rel_both;
++
++	new_path.dentry = old_nd.path.dentry;
++	vxdprintk(VXD_CBIT(misc, 2),
++		"path_lookup(redo): %p [»%.*s«:%d]", new_path.dentry,
++		new_path.dentry->d_name.len, new_path.dentry->d_name.name,
++		new_path.dentry->d_name.len);
++	dget(new_path.dentry);
++	res = new_path.dentry;
++
++out_rel_both:
++	path_put(&dir_nd.path);
++out_rel_old:
++	path_put(&old_nd.path);
++out_free_path:
++	kfree(path);
++out:
++	if (ret)
++		res = ERR_PTR(ret);
++	return res;
++}
++
++#endif
++
+ /* get the link contents into pagecache */
+ static char *page_getlink(struct dentry * dentry, struct page **ppage)
+ {
+--- a/fs/namespace.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/namespace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -29,6 +29,11 @@
+ #include <linux/log2.h>
+ #include <linux/idr.h>
+ #include <linux/fs_struct.h>
++#include <linux/vs_base.h>
++#include <linux/vs_context.h>
++#include <linux/vs_tag.h>
++#include <linux/vserver/space.h>
++#include <linux/vserver/global.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+ #include "pnode.h"
+@@ -567,6 +572,7 @@ static struct vfsmount *clone_mnt(struct
+ 		mnt->mnt_root = dget(root);
+ 		mnt->mnt_mountpoint = mnt->mnt_root;
+ 		mnt->mnt_parent = mnt;
++		mnt->mnt_tag = old->mnt_tag;
+ 
+ 		if (flag & CL_SLAVE) {
+ 			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+@@ -661,6 +667,31 @@ static inline void mangle(struct seq_fil
+ 	seq_escape(m, s, " \t\n\\");
+ }
+ 
++static int mnt_is_reachable(struct vfsmount *mnt)
++{
++	struct path root;
++	struct dentry *point;
++	int ret;
++
++	if (mnt == mnt->mnt_ns->root)
++		return 1;
++
++	spin_lock(&vfsmount_lock);
++	root = current->fs->root;
++	point = root.dentry;
++
++	while ((mnt != mnt->mnt_parent) && (mnt != root.mnt)) {
++		point = mnt->mnt_mountpoint;
++		mnt = mnt->mnt_parent;
++	}
++
++	ret = (mnt == root.mnt) && is_subdir(point, root.dentry);
++
++	spin_unlock(&vfsmount_lock);
++
++	return ret;
++}
++
+ /*
+  * Simple .show_options callback for filesystems which don't want to
+  * implement more complex mount option showing.
+@@ -748,6 +779,8 @@ static int show_sb_opts(struct seq_file 
+ 		{ MS_SYNCHRONOUS, ",sync" },
+ 		{ MS_DIRSYNC, ",dirsync" },
+ 		{ MS_MANDLOCK, ",mand" },
++		{ MS_TAGGED, ",tag" },
++		{ MS_NOTAGCHECK, ",notagcheck" },
+ 		{ 0, NULL }
+ 	};
+ 	const struct proc_fs_info *fs_infop;
+@@ -795,10 +828,20 @@ static int show_vfsmnt(struct seq_file *
+ 	int err = 0;
+ 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ 
+-	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+-	seq_putc(m, ' ');
+-	seq_path(m, &mnt_path, " \t\n\\");
+-	seq_putc(m, ' ');
++	if (vx_flags(VXF_HIDE_MOUNT, 0))
++		return SEQ_SKIP;
++	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++		return SEQ_SKIP;
++
++	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
++		mnt == current->fs->root.mnt) {
++		seq_puts(m, "/dev/root / ");
++	} else {
++		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
++		seq_putc(m, ' ');
++		seq_path(m, &mnt_path, " \t\n\\");
++		seq_putc(m, ' ');
++	}
+ 	show_type(m, mnt->mnt_sb);
+ 	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
+ 	err = show_sb_opts(m, mnt->mnt_sb);
+@@ -828,6 +871,11 @@ static int show_mountinfo(struct seq_fil
+ 	struct path root = p->root;
+ 	int err = 0;
+ 
++	if (vx_flags(VXF_HIDE_MOUNT, 0))
++		return SEQ_SKIP;
++	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++		return SEQ_SKIP;
++
+ 	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
+ 		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
+ 	seq_dentry(m, mnt->mnt_root, " \t\n\\");
+@@ -886,17 +934,27 @@ static int show_vfsstat(struct seq_file 
+ 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ 	int err = 0;
+ 
+-	/* device */
+-	if (mnt->mnt_devname) {
+-		seq_puts(m, "device ");
+-		mangle(m, mnt->mnt_devname);
+-	} else
+-		seq_puts(m, "no device");
++	if (vx_flags(VXF_HIDE_MOUNT, 0))
++		return SEQ_SKIP;
++	if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
++		return SEQ_SKIP;
+ 
+-	/* mount point */
+-	seq_puts(m, " mounted on ");
+-	seq_path(m, &mnt_path, " \t\n\\");
+-	seq_putc(m, ' ');
++	if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
++		mnt == current->fs->root.mnt) {
++		seq_puts(m, "device /dev/root mounted on / ");
++	} else {
++		/* device */
++		if (mnt->mnt_devname) {
++			seq_puts(m, "device ");
++			mangle(m, mnt->mnt_devname);
++		} else
++			seq_puts(m, "no device");
++
++		/* mount point */
++		seq_puts(m, " mounted on ");
++		seq_path(m, &mnt_path, " \t\n\\");
++		seq_putc(m, ' ');
++	}
+ 
+ 	/* file system type */
+ 	seq_puts(m, "with fstype ");
+@@ -1137,7 +1195,7 @@ SYSCALL_DEFINE2(umount, char __user *, n
+ 		goto dput_and_out;
+ 
+ 	retval = -EPERM;
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ 		goto dput_and_out;
+ 
+ 	retval = do_umount(path.mnt, flags);
+@@ -1163,7 +1221,7 @@ SYSCALL_DEFINE1(oldumount, char __user *
+ 
+ static int mount_is_safe(struct path *path)
+ {
+-	if (capable(CAP_SYS_ADMIN))
++	if (vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ 		return 0;
+ 	return -EPERM;
+ #ifdef notyet
+@@ -1427,7 +1485,7 @@ static int do_change_type(struct path *p
+ 	int type = flag & ~MS_REC;
+ 	int err = 0;
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_NAMESPACE))
+ 		return -EPERM;
+ 
+ 	if (path->dentry != path->mnt->mnt_root)
+@@ -1454,11 +1512,13 @@ static int do_change_type(struct path *p
+  * do loopback mount.
+  */
+ static int do_loopback(struct path *path, char *old_name,
+-				int recurse)
++	tag_t tag, unsigned long flags, int mnt_flags)
+ {
+ 	struct path old_path;
+ 	struct vfsmount *mnt = NULL;
+ 	int err = mount_is_safe(path);
++	int recurse = flags & MS_REC;
++
+ 	if (err)
+ 		return err;
+ 	if (!old_name || !*old_name)
+@@ -1492,6 +1552,7 @@ static int do_loopback(struct path *path
+ 		spin_unlock(&vfsmount_lock);
+ 		release_mounts(&umount_list);
+ 	}
++	mnt->mnt_flags = mnt_flags;
+ 
+ out:
+ 	up_write(&namespace_sem);
+@@ -1522,12 +1583,12 @@ static int change_mount_flags(struct vfs
+  * on it - tough luck.
+  */
+ static int do_remount(struct path *path, int flags, int mnt_flags,
+-		      void *data)
++	void *data, xid_t xid)
+ {
+ 	int err;
+ 	struct super_block *sb = path->mnt->mnt_sb;
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_REMOUNT))
+ 		return -EPERM;
+ 
+ 	if (!check_mnt(path->mnt))
+@@ -1569,7 +1630,7 @@ static int do_move_mount(struct path *pa
+ 	struct path old_path, parent_path;
+ 	struct vfsmount *p;
+ 	int err = 0;
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ 		return -EPERM;
+ 	if (!old_name || !*old_name)
+ 		return -EINVAL;
+@@ -1651,7 +1712,7 @@ static int do_new_mount(struct path *pat
+ 		return -EINVAL;
+ 
+ 	/* we need capabilities... */
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
+ 		return -EPERM;
+ 
+ 	lock_kernel();
+@@ -1915,6 +1976,7 @@ long do_mount(char *dev_name, char *dir_
+ 	struct path path;
+ 	int retval = 0;
+ 	int mnt_flags = 0;
++	tag_t tag = 0;
+ 
+ 	/* Discard magic */
+ 	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+@@ -1932,6 +1994,12 @@ long do_mount(char *dev_name, char *dir_
+ 	if (!(flags & MS_NOATIME))
+ 		mnt_flags |= MNT_RELATIME;
+ 
++	if (dx_parse_tag(data_page, &tag, 1, &mnt_flags, &flags)) {
++		/* FIXME: bind and re-mounts get the tag flag? */
++		if (flags & (MS_BIND|MS_REMOUNT))
++			flags |= MS_TAGID;
++	}
++
+ 	/* Separate the per-mountpoint flags */
+ 	if (flags & MS_NOSUID)
+ 		mnt_flags |= MNT_NOSUID;
+@@ -1948,6 +2016,8 @@ long do_mount(char *dev_name, char *dir_
+ 	if (flags & MS_RDONLY)
+ 		mnt_flags |= MNT_READONLY;
+ 
++	if (!capable(CAP_SYS_ADMIN))
++		mnt_flags |= MNT_NODEV;
+ 	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
+ 		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
+ 		   MS_STRICTATIME);
+@@ -1964,9 +2034,9 @@ long do_mount(char *dev_name, char *dir_
+ 
+ 	if (flags & MS_REMOUNT)
+ 		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
+-				    data_page);
++				    data_page, tag);
+ 	else if (flags & MS_BIND)
+-		retval = do_loopback(&path, dev_name, flags & MS_REC);
++		retval = do_loopback(&path, dev_name, tag, flags, mnt_flags);
+ 	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ 		retval = do_change_type(&path, flags);
+ 	else if (flags & MS_MOVE)
+@@ -2045,6 +2115,7 @@ static struct mnt_namespace *dup_mnt_ns(
+ 		q = next_mnt(q, new_ns->root);
+ 	}
+ 	up_write(&namespace_sem);
++	atomic_inc(&vs_global_mnt_ns);
+ 
+ 	if (rootmnt)
+ 		mntput(rootmnt);
+@@ -2189,9 +2260,10 @@ SYSCALL_DEFINE2(pivot_root, const char _
+ 	down_write(&namespace_sem);
+ 	mutex_lock(&old.dentry->d_inode->i_mutex);
+ 	error = -EINVAL;
+-	if (IS_MNT_SHARED(old.mnt) ||
++	if ((IS_MNT_SHARED(old.mnt) ||
+ 		IS_MNT_SHARED(new.mnt->mnt_parent) ||
+-		IS_MNT_SHARED(root.mnt->mnt_parent))
++		IS_MNT_SHARED(root.mnt->mnt_parent)) &&
++		!vx_flags(VXF_STATE_SETUP, 0))
+ 		goto out2;
+ 	if (!check_mnt(root.mnt))
+ 		goto out2;
+@@ -2327,6 +2399,7 @@ void put_mnt_ns(struct mnt_namespace *ns
+ 	spin_unlock(&vfsmount_lock);
+ 	up_write(&namespace_sem);
+ 	release_mounts(&umount_list);
++	atomic_dec(&vs_global_mnt_ns);
+ 	kfree(ns);
+ }
+ EXPORT_SYMBOL(put_mnt_ns);
+--- a/fs/nfs/client.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfs/client.c	2011-06-10 13:03:02.000000000 +0200
+@@ -738,6 +738,9 @@ static int nfs_init_server_rpcclient(str
+ 	if (server->flags & NFS_MOUNT_SOFT)
+ 		server->client->cl_softrtry = 1;
+ 
++	server->client->cl_tag = 0;
++	if (server->flags & NFS_MOUNT_TAGGED)
++		server->client->cl_tag = 1;
+ 	return 0;
+ }
+ 
+@@ -909,6 +912,10 @@ static void nfs_server_set_fsinfo(struct
+ 		server->acdirmin = server->acdirmax = 0;
+ 	}
+ 
++	/* FIXME: needs fsinfo
++	if (server->flags & NFS_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;	*/
++
+ 	server->maxfilesize = fsinfo->maxfilesize;
+ 
+ 	/* We're airborne Set socket buffersize */
+--- a/fs/nfs/dir.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfs/dir.c	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/namei.h>
+ #include <linux/mount.h>
+ #include <linux/sched.h>
++#include <linux/vs_tag.h>
+ 
+ #include "nfs4_fs.h"
+ #include "delegation.h"
+@@ -951,6 +952,7 @@ static struct dentry *nfs_lookup(struct 
+ 	if (IS_ERR(res))
+ 		goto out_unblock_sillyrename;
+ 
++	dx_propagate_tag(nd, inode);
+ no_entry:
+ 	res = d_materialise_unique(dentry, inode);
+ 	if (res != NULL) {
+--- a/fs/nfs/inode.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfs/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <linux/vfs.h>
+ #include <linux/inet.h>
+ #include <linux/nfs_xdr.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -279,6 +280,8 @@ nfs_fhget(struct super_block *sb, struct
+ 	if (inode->i_state & I_NEW) {
+ 		struct nfs_inode *nfsi = NFS_I(inode);
+ 		unsigned long now = jiffies;
++		uid_t uid;
++		gid_t gid;
+ 
+ 		/* We set i_ino for the few things that still rely on it,
+ 		 * such as stat(2) */
+@@ -327,8 +330,8 @@ nfs_fhget(struct super_block *sb, struct
+ 		nfsi->change_attr = 0;
+ 		inode->i_size = 0;
+ 		inode->i_nlink = 0;
+-		inode->i_uid = -2;
+-		inode->i_gid = -2;
++		uid = -2;
++		gid = -2;
+ 		inode->i_blocks = 0;
+ 		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ 
+@@ -365,13 +368,13 @@ nfs_fhget(struct super_block *sb, struct
+ 		else if (nfs_server_capable(inode, NFS_CAP_NLINK))
+ 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ 		if (fattr->valid & NFS_ATTR_FATTR_OWNER)
+-			inode->i_uid = fattr->uid;
++			uid = fattr->uid;
+ 		else if (nfs_server_capable(inode, NFS_CAP_OWNER))
+ 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL;
+ 		if (fattr->valid & NFS_ATTR_FATTR_GROUP)
+-			inode->i_gid = fattr->gid;
++			gid = fattr->gid;
+ 		else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
+ 			nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+@@ -384,6 +387,11 @@ nfs_fhget(struct super_block *sb, struct
+ 			 */
+ 			inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+ 		}
++		inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++		inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++		inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
++				/* maybe fattr->xid someday */
++
+ 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ 		nfsi->attrtimeo_timestamp = now;
+ 		nfsi->access_cache = RB_ROOT;
+@@ -496,6 +504,8 @@ void nfs_setattr_update_inode(struct ino
+ 			inode->i_uid = attr->ia_uid;
+ 		if ((attr->ia_valid & ATTR_GID) != 0)
+ 			inode->i_gid = attr->ia_gid;
++		if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
++			inode->i_tag = attr->ia_tag;
+ 		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 		spin_unlock(&inode->i_lock);
+ 	}
+@@ -914,6 +924,9 @@ static int nfs_check_inode_attributes(st
+ 	struct nfs_inode *nfsi = NFS_I(inode);
+ 	loff_t cur_size, new_isize;
+ 	unsigned long invalid = 0;
++	uid_t uid;
++	gid_t gid;
++	tag_t tag;
+ 
+ 
+ 	/* Has the inode gone and changed behind our back? */
+@@ -937,13 +950,18 @@ static int nfs_check_inode_attributes(st
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ 	}
+ 
++	uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
++	gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
++	tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
++
+ 	/* Have any file permissions changed? */
+ 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
+ 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+-	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid)
++	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && uid != fattr->uid)
+ 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+-	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid)
++	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && gid != fattr->gid)
+ 		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
++		/* maybe check for tag too? */
+ 
+ 	/* Has the link count changed? */
+ 	if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
+@@ -1158,6 +1176,9 @@ static int nfs_update_inode(struct inode
+ 	unsigned long invalid = 0;
+ 	unsigned long now = jiffies;
+ 	unsigned long save_cache_validity;
++	uid_t uid;
++	gid_t gid;
++	tag_t tag;
+ 
+ 	dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
+ 			__func__, inode->i_sb->s_id, inode->i_ino,
+@@ -1260,6 +1281,9 @@ static int nfs_update_inode(struct inode
+ 				| NFS_INO_REVAL_PAGECACHE
+ 				| NFS_INO_REVAL_FORCED);
+ 
++	uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
++	gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
++	tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+@@ -1279,9 +1303,9 @@ static int nfs_update_inode(struct inode
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
+-		if (inode->i_uid != fattr->uid) {
++		if (uid != fattr->uid) {
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+-			inode->i_uid = fattr->uid;
++			uid = fattr->uid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER)
+ 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+@@ -1290,9 +1314,9 @@ static int nfs_update_inode(struct inode
+ 				| NFS_INO_REVAL_FORCED);
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
+-		if (inode->i_gid != fattr->gid) {
++		if (gid != fattr->gid) {
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+-			inode->i_gid = fattr->gid;
++			gid = fattr->gid;
+ 		}
+ 	} else if (server->caps & NFS_CAP_OWNER_GROUP)
+ 		invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+@@ -1300,6 +1324,10 @@ static int nfs_update_inode(struct inode
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
+ 
++	inode->i_uid = uid;
++	inode->i_gid = gid;
++	inode->i_tag = tag;
++
+ 	if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
+ 		if (inode->i_nlink != fattr->nlink) {
+ 			invalid |= NFS_INO_INVALID_ATTR;
+--- a/fs/nfs/nfs3xdr.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/nfs/nfs3xdr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -21,6 +21,7 @@
+ #include <linux/nfs3.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/nfsacl.h>
++#include <linux/vs_tag.h>
+ #include "internal.h"
+ 
+ #define NFSDBG_FACILITY		NFSDBG_XDR
+@@ -176,7 +177,7 @@ xdr_decode_fattr(__be32 *p, struct nfs_f
+ }
+ 
+ static inline __be32 *
+-xdr_encode_sattr(__be32 *p, struct iattr *attr)
++xdr_encode_sattr(__be32 *p, struct iattr *attr, int tag)
+ {
+ 	if (attr->ia_valid & ATTR_MODE) {
+ 		*p++ = xdr_one;
+@@ -184,15 +185,17 @@ xdr_encode_sattr(__be32 *p, struct iattr
+ 	} else {
+ 		*p++ = xdr_zero;
+ 	}
+-	if (attr->ia_valid & ATTR_UID) {
++	if (attr->ia_valid & ATTR_UID ||
++		(tag && (attr->ia_valid & ATTR_TAG))) {
+ 		*p++ = xdr_one;
+-		*p++ = htonl(attr->ia_uid);
++		*p++ = htonl(TAGINO_UID(tag, attr->ia_uid, attr->ia_tag));
+ 	} else {
+ 		*p++ = xdr_zero;
+ 	}
+-	if (attr->ia_valid & ATTR_GID) {
++	if (attr->ia_valid & ATTR_GID ||
++		(tag && (attr->ia_valid & ATTR_TAG))) {
+ 		*p++ = xdr_one;
+-		*p++ = htonl(attr->ia_gid);
++		*p++ = htonl(TAGINO_GID(tag, attr->ia_gid, attr->ia_tag));
+ 	} else {
+ 		*p++ = xdr_zero;
+ 	}
+@@ -279,7 +282,8 @@ static int
+ nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
+ {
+ 	p = xdr_encode_fhandle(p, args->fh);
+-	p = xdr_encode_sattr(p, args->sattr);
++	p = xdr_encode_sattr(p, args->sattr,
++		req->rq_task->tk_client->cl_tag);
+ 	*p++ = htonl(args->guard);
+ 	if (args->guard)
+ 		p = xdr_encode_time3(p, &args->guardtime);
+@@ -384,7 +388,8 @@ nfs3_xdr_createargs(struct rpc_rqst *req
+ 		*p++ = args->verifier[0];
+ 		*p++ = args->verifier[1];
+ 	} else
+-		p = xdr_encode_sattr(p, args->sattr);
++		p = xdr_encode_sattr(p, args->sattr,
++			req->rq_task->tk_client->cl_tag);
+ 
+ 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ 	return 0;
+@@ -398,7 +403,8 @@ nfs3_xdr_mkdirargs(struct rpc_rqst *req,
+ {
+ 	p = xdr_encode_fhandle(p, args->fh);
+ 	p = xdr_encode_array(p, args->name, args->len);
+-	p = xdr_encode_sattr(p, args->sattr);
++	p = xdr_encode_sattr(p, args->sattr,
++		req->rq_task->tk_client->cl_tag);
+ 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ 	return 0;
+ }
+@@ -411,7 +417,8 @@ nfs3_xdr_symlinkargs(struct rpc_rqst *re
+ {
+ 	p = xdr_encode_fhandle(p, args->fromfh);
+ 	p = xdr_encode_array(p, args->fromname, args->fromlen);
+-	p = xdr_encode_sattr(p, args->sattr);
++	p = xdr_encode_sattr(p, args->sattr,
++		req->rq_task->tk_client->cl_tag);
+ 	*p++ = htonl(args->pathlen);
+ 	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+ 
+@@ -429,7 +436,8 @@ nfs3_xdr_mknodargs(struct rpc_rqst *req,
+ 	p = xdr_encode_fhandle(p, args->fh);
+ 	p = xdr_encode_array(p, args->name, args->len);
+ 	*p++ = htonl(args->type);
+-	p = xdr_encode_sattr(p, args->sattr);
++	p = xdr_encode_sattr(p, args->sattr,
++		req->rq_task->tk_client->cl_tag);
+ 	if (args->type == NF3CHR || args->type == NF3BLK) {
+ 		*p++ = htonl(MAJOR(args->rdev));
+ 		*p++ = htonl(MINOR(args->rdev));
+--- a/fs/nfs/nfsroot.c	2009-09-10 15:26:23.000000000 +0200
++++ a/fs/nfs/nfsroot.c	2011-06-10 13:03:02.000000000 +0200
+@@ -122,12 +122,12 @@ static int mount_port __initdata = 0;		/
+ enum {
+ 	/* Options that take integer arguments */
+ 	Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin,
+-	Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
++	Opt_acregmax, Opt_acdirmin, Opt_acdirmax, Opt_tagid,
+ 	/* Options that take no arguments */
+ 	Opt_soft, Opt_hard, Opt_intr,
+ 	Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac, 
+ 	Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
+-	Opt_acl, Opt_noacl,
++	Opt_acl, Opt_noacl, Opt_tag, Opt_notag,
+ 	/* Error token */
+ 	Opt_err
+ };
+@@ -164,6 +164,9 @@ static const match_table_t tokens __init
+ 	{Opt_tcp, "tcp"},
+ 	{Opt_acl, "acl"},
+ 	{Opt_noacl, "noacl"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_err, NULL}
+ 	
+ };
+@@ -275,6 +278,20 @@ static int __init root_nfs_parse(char *n
+ 			case Opt_noacl:
+ 				nfs_data.flags |= NFS_MOUNT_NOACL;
+ 				break;
++#ifndef CONFIG_TAGGING_NONE
++			case Opt_tag:
++				nfs_data.flags |= NFS_MOUNT_TAGGED;
++				break;
++			case Opt_notag:
++				nfs_data.flags &= ~NFS_MOUNT_TAGGED;
++				break;
++#endif
++#ifdef CONFIG_PROPAGATE
++			case Opt_tagid:
++				/* use args[0] */
++				nfs_data.flags |= NFS_MOUNT_TAGGED;
++				break;
++#endif
+ 			default:
+ 				printk(KERN_WARNING "Root-NFS: unknown "
+ 					"option: %s\n", p);
+--- a/fs/nfs/super.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfs/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -53,6 +53,7 @@
+ #include <linux/nfs_xdr.h>
+ #include <linux/magic.h>
+ #include <linux/parser.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -570,6 +571,7 @@ static void nfs_show_mount_options(struc
+ 		{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ 		{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
+ 		{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
++		{ NFS_MOUNT_TAGGED, ",tag", "" },
+ 		{ 0, NULL, NULL }
+ 	};
+ 	const struct proc_nfs_info *nfs_infop;
+--- a/fs/nfsd/auth.c	2009-12-03 20:02:52.000000000 +0100
++++ a/fs/nfsd/auth.c	2011-06-10 13:03:02.000000000 +0200
+@@ -10,6 +10,7 @@
+ #include <linux/sunrpc/svcauth.h>
+ #include <linux/nfsd/nfsd.h>
+ #include <linux/nfsd/export.h>
++#include <linux/vs_tag.h>
+ #include "auth.h"
+ 
+ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
+@@ -44,6 +45,9 @@ int nfsd_setuser(struct svc_rqst *rqstp,
+ 
+ 	new->fsuid = rqstp->rq_cred.cr_uid;
+ 	new->fsgid = rqstp->rq_cred.cr_gid;
++	/* FIXME: this desperately needs a tag :)
++	new->xid = (xid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0);
++			*/
+ 
+ 	rqgi = rqstp->rq_cred.cr_group_info;
+ 
+--- a/fs/nfsd/nfs3xdr.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfsd/nfs3xdr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -21,6 +21,7 @@
+ #include <linux/sunrpc/svc.h>
+ #include <linux/nfsd/nfsd.h>
+ #include <linux/nfsd/xdr3.h>
++#include <linux/vs_tag.h>
+ #include "auth.h"
+ 
+ #define NFSDDBG_FACILITY		NFSDDBG_XDR
+@@ -108,6 +109,8 @@ static __be32 *
+ decode_sattr3(__be32 *p, struct iattr *iap)
+ {
+ 	u32	tmp;
++	uid_t	uid = 0;
++	gid_t	gid = 0;
+ 
+ 	iap->ia_valid = 0;
+ 
+@@ -117,12 +120,15 @@ decode_sattr3(__be32 *p, struct iattr *i
+ 	}
+ 	if (*p++) {
+ 		iap->ia_valid |= ATTR_UID;
+-		iap->ia_uid = ntohl(*p++);
++		uid = ntohl(*p++);
+ 	}
+ 	if (*p++) {
+ 		iap->ia_valid |= ATTR_GID;
+-		iap->ia_gid = ntohl(*p++);
++		gid = ntohl(*p++);
+ 	}
++	iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
++	iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
++	iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
+ 	if (*p++) {
+ 		u64	newsize;
+ 
+@@ -178,8 +184,12 @@ encode_fattr3(struct svc_rqst *rqstp, __
+ 	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
+ 	*p++ = htonl((u32) stat->mode);
+ 	*p++ = htonl((u32) stat->nlink);
+-	*p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+-	*p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
++	*p++ = htonl((u32) nfsd_ruid(rqstp,
++		TAGINO_UID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
++		stat->uid, stat->tag)));
++	*p++ = htonl((u32) nfsd_rgid(rqstp,
++		TAGINO_GID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
++		stat->gid, stat->tag)));
+ 	if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
+ 		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
+ 	} else {
+--- a/fs/nfsd/nfs4xdr.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/nfsd/nfs4xdr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -57,6 +57,7 @@
+ #include <linux/nfs4_acl.h>
+ #include <linux/sunrpc/gss_api.h>
+ #include <linux/sunrpc/svcauth_gss.h>
++#include <linux/vs_tag.h>
+ 
+ #define NFSDDBG_FACILITY		NFSDDBG_XDR
+ 
+@@ -2047,14 +2048,18 @@ out_acl:
+ 		WRITE32(stat.nlink);
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_OWNER) {
+-		status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
++		status = nfsd4_encode_user(rqstp,
++			TAGINO_UID(DX_TAG(dentry->d_inode),
++			stat.uid, stat.tag), &p, &buflen);
+ 		if (status == nfserr_resource)
+ 			goto out_resource;
+ 		if (status)
+ 			goto out;
+ 	}
+ 	if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
+-		status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
++		status = nfsd4_encode_group(rqstp,
++			TAGINO_GID(DX_TAG(dentry->d_inode),
++			stat.gid, stat.tag), &p, &buflen);
+ 		if (status == nfserr_resource)
+ 			goto out_resource;
+ 		if (status)
+--- a/fs/nfsd/nfsxdr.c	2008-12-25 00:26:37.000000000 +0100
++++ a/fs/nfsd/nfsxdr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -15,6 +15,7 @@
+ #include <linux/nfsd/nfsd.h>
+ #include <linux/nfsd/xdr.h>
+ #include <linux/mm.h>
++#include <linux/vs_tag.h>
+ #include "auth.h"
+ 
+ #define NFSDDBG_FACILITY		NFSDDBG_XDR
+@@ -98,6 +99,8 @@ static __be32 *
+ decode_sattr(__be32 *p, struct iattr *iap)
+ {
+ 	u32	tmp, tmp1;
++	uid_t	uid = 0;
++	gid_t	gid = 0;
+ 
+ 	iap->ia_valid = 0;
+ 
+@@ -111,12 +114,15 @@ decode_sattr(__be32 *p, struct iattr *ia
+ 	}
+ 	if ((tmp = ntohl(*p++)) != (u32)-1) {
+ 		iap->ia_valid |= ATTR_UID;
+-		iap->ia_uid = tmp;
++		uid = tmp;
+ 	}
+ 	if ((tmp = ntohl(*p++)) != (u32)-1) {
+ 		iap->ia_valid |= ATTR_GID;
+-		iap->ia_gid = tmp;
++		gid = tmp;
+ 	}
++	iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
++	iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
++	iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
+ 	if ((tmp = ntohl(*p++)) != (u32)-1) {
+ 		iap->ia_valid |= ATTR_SIZE;
+ 		iap->ia_size = tmp;
+@@ -161,8 +167,10 @@ encode_fattr(struct svc_rqst *rqstp, __b
+ 	*p++ = htonl(nfs_ftypes[type >> 12]);
+ 	*p++ = htonl((u32) stat->mode);
+ 	*p++ = htonl((u32) stat->nlink);
+-	*p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
+-	*p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
++	*p++ = htonl((u32) nfsd_ruid(rqstp,
++		TAGINO_UID(DX_TAG(dentry->d_inode), stat->uid, stat->tag)));
++	*p++ = htonl((u32) nfsd_rgid(rqstp,
++		TAGINO_GID(DX_TAG(dentry->d_inode), stat->gid, stat->tag)));
+ 
+ 	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
+ 		*p++ = htonl(NFS_MAXPATHLEN);
+--- a/fs/ocfs2/dlm/dlmfs.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/ocfs2/dlm/dlmfs.c	2011-06-10 13:03:02.000000000 +0200
+@@ -43,6 +43,7 @@
+ #include <linux/init.h>
+ #include <linux/string.h>
+ #include <linux/backing-dev.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -342,6 +343,7 @@ static struct inode *dlmfs_get_root_inod
+ 		inode->i_mode = mode;
+ 		inode->i_uid = current_fsuid();
+ 		inode->i_gid = current_fsgid();
++		inode->i_tag = dx_current_fstag(sb);
+ 		inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ 		inc_nlink(inode);
+@@ -367,6 +369,7 @@ static struct inode *dlmfs_get_inode(str
+ 	inode->i_mode = mode;
+ 	inode->i_uid = current_fsuid();
+ 	inode->i_gid = current_fsgid();
++	inode->i_tag = dx_current_fstag(sb);
+ 	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
+ 	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ 
+--- a/fs/ocfs2/dlmglue.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/dlmglue.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1991,6 +1991,7 @@ static void __ocfs2_stuff_meta_lvb(struc
+ 	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
+ 	lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
+ 	lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
++	lvb->lvb_itag      = cpu_to_be16(inode->i_tag);
+ 	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
+ 	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
+ 	lvb->lvb_iatime_packed  =
+@@ -2045,6 +2046,7 @@ static void ocfs2_refresh_inode_from_lvb
+ 
+ 	inode->i_uid     = be32_to_cpu(lvb->lvb_iuid);
+ 	inode->i_gid     = be32_to_cpu(lvb->lvb_igid);
++	inode->i_tag     = be16_to_cpu(lvb->lvb_itag);
+ 	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
+ 	inode->i_nlink   = be16_to_cpu(lvb->lvb_inlink);
+ 	ocfs2_unpack_timespec(&inode->i_atime,
+--- a/fs/ocfs2/dlmglue.h	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/dlmglue.h	2011-06-10 13:03:02.000000000 +0200
+@@ -46,7 +46,8 @@ struct ocfs2_meta_lvb {
+ 	__be16       lvb_inlink;
+ 	__be32       lvb_iattr;
+ 	__be32       lvb_igeneration;
+-	__be32       lvb_reserved2;
++	__be16       lvb_itag;
++	__be16       lvb_reserved2;
+ };
+ 
+ #define OCFS2_QINFO_LVB_VERSION 1
+--- a/fs/ocfs2/file.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -960,13 +960,15 @@ int ocfs2_setattr(struct dentry *dentry,
+ 		mlog(0, "uid change: %d\n", attr->ia_uid);
+ 	if (attr->ia_valid & ATTR_GID)
+ 		mlog(0, "gid change: %d\n", attr->ia_gid);
++	if (attr->ia_valid & ATTR_TAG)
++		mlog(0, "tag change: %d\n", attr->ia_tag);
+ 	if (attr->ia_valid & ATTR_SIZE)
+ 		mlog(0, "size change...\n");
+ 	if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
+ 		mlog(0, "time change...\n");
+ 
+ #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
+-			   | ATTR_GID | ATTR_UID | ATTR_MODE)
++			   | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE)
+ 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
+ 		mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
+ 		return 0;
+--- a/fs/ocfs2/inode.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/ocfs2/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/highmem.h>
+ #include <linux/pagemap.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ 
+ #include <asm/byteorder.h>
+ 
+@@ -79,11 +80,13 @@ void ocfs2_set_inode_flags(struct inode 
+ {
+ 	unsigned int flags = OCFS2_I(inode)->ip_attr;
+ 
+-	inode->i_flags &= ~(S_IMMUTABLE |
++	inode->i_flags &= ~(S_IMMUTABLE | S_IXUNLINK |
+ 		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+ 
+ 	if (flags & OCFS2_IMMUTABLE_FL)
+ 		inode->i_flags |= S_IMMUTABLE;
++	if (flags & OCFS2_IXUNLINK_FL)
++		inode->i_flags |= S_IXUNLINK;
+ 
+ 	if (flags & OCFS2_SYNC_FL)
+ 		inode->i_flags |= S_SYNC;
+@@ -93,25 +96,44 @@ void ocfs2_set_inode_flags(struct inode 
+ 		inode->i_flags |= S_NOATIME;
+ 	if (flags & OCFS2_DIRSYNC_FL)
+ 		inode->i_flags |= S_DIRSYNC;
++
++	inode->i_vflags &= ~(V_BARRIER | V_COW);
++
++	if (flags & OCFS2_BARRIER_FL)
++		inode->i_vflags |= V_BARRIER;
++	if (flags & OCFS2_COW_FL)
++		inode->i_vflags |= V_COW;
+ }
+ 
+ /* Propagate flags from i_flags to OCFS2_I(inode)->ip_attr */
+ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi)
+ {
+ 	unsigned int flags = oi->vfs_inode.i_flags;
++	unsigned int vflags = oi->vfs_inode.i_vflags;
++
++	oi->ip_attr &= ~(OCFS2_SYNC_FL | OCFS2_APPEND_FL |
++			OCFS2_IMMUTABLE_FL | OCFS2_IXUNLINK_FL |
++			OCFS2_NOATIME_FL | OCFS2_DIRSYNC_FL |
++			OCFS2_BARRIER_FL | OCFS2_COW_FL);
++
++	if (flags & S_IMMUTABLE)
++		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
++	if (flags & S_IXUNLINK)
++		oi->ip_attr |= OCFS2_IXUNLINK_FL;
+ 
+-	oi->ip_attr &= ~(OCFS2_SYNC_FL|OCFS2_APPEND_FL|
+-			OCFS2_IMMUTABLE_FL|OCFS2_NOATIME_FL|OCFS2_DIRSYNC_FL);
+ 	if (flags & S_SYNC)
+ 		oi->ip_attr |= OCFS2_SYNC_FL;
+ 	if (flags & S_APPEND)
+ 		oi->ip_attr |= OCFS2_APPEND_FL;
+-	if (flags & S_IMMUTABLE)
+-		oi->ip_attr |= OCFS2_IMMUTABLE_FL;
+ 	if (flags & S_NOATIME)
+ 		oi->ip_attr |= OCFS2_NOATIME_FL;
+ 	if (flags & S_DIRSYNC)
+ 		oi->ip_attr |= OCFS2_DIRSYNC_FL;
++
++	if (vflags & V_BARRIER)
++		oi->ip_attr |= OCFS2_BARRIER_FL;
++	if (vflags & V_COW)
++		oi->ip_attr |= OCFS2_COW_FL;
+ }
+ 
+ struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
+@@ -246,6 +268,8 @@ void ocfs2_populate_inode(struct inode *
+ 	struct super_block *sb;
+ 	struct ocfs2_super *osb;
+ 	int use_plocks = 1;
++	uid_t uid;
++	gid_t gid;
+ 
+ 	mlog_entry("(0x%p, size:%llu)\n", inode,
+ 		   (unsigned long long)le64_to_cpu(fe->i_size));
+@@ -277,8 +301,12 @@ void ocfs2_populate_inode(struct inode *
+ 	inode->i_generation = le32_to_cpu(fe->i_generation);
+ 	inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
+ 	inode->i_mode = le16_to_cpu(fe->i_mode);
+-	inode->i_uid = le32_to_cpu(fe->i_uid);
+-	inode->i_gid = le32_to_cpu(fe->i_gid);
++	uid = le32_to_cpu(fe->i_uid);
++	gid = le32_to_cpu(fe->i_gid);
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
++		/* le16_to_cpu(raw_inode->i_raw_tag)i */ 0);
+ 
+ 	/* Fast symlinks will have i_size but no allocated clusters. */
+ 	if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
+--- a/fs/ocfs2/inode.h	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/inode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -150,6 +150,7 @@ struct buffer_head *ocfs2_bread(struct i
+ 
+ void ocfs2_set_inode_flags(struct inode *inode);
+ void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
++int ocfs2_sync_flags(struct inode *inode, int, int);
+ 
+ static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
+ {
+--- a/fs/ocfs2/ioctl.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -42,7 +42,41 @@ static int ocfs2_get_inode_attr(struct i
+ 	return status;
+ }
+ 
+-static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
++int ocfs2_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct buffer_head *bh = NULL;
++	handle_t *handle = NULL;
++	int status;
++
++	status = ocfs2_inode_lock(inode, &bh, 1);
++	if (status < 0) {
++		mlog_errno(status);
++		return status;
++	}
++	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
++	if (IS_ERR(handle)) {
++		status = PTR_ERR(handle);
++		mlog_errno(status);
++		goto bail_unlock;
++	}
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	ocfs2_get_inode_flags(OCFS2_I(inode));
++
++	status = ocfs2_mark_inode_dirty(handle, inode, bh);
++	if (status < 0)
++		mlog_errno(status);
++
++	ocfs2_commit_trans(osb, handle);
++bail_unlock:
++	ocfs2_inode_unlock(inode, 1);
++	brelse(bh);
++	return status;
++}
++
++int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+ 				unsigned mask)
+ {
+ 	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
+@@ -67,6 +101,11 @@ static int ocfs2_set_inode_attr(struct i
+ 	if (!S_ISDIR(inode->i_mode))
+ 		flags &= ~OCFS2_DIRSYNC_FL;
+ 
++	if (IS_BARRIER(inode)) {
++		vxwprintk_task(1, "messing with the barrier.");
++		goto bail_unlock;
++	}
++
+ 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+ 	if (IS_ERR(handle)) {
+ 		status = PTR_ERR(handle);
+@@ -108,6 +147,7 @@ bail:
+ 	return status;
+ }
+ 
++
+ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ {
+ 	struct inode *inode = filp->f_path.dentry->d_inode;
+--- a/fs/ocfs2/namei.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include <linux/slab.h>
+ #include <linux/highmem.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ 
+ #define MLOG_MASK_PREFIX ML_NAMEI
+ #include <cluster/masklog.h>
+@@ -481,6 +482,7 @@ static int ocfs2_mknod_locked(struct ocf
+ 	u64 fe_blkno = 0;
+ 	u16 suballoc_bit;
+ 	u16 feat;
++	tag_t tag;
+ 
+ 	*new_fe_bh = NULL;
+ 
+@@ -524,8 +526,11 @@ static int ocfs2_mknod_locked(struct ocf
+ 	fe->i_blkno = cpu_to_le64(fe_blkno);
+ 	fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
+ 	fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot);
+-	fe->i_uid = cpu_to_le32(inode->i_uid);
+-	fe->i_gid = cpu_to_le32(inode->i_gid);
++
++	tag = dx_current_fstag(osb->sb);
++	fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode), inode->i_uid, tag));
++	fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode), inode->i_gid, tag));
++	inode->i_tag = tag;
+ 	fe->i_mode = cpu_to_le16(inode->i_mode);
+ 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
+ 		fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
+--- a/fs/ocfs2/ocfs2.h	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/ocfs2.h	2011-06-10 13:03:02.000000000 +0200
+@@ -248,6 +248,7 @@ enum ocfs2_mount_options
+ 	OCFS2_MOUNT_POSIX_ACL = 1 << 8,	/* POSIX access control lists */
+ 	OCFS2_MOUNT_USRQUOTA = 1 << 9, /* We support user quotas */
+ 	OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
++	OCFS2_MOUNT_TAGGED = 1 << 11, /* use tagging */
+ };
+ 
+ #define OCFS2_OSB_SOFT_RO			0x0001
+--- a/fs/ocfs2/ocfs2_fs.h	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/ocfs2/ocfs2_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -231,18 +231,23 @@
+ #define OCFS2_HAS_REFCOUNT_FL   (0x0010)
+ 
+ /* Inode attributes, keep in sync with EXT2 */
+-#define OCFS2_SECRM_FL		(0x00000001)	/* Secure deletion */
+-#define OCFS2_UNRM_FL		(0x00000002)	/* Undelete */
+-#define OCFS2_COMPR_FL		(0x00000004)	/* Compress file */
+-#define OCFS2_SYNC_FL		(0x00000008)	/* Synchronous updates */
+-#define OCFS2_IMMUTABLE_FL	(0x00000010)	/* Immutable file */
+-#define OCFS2_APPEND_FL		(0x00000020)	/* writes to file may only append */
+-#define OCFS2_NODUMP_FL		(0x00000040)	/* do not dump file */
+-#define OCFS2_NOATIME_FL	(0x00000080)	/* do not update atime */
+-#define OCFS2_DIRSYNC_FL	(0x00010000)	/* dirsync behaviour (directories only) */
++#define OCFS2_SECRM_FL		FS_SECRM_FL	/* Secure deletion */
++#define OCFS2_UNRM_FL		FS_UNRM_FL	/* Undelete */
++#define OCFS2_COMPR_FL		FS_COMPR_FL	/* Compress file */
++#define OCFS2_SYNC_FL		FS_SYNC_FL	/* Synchronous updates */
++#define OCFS2_IMMUTABLE_FL	FS_IMMUTABLE_FL	/* Immutable file */
++#define OCFS2_APPEND_FL		FS_APPEND_FL	/* writes to file may only append */
++#define OCFS2_NODUMP_FL		FS_NODUMP_FL	/* do not dump file */
++#define OCFS2_NOATIME_FL	FS_NOATIME_FL	/* do not update atime */
+ 
+-#define OCFS2_FL_VISIBLE	(0x000100FF)	/* User visible flags */
+-#define OCFS2_FL_MODIFIABLE	(0x000100FF)	/* User modifiable flags */
++#define OCFS2_DIRSYNC_FL	FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
++#define OCFS2_IXUNLINK_FL	FS_IXUNLINK_FL	/* Immutable invert on unlink */
++
++#define OCFS2_BARRIER_FL	FS_BARRIER_FL	/* Barrier for chroot() */
++#define OCFS2_COW_FL		FS_COW_FL	/* Copy on Write marker */
++
++#define OCFS2_FL_VISIBLE	(0x010300FF)	/* User visible flags */
++#define OCFS2_FL_MODIFIABLE	(0x010300FF)	/* User modifiable flags */
+ 
+ /*
+  * Extent record flags (e_node.leaf.flags)
+--- a/fs/ocfs2/super.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/ocfs2/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -173,6 +173,7 @@ enum {
+ 	Opt_noacl,
+ 	Opt_usrquota,
+ 	Opt_grpquota,
++	Opt_tag, Opt_notag, Opt_tagid,
+ 	Opt_err,
+ };
+ 
+@@ -199,6 +200,9 @@ static const match_table_t tokens = {
+ 	{Opt_noacl, "noacl"},
+ 	{Opt_usrquota, "usrquota"},
+ 	{Opt_grpquota, "grpquota"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
++	{Opt_tagid, "tagid=%u"},
+ 	{Opt_err, NULL}
+ };
+ 
+@@ -605,6 +609,13 @@ static int ocfs2_remount(struct super_bl
+ 		goto out;
+ 	}
+ 
++	if ((osb->s_mount_opt & OCFS2_MOUNT_TAGGED) !=
++	    (parsed_options.mount_opt & OCFS2_MOUNT_TAGGED)) {
++		ret = -EINVAL;
++		mlog(ML_ERROR, "Cannot change tagging on remount\n");
++		goto out;
++	}
++
+ 	if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
+ 	    (parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
+ 		ret = -EINVAL;
+@@ -1152,6 +1163,9 @@ static int ocfs2_fill_super(struct super
+ 
+ 	ocfs2_complete_mount_recovery(osb);
+ 
++	if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;
++
+ 	if (ocfs2_mount_local(osb))
+ 		snprintf(nodestr, sizeof(nodestr), "local");
+ 	else
+@@ -1430,6 +1444,20 @@ static int ocfs2_parse_options(struct su
+ 			printk(KERN_INFO "ocfs2 (no)acl options not supported\n");
+ 			break;
+ #endif
++#ifndef CONFIG_TAGGING_NONE
++		case Opt_tag:
++			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
++			break;
++		case Opt_notag:
++			mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED;
++			break;
++#endif
++#ifdef CONFIG_PROPAGATE
++		case Opt_tagid:
++			/* use args[0] */
++			mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
++			break;
++#endif
+ 		default:
+ 			mlog(ML_ERROR,
+ 			     "Unrecognized mount option \"%s\" "
+--- a/fs/open.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/open.c	2011-06-10 13:03:02.000000000 +0200
+@@ -30,22 +30,30 @@
+ #include <linux/audit.h>
+ #include <linux/falloc.h>
+ #include <linux/fs_struct.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_cowbl.h>
+ 
+ int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ 	int retval = -ENODEV;
+ 
+ 	if (dentry) {
++		struct super_block *sb = dentry->d_sb;
++
+ 		retval = -ENOSYS;
+-		if (dentry->d_sb->s_op->statfs) {
++		if (sb->s_op->statfs) {
+ 			memset(buf, 0, sizeof(*buf));
+ 			retval = security_sb_statfs(dentry);
+ 			if (retval)
+ 				return retval;
+-			retval = dentry->d_sb->s_op->statfs(dentry, buf);
++			retval = sb->s_op->statfs(dentry, buf);
+ 			if (retval == 0 && buf->f_frsize == 0)
+ 				buf->f_frsize = buf->f_bsize;
+ 		}
++		if (!vx_check(0, VS_ADMIN|VS_WATCH))
++			vx_vsi_statfs(sb, buf);
+ 	}
+ 	return retval;
+ }
+@@ -640,6 +648,10 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
+ 	error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
+ 	if (error)
+ 		goto out;
++
++	error = cow_check_and_break(&path);
++	if (error)
++		goto dput_and_out;
+ 	inode = path.dentry->d_inode;
+ 
+ 	error = mnt_want_write(path.mnt);
+@@ -673,11 +685,11 @@ static int chown_common(struct dentry * 
+ 	newattrs.ia_valid =  ATTR_CTIME;
+ 	if (user != (uid_t) -1) {
+ 		newattrs.ia_valid |= ATTR_UID;
+-		newattrs.ia_uid = user;
++		newattrs.ia_uid = dx_map_uid(user);
+ 	}
+ 	if (group != (gid_t) -1) {
+ 		newattrs.ia_valid |= ATTR_GID;
+-		newattrs.ia_gid = group;
++		newattrs.ia_gid = dx_map_gid(group);
+ 	}
+ 	if (!S_ISDIR(inode->i_mode))
+ 		newattrs.ia_valid |=
+@@ -700,7 +712,11 @@ SYSCALL_DEFINE3(chown, const char __user
+ 	error = mnt_want_write(path.mnt);
+ 	if (error)
+ 		goto out_release;
+-	error = chown_common(path.dentry, user, group);
++#ifdef CONFIG_VSERVER_COWBL
++	error = cow_check_and_break(&path);
++	if (!error)
++#endif
++		error = chown_common(path.dentry, user, group);
+ 	mnt_drop_write(path.mnt);
+ out_release:
+ 	path_put(&path);
+@@ -725,7 +741,11 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
+ 	error = mnt_want_write(path.mnt);
+ 	if (error)
+ 		goto out_release;
+-	error = chown_common(path.dentry, user, group);
++#ifdef CONFIG_VSERVER_COWBL
++	error = cow_check_and_break(&path);
++	if (!error)
++#endif
++		error = chown_common(path.dentry, user, group);
+ 	mnt_drop_write(path.mnt);
+ out_release:
+ 	path_put(&path);
+@@ -744,7 +764,11 @@ SYSCALL_DEFINE3(lchown, const char __use
+ 	error = mnt_want_write(path.mnt);
+ 	if (error)
+ 		goto out_release;
+-	error = chown_common(path.dentry, user, group);
++#ifdef CONFIG_VSERVER_COWBL
++	error = cow_check_and_break(&path);
++	if (!error)
++#endif
++		error = chown_common(path.dentry, user, group);
+ 	mnt_drop_write(path.mnt);
+ out_release:
+ 	path_put(&path);
+@@ -990,6 +1014,7 @@ static void __put_unused_fd(struct files
+ 	__FD_CLR(fd, fdt->open_fds);
+ 	if (fd < files->next_fd)
+ 		files->next_fd = fd;
++	vx_openfd_dec(fd);
+ }
+ 
+ void put_unused_fd(unsigned int fd)
+--- a/fs/proc/array.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/proc/array.c	2011-06-10 18:44:08.000000000 +0200
+@@ -82,6 +82,8 @@
+ #include <linux/pid_namespace.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -166,6 +168,9 @@ static inline void task_state(struct seq
+ 	rcu_read_lock();
+ 	ppid = pid_alive(p) ?
+ 		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
++	if (unlikely(vx_current_initpid(p->pid)))
++		ppid = 0;
++
+ 	tpid = 0;
+ 	if (pid_alive(p)) {
+ 		struct task_struct *tracer = tracehook_tracer_task(p);
+@@ -281,7 +286,7 @@ static inline void task_sig(struct seq_f
+ }
+ 
+ static void render_cap_t(struct seq_file *m, const char *header,
+-			kernel_cap_t *a)
++			struct vx_info *vxi, kernel_cap_t *a)
+ {
+ 	unsigned __capi;
+ 
+@@ -306,10 +311,11 @@ static inline void task_cap(struct seq_f
+ 	cap_bset	= cred->cap_bset;
+ 	rcu_read_unlock();
+ 
+-	render_cap_t(m, "CapInh:\t", &cap_inheritable);
+-	render_cap_t(m, "CapPrm:\t", &cap_permitted);
+-	render_cap_t(m, "CapEff:\t", &cap_effective);
+-	render_cap_t(m, "CapBnd:\t", &cap_bset);
++	/* FIXME: maybe move the p->vx_info masking to __task_cred() ? */
++	render_cap_t(m, "CapInh:\t", p->vx_info, &cap_inheritable);
++	render_cap_t(m, "CapPrm:\t", p->vx_info, &cap_permitted);
++	render_cap_t(m, "CapEff:\t", p->vx_info, &cap_effective);
++	render_cap_t(m, "CapBnd:\t", p->vx_info, &cap_bset);
+ }
+ 
+ static inline void task_context_switch_counts(struct seq_file *m,
+@@ -321,6 +327,43 @@ static inline void task_context_switch_c
+ 			p->nivcsw);
+ }
+ 
++
++int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
++			struct pid *pid, struct task_struct *task)
++{
++	seq_printf(m,	"Proxy:\t%p(%c)\n"
++			"Count:\t%u\n"
++			"uts:\t%p(%c)\n"
++			"ipc:\t%p(%c)\n"
++			"mnt:\t%p(%c)\n"
++			"pid:\t%p(%c)\n"
++			"net:\t%p(%c)\n",
++			task->nsproxy,
++			(task->nsproxy == init_task.nsproxy ? 'I' : '-'),
++			atomic_read(&task->nsproxy->count),
++			task->nsproxy->uts_ns,
++			(task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'),
++			task->nsproxy->ipc_ns,
++			(task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'),
++			task->nsproxy->mnt_ns,
++			(task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'),
++			task->nsproxy->pid_ns,
++			(task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'),
++			task->nsproxy->net_ns,
++			(task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 'I' : '-'));
++	return 0;
++}
++
++void task_vs_id(struct seq_file *m, struct task_struct *task)
++{
++	if (task_vx_flags(task, VXF_HIDE_VINFO, 0))
++		return;
++
++	seq_printf(m, "VxID: %d\n", vx_task_xid(task));
++	seq_printf(m, "NxID: %d\n", nx_task_nid(task));
++}
++
++
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ 			struct pid *pid, struct task_struct *task)
+ {
+@@ -336,6 +379,7 @@ int proc_pid_status(struct seq_file *m, 
+ 	task_sig(m, task);
+ 	task_cap(m, task);
+ 	cpuset_task_status_allowed(m, task);
++	task_vs_id(m, task);
+ 	task_context_switch_counts(m, task);
+ 	return 0;
+ }
+@@ -446,6 +490,17 @@ static int do_task_stat(struct seq_file 
+ 	/* convert nsec -> ticks */
+ 	start_time = nsec_to_clock_t(start_time);
+ 
++	/* fixup start time for virt uptime */
++	if (vx_flags(VXF_VIRT_UPTIME, 0)) {
++		unsigned long long bias =
++			current->vx_info->cvirt.bias_clock;
++
++		if (start_time > bias)
++			start_time -= bias;
++		else
++			start_time = 0;
++	}
++
+ 	seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \
+ %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
+ %lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
+--- a/fs/proc/base.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/proc/base.c	2011-06-10 13:20:55.000000000 +0200
+@@ -81,6 +81,8 @@
+ #include <linux/elf.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/fs_struct.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+ #include "internal.h"
+ 
+ /* NOTE:
+@@ -1048,12 +1050,17 @@ static ssize_t oom_adjust_write(struct f
+ 		return -ESRCH;
+ 	}
+ 
+-	if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) {
++	if (oom_adjust < task->signal->oom_adj &&
++		!vx_capable(CAP_SYS_RESOURCE, VXC_OOM_ADJUST)) {
+ 		unlock_task_sighand(task, &flags);
+ 		put_task_struct(task);
+ 		return -EACCES;
+ 	}
+ 
++	/* prevent guest processes from circumventing the oom killer */
++	if (vx_current_xid() && (oom_adjust == OOM_DISABLE))
++		oom_adjust = OOM_ADJUST_MIN;
++
+ 	task->signal->oom_adj = oom_adjust;
+ 
+ 	unlock_task_sighand(task, &flags);
+@@ -1093,7 +1100,7 @@ static ssize_t proc_loginuid_write(struc
+ 	ssize_t length;
+ 	uid_t loginuid;
+ 
+-	if (!capable(CAP_AUDIT_CONTROL))
++	if (!vx_capable(CAP_AUDIT_CONTROL, VXC_AUDIT_CONTROL))
+ 		return -EPERM;
+ 
+ 	if (current != pid_task(proc_pid(inode), PIDTYPE_PID))
+@@ -1459,6 +1466,8 @@ static struct inode *proc_pid_make_inode
+ 		inode->i_gid = cred->egid;
+ 		rcu_read_unlock();
+ 	}
++	/* procfs is xid tagged */
++	inode->i_tag = (tag_t)vx_task_xid(task);
+ 	security_task_to_inode(task, inode);
+ 
+ out:
+@@ -2009,6 +2018,13 @@ static struct dentry *proc_pident_lookup
+ 	if (!task)
+ 		goto out_no_task;
+ 
++	/* TODO: maybe we can come up with a generic approach? */
++	if (task_vx_flags(task, VXF_HIDE_VINFO, 0) &&
++		(dentry->d_name.len == 5) &&
++		(!memcmp(dentry->d_name.name, "vinfo", 5) ||
++		!memcmp(dentry->d_name.name, "ninfo", 5)))
++		goto out;
++
+ 	/*
+ 	 * Yes, it does not scale. And it should not. Don't add
+ 	 * new entries into /proc/<tgid>/ without very good reasons.
+@@ -2414,7 +2430,7 @@ out_iput:
+ static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
+ {
+ 	struct dentry *error;
+-	struct task_struct *task = get_proc_task(dir);
++	struct task_struct *task = get_proc_task_real(dir);
+ 	const struct pid_entry *p, *last;
+ 
+ 	error = ERR_PTR(-ENOENT);
+@@ -2504,6 +2520,9 @@ static int proc_pid_personality(struct s
+ static const struct file_operations proc_task_operations;
+ static const struct inode_operations proc_task_inode_operations;
+ 
++extern int proc_pid_vx_info(struct task_struct *, char *);
++extern int proc_pid_nx_info(struct task_struct *, char *);
++
+ static const struct pid_entry tgid_base_stuff[] = {
+ 	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
+ 	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
+@@ -2562,6 +2581,8 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_CGROUPS
+ 	REG("cgroup",  S_IRUGO, proc_cgroup_operations),
+ #endif
++	INF("vinfo",      S_IRUGO, proc_pid_vx_info),
++	INF("ninfo",	  S_IRUGO, proc_pid_nx_info),
+ 	INF("oom_score",  S_IRUGO, proc_oom_score),
+ 	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ #ifdef CONFIG_AUDITSYSCALL
+@@ -2577,6 +2598,7 @@ static const struct pid_entry tgid_base_
+ #ifdef CONFIG_TASK_IO_ACCOUNTING
+ 	INF("io",	S_IRUSR, proc_tgid_io_accounting),
+ #endif
++	ONE("nsproxy",	S_IRUGO, proc_pid_nsproxy),
+ };
+ 
+ static int proc_tgid_base_readdir(struct file * filp,
+@@ -2768,7 +2790,7 @@ retry:
+ 	iter.task = NULL;
+ 	pid = find_ge_pid(iter.tgid, ns);
+ 	if (pid) {
+-		iter.tgid = pid_nr_ns(pid, ns);
++		iter.tgid = pid_unmapped_nr_ns(pid, ns);
+ 		iter.task = pid_task(pid, PIDTYPE_PID);
+ 		/* What we to know is if the pid we have find is the
+ 		 * pid of a thread_group_leader.  Testing for task
+@@ -2798,7 +2820,7 @@ static int proc_pid_fill_cache(struct fi
+ 	struct tgid_iter iter)
+ {
+ 	char name[PROC_NUMBUF];
+-	int len = snprintf(name, sizeof(name), "%d", iter.tgid);
++	int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid));
+ 	return proc_fill_cache(filp, dirent, filldir, name, len,
+ 				proc_pid_instantiate, iter.task, NULL);
+ }
+@@ -2815,7 +2837,7 @@ int proc_pid_readdir(struct file * filp,
+ 		goto out_no_task;
+ 	nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+ 
+-	reaper = get_proc_task(filp->f_path.dentry->d_inode);
++	reaper = get_proc_task_real(filp->f_path.dentry->d_inode);
+ 	if (!reaper)
+ 		goto out_no_task;
+ 
+@@ -2832,6 +2854,8 @@ int proc_pid_readdir(struct file * filp,
+ 	     iter.task;
+ 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
+ 		filp->f_pos = iter.tgid + TGID_OFFSET;
++		if (!vx_proc_task_visible(iter.task))
++			continue;
+ 		if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
+ 			put_task_struct(iter.task);
+ 			goto out;
+@@ -2978,6 +3002,8 @@ static struct dentry *proc_task_lookup(s
+ 	tid = name_to_int(dentry);
+ 	if (tid == ~0U)
+ 		goto out;
++	if (vx_current_initpid(tid))
++		goto out;
+ 
+ 	ns = dentry->d_sb->s_fs_info;
+ 	rcu_read_lock();
+--- a/fs/proc/generic.c	2009-06-11 17:13:07.000000000 +0200
++++ a/fs/proc/generic.c	2011-06-10 13:03:02.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/bitops.h>
+ #include <linux/spinlock.h>
+ #include <linux/completion.h>
++#include <linux/vserver/inode.h>
+ #include <asm/uaccess.h>
+ 
+ #include "internal.h"
+@@ -425,6 +426,8 @@ struct dentry *proc_lookup_de(struct pro
+ 	for (de = de->subdir; de ; de = de->next) {
+ 		if (de->namelen != dentry->d_name.len)
+ 			continue;
++			if (!vx_hide_check(0, de->vx_flags))
++				continue;
+ 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
+ 			unsigned int ino;
+ 
+@@ -433,6 +436,8 @@ struct dentry *proc_lookup_de(struct pro
+ 			spin_unlock(&proc_subdir_lock);
+ 			error = -EINVAL;
+ 			inode = proc_get_inode(dir->i_sb, ino, de);
++				/* generic proc entries belong to the host */
++				inode->i_tag = 0;
+ 			goto out_unlock;
+ 		}
+ 	}
+@@ -510,6 +515,8 @@ int proc_readdir_de(struct proc_dir_entr
+ 
+ 				/* filldir passes info to user space */
+ 				de_get(de);
++				if (!vx_hide_check(0, de->vx_flags))
++					goto skip;
+ 				spin_unlock(&proc_subdir_lock);
+ 				if (filldir(dirent, de->name, de->namelen, filp->f_pos,
+ 					    de->low_ino, de->mode >> 12) < 0) {
+@@ -517,6 +524,7 @@ int proc_readdir_de(struct proc_dir_entr
+ 					goto out;
+ 				}
+ 				spin_lock(&proc_subdir_lock);
++			skip:
+ 				filp->f_pos++;
+ 				next = de->next;
+ 				de_put(de);
+@@ -631,6 +639,7 @@ static struct proc_dir_entry *__proc_cre
+ 	ent->nlink = nlink;
+ 	atomic_set(&ent->count, 1);
+ 	ent->pde_users = 0;
++	ent->vx_flags = IATTR_PROC_DEFAULT;
+ 	spin_lock_init(&ent->pde_unload_lock);
+ 	ent->pde_unload_completion = NULL;
+ 	INIT_LIST_HEAD(&ent->pde_openers);
+@@ -654,7 +663,8 @@ struct proc_dir_entry *proc_symlink(cons
+ 				kfree(ent->data);
+ 				kfree(ent);
+ 				ent = NULL;
+-			}
++			} else
++				ent->vx_flags = IATTR_PROC_SYMLINK;
+ 		} else {
+ 			kfree(ent);
+ 			ent = NULL;
+--- a/fs/proc/inode.c	2009-06-11 17:13:07.000000000 +0200
++++ a/fs/proc/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -459,6 +459,8 @@ struct inode *proc_get_inode(struct supe
+ 			inode->i_uid = de->uid;
+ 			inode->i_gid = de->gid;
+ 		}
++		if (de->vx_flags)
++			PROC_I(inode)->vx_flags = de->vx_flags;
+ 		if (de->size)
+ 			inode->i_size = de->size;
+ 		if (de->nlink)
+--- a/fs/proc/internal.h	2009-09-10 15:26:23.000000000 +0200
++++ a/fs/proc/internal.h	2011-06-10 13:03:02.000000000 +0200
+@@ -10,6 +10,7 @@
+  */
+ 
+ #include <linux/proc_fs.h>
++#include <linux/vs_pid.h>
+ 
+ extern struct proc_dir_entry proc_root;
+ #ifdef CONFIG_PROC_SYSCTL
+@@ -51,6 +52,9 @@ extern int proc_pid_status(struct seq_fi
+ 				struct pid *pid, struct task_struct *task);
+ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+ 				struct pid *pid, struct task_struct *task);
++extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
++				struct pid *pid, struct task_struct *task);
++
+ extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
+ 
+ extern const struct file_operations proc_maps_operations;
+@@ -70,11 +74,16 @@ static inline struct pid *proc_pid(struc
+ 	return PROC_I(inode)->pid;
+ }
+ 
+-static inline struct task_struct *get_proc_task(struct inode *inode)
++static inline struct task_struct *get_proc_task_real(struct inode *inode)
+ {
+ 	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
+ }
+ 
++static inline struct task_struct *get_proc_task(struct inode *inode)
++{
++	return vx_get_proc_task(inode, proc_pid(inode));
++}
++
+ static inline int proc_fd(struct inode *inode)
+ {
+ 	return PROC_I(inode)->fd;
+--- a/fs/proc/loadavg.c	2009-09-10 15:26:23.000000000 +0200
++++ a/fs/proc/loadavg.c	2011-06-10 13:03:02.000000000 +0200
+@@ -12,15 +12,27 @@
+ 
+ static int loadavg_proc_show(struct seq_file *m, void *v)
+ {
++	unsigned long running;
++	unsigned int threads;
+ 	unsigned long avnrun[3];
+ 
+ 	get_avenrun(avnrun, FIXED_1/200, 0);
+ 
++	if (vx_flags(VXF_VIRT_LOAD, 0)) {
++		struct vx_info *vxi = current_vx_info();
++
++		running = atomic_read(&vxi->cvirt.nr_running);
++		threads = atomic_read(&vxi->cvirt.nr_threads);
++	} else {
++		running = nr_running();
++		threads = nr_threads;
++	}
++
+ 	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
+ 		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+ 		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+ 		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
+-		nr_running(), nr_threads,
++		running, threads,
+ 		task_active_pid_ns(current)->last_pid);
+ 	return 0;
+ }
+--- a/fs/proc/meminfo.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/proc/meminfo.c	2011-06-10 13:03:02.000000000 +0200
+@@ -39,7 +39,8 @@ static int meminfo_proc_show(struct seq_
+ 	allowed = ((totalram_pages - hugetlb_total_pages())
+ 		* sysctl_overcommit_ratio / 100) + total_swap_pages;
+ 
+-	cached = global_page_state(NR_FILE_PAGES) -
++	cached = vx_flags(VXF_VIRT_MEM, 0) ?
++		vx_vsi_cached(&i) : global_page_state(NR_FILE_PAGES) -
+ 			total_swapcache_pages - i.bufferram;
+ 	if (cached < 0)
+ 		cached = 0;
+--- a/fs/proc/root.c	2009-06-11 17:13:07.000000000 +0200
++++ a/fs/proc/root.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,9 +18,14 @@
+ #include <linux/bitops.h>
+ #include <linux/mount.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vserver/inode.h>
+ 
+ #include "internal.h"
+ 
++struct proc_dir_entry *proc_virtual;
++
++extern void proc_vx_init(void);
++
+ static int proc_test_super(struct super_block *sb, void *data)
+ {
+ 	return sb->s_fs_info == data;
+@@ -136,6 +141,7 @@ void __init proc_root_init(void)
+ #endif
+ 	proc_mkdir("bus", NULL);
+ 	proc_sys_init();
++	proc_vx_init();
+ }
+ 
+ static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat
+@@ -203,6 +209,7 @@ struct proc_dir_entry proc_root = {
+ 	.proc_iops	= &proc_root_inode_operations, 
+ 	.proc_fops	= &proc_root_operations,
+ 	.parent		= &proc_root,
++	.vx_flags	= IATTR_ADMIN | IATTR_WATCH,
+ };
+ 
+ int pid_ns_prepare_proc(struct pid_namespace *ns)
+--- a/fs/proc/uptime.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/proc/uptime.c	2011-06-10 13:03:02.000000000 +0200
+@@ -4,22 +4,22 @@
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <linux/time.h>
+-#include <linux/kernel_stat.h>
++#include <linux/vserver/cvirt.h>
+ #include <asm/cputime.h>
+ 
+ static int uptime_proc_show(struct seq_file *m, void *v)
+ {
+ 	struct timespec uptime;
+ 	struct timespec idle;
+-	int i;
+-	cputime_t idletime = cputime_zero;
+-
+-	for_each_possible_cpu(i)
+-		idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
++	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);
+ 
+ 	do_posix_clock_monotonic_gettime(&uptime);
+ 	monotonic_to_bootbased(&uptime);
+ 	cputime_to_timespec(idletime, &idle);
++
++	if (vx_flags(VXF_VIRT_UPTIME, 0))
++		vx_vsi_uptime(&uptime, &idle);
++
+ 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
+ 			(unsigned long) uptime.tv_sec,
+ 			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
+--- a/fs/quota/quota.c	2009-09-10 15:26:24.000000000 +0200
++++ a/fs/quota/quota.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/capability.h>
+ #include <linux/quotaops.h>
+ #include <linux/types.h>
++#include <linux/vs_context.h>
+ 
+ /* Check validity of generic quotactl commands */
+ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
+@@ -83,11 +84,11 @@ static int generic_quotactl_valid(struct
+ 	if (cmd == Q_GETQUOTA) {
+ 		if (((type == USRQUOTA && current_euid() != id) ||
+ 		     (type == GRPQUOTA && !in_egroup_p(id))) &&
+-		    !capable(CAP_SYS_ADMIN))
++		    !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
+ 			return -EPERM;
+ 	}
+ 	else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
+-		if (!capable(CAP_SYS_ADMIN))
++		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
+ 			return -EPERM;
+ 
+ 	return 0;
+@@ -135,10 +136,10 @@ static int xqm_quotactl_valid(struct sup
+ 	if (cmd == Q_XGETQUOTA) {
+ 		if (((type == XQM_USRQUOTA && current_euid() != id) ||
+ 		     (type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
+-		     !capable(CAP_SYS_ADMIN))
++		     !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
+ 			return -EPERM;
+ 	} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
+-		if (!capable(CAP_SYS_ADMIN))
++		if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
+ 			return -EPERM;
+ 	}
+ 
+@@ -351,6 +352,46 @@ static int do_quotactl(struct super_bloc
+ 	return 0;
+ }
+ 
++#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
++
++#include <linux/vroot.h>
++#include <linux/major.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/vserver/debug.h>
++
++static vroot_grb_func *vroot_get_real_bdev = NULL;
++
++static spinlock_t vroot_grb_lock = SPIN_LOCK_UNLOCKED;
++
++int register_vroot_grb(vroot_grb_func *func) {
++	int ret = -EBUSY;
++
++	spin_lock(&vroot_grb_lock);
++	if (!vroot_get_real_bdev) {
++		vroot_get_real_bdev = func;
++		ret = 0;
++	}
++	spin_unlock(&vroot_grb_lock);
++	return ret;
++}
++EXPORT_SYMBOL(register_vroot_grb);
++
++int unregister_vroot_grb(vroot_grb_func *func) {
++	int ret = -EINVAL;
++
++	spin_lock(&vroot_grb_lock);
++	if (vroot_get_real_bdev) {
++		vroot_get_real_bdev = NULL;
++		ret = 0;
++	}
++	spin_unlock(&vroot_grb_lock);
++	return ret;
++}
++EXPORT_SYMBOL(unregister_vroot_grb);
++
++#endif
++
+ /*
+  * look up a superblock on which quota ops will be performed
+  * - use the name of a block device to find the superblock thereon
+@@ -368,6 +409,22 @@ static struct super_block *quotactl_bloc
+ 	putname(tmp);
+ 	if (IS_ERR(bdev))
+ 		return ERR_CAST(bdev);
++#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
++	if (bdev && bdev->bd_inode &&
++			imajor(bdev->bd_inode) == VROOT_MAJOR) {
++		struct block_device *bdnew = (void *)-EINVAL;
++
++		if (vroot_get_real_bdev)
++			bdnew = vroot_get_real_bdev(bdev);
++		else
++			vxdprintk(VXD_CBIT(misc, 0),
++					"vroot_get_real_bdev not set");
++		bdput(bdev);
++		if (IS_ERR(bdnew))
++			return ERR_PTR(PTR_ERR(bdnew));
++		bdev = bdnew;
++	}
++#endif
+ 	sb = get_super(bdev);
+ 	bdput(bdev);
+ 	if (!sb)
+--- a/fs/reiserfs/file.c	2009-06-11 17:13:08.000000000 +0200
++++ a/fs/reiserfs/file.c	2011-06-10 13:03:02.000000000 +0200
+@@ -307,4 +307,5 @@ const struct inode_operations reiserfs_f
+ 	.listxattr = reiserfs_listxattr,
+ 	.removexattr = reiserfs_removexattr,
+ 	.permission = reiserfs_permission,
++	.sync_flags = reiserfs_sync_flags,
+ };
+--- a/fs/reiserfs/inode.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/reiserfs/inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/writeback.h>
+ #include <linux/quotaops.h>
+ #include <linux/swap.h>
++#include <linux/vs_tag.h>
+ 
+ int reiserfs_commit_write(struct file *f, struct page *page,
+ 			  unsigned from, unsigned to);
+@@ -1117,6 +1118,8 @@ static void init_inode(struct inode *ino
+ 	struct buffer_head *bh;
+ 	struct item_head *ih;
+ 	__u32 rdev;
++	uid_t uid;
++	gid_t gid;
+ 	//int version = ITEM_VERSION_1;
+ 
+ 	bh = PATH_PLAST_BUFFER(path);
+@@ -1138,12 +1141,13 @@ static void init_inode(struct inode *ino
+ 		    (struct stat_data_v1 *)B_I_PITEM(bh, ih);
+ 		unsigned long blocks;
+ 
++		uid = sd_v1_uid(sd);
++		gid = sd_v1_gid(sd);
++
+ 		set_inode_item_key_version(inode, KEY_FORMAT_3_5);
+ 		set_inode_sd_version(inode, STAT_DATA_V1);
+ 		inode->i_mode = sd_v1_mode(sd);
+ 		inode->i_nlink = sd_v1_nlink(sd);
+-		inode->i_uid = sd_v1_uid(sd);
+-		inode->i_gid = sd_v1_gid(sd);
+ 		inode->i_size = sd_v1_size(sd);
+ 		inode->i_atime.tv_sec = sd_v1_atime(sd);
+ 		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+@@ -1185,11 +1189,12 @@ static void init_inode(struct inode *ino
+ 		// (directories and symlinks)
+ 		struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
+ 
++		uid    = sd_v2_uid(sd);
++		gid    = sd_v2_gid(sd);
++
+ 		inode->i_mode = sd_v2_mode(sd);
+ 		inode->i_nlink = sd_v2_nlink(sd);
+-		inode->i_uid = sd_v2_uid(sd);
+ 		inode->i_size = sd_v2_size(sd);
+-		inode->i_gid = sd_v2_gid(sd);
+ 		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
+ 		inode->i_atime.tv_sec = sd_v2_atime(sd);
+ 		inode->i_ctime.tv_sec = sd_v2_ctime(sd);
+@@ -1219,6 +1224,10 @@ static void init_inode(struct inode *ino
+ 		sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
+ 	}
+ 
++	inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
++	inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
++	inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
++
+ 	pathrelse(path);
+ 	if (S_ISREG(inode->i_mode)) {
+ 		inode->i_op = &reiserfs_file_inode_operations;
+@@ -1241,13 +1250,15 @@ static void init_inode(struct inode *ino
+ static void inode2sd(void *sd, struct inode *inode, loff_t size)
+ {
+ 	struct stat_data *sd_v2 = (struct stat_data *)sd;
++	uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
++	gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
+ 	__u16 flags;
+ 
++	set_sd_v2_uid(sd_v2, uid);
++	set_sd_v2_gid(sd_v2, gid);
+ 	set_sd_v2_mode(sd_v2, inode->i_mode);
+ 	set_sd_v2_nlink(sd_v2, inode->i_nlink);
+-	set_sd_v2_uid(sd_v2, inode->i_uid);
+ 	set_sd_v2_size(sd_v2, size);
+-	set_sd_v2_gid(sd_v2, inode->i_gid);
+ 	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
+ 	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
+ 	set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
+@@ -2839,14 +2850,19 @@ int reiserfs_commit_write(struct file *f
+ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode)
+ {
+ 	if (reiserfs_attrs(inode->i_sb)) {
+-		if (sd_attrs & REISERFS_SYNC_FL)
+-			inode->i_flags |= S_SYNC;
+-		else
+-			inode->i_flags &= ~S_SYNC;
+ 		if (sd_attrs & REISERFS_IMMUTABLE_FL)
+ 			inode->i_flags |= S_IMMUTABLE;
+ 		else
+ 			inode->i_flags &= ~S_IMMUTABLE;
++		if (sd_attrs & REISERFS_IXUNLINK_FL)
++			inode->i_flags |= S_IXUNLINK;
++		else
++			inode->i_flags &= ~S_IXUNLINK;
++
++		if (sd_attrs & REISERFS_SYNC_FL)
++			inode->i_flags |= S_SYNC;
++		else
++			inode->i_flags &= ~S_SYNC;
+ 		if (sd_attrs & REISERFS_APPEND_FL)
+ 			inode->i_flags |= S_APPEND;
+ 		else
+@@ -2859,6 +2875,15 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs,
+ 			REISERFS_I(inode)->i_flags |= i_nopack_mask;
+ 		else
+ 			REISERFS_I(inode)->i_flags &= ~i_nopack_mask;
++
++		if (sd_attrs & REISERFS_BARRIER_FL)
++			inode->i_vflags |= V_BARRIER;
++		else
++			inode->i_vflags &= ~V_BARRIER;
++		if (sd_attrs & REISERFS_COW_FL)
++			inode->i_vflags |= V_COW;
++		else
++			inode->i_vflags &= ~V_COW;
+ 	}
+ }
+ 
+@@ -2869,6 +2894,11 @@ void i_attrs_to_sd_attrs(struct inode *i
+ 			*sd_attrs |= REISERFS_IMMUTABLE_FL;
+ 		else
+ 			*sd_attrs &= ~REISERFS_IMMUTABLE_FL;
++		if (inode->i_flags & S_IXUNLINK)
++			*sd_attrs |= REISERFS_IXUNLINK_FL;
++		else
++			*sd_attrs &= ~REISERFS_IXUNLINK_FL;
++
+ 		if (inode->i_flags & S_SYNC)
+ 			*sd_attrs |= REISERFS_SYNC_FL;
+ 		else
+@@ -2881,6 +2911,15 @@ void i_attrs_to_sd_attrs(struct inode *i
+ 			*sd_attrs |= REISERFS_NOTAIL_FL;
+ 		else
+ 			*sd_attrs &= ~REISERFS_NOTAIL_FL;
++
++		if (inode->i_vflags & V_BARRIER)
++			*sd_attrs |= REISERFS_BARRIER_FL;
++		else
++			*sd_attrs &= ~REISERFS_BARRIER_FL;
++		if (inode->i_vflags & V_COW)
++			*sd_attrs |= REISERFS_COW_FL;
++		else
++			*sd_attrs &= ~REISERFS_COW_FL;
+ 	}
+ }
+ 
+@@ -3101,9 +3140,11 @@ int reiserfs_setattr(struct dentry *dent
+ 	}
+ 
+ 	error = inode_change_ok(inode, attr);
++
+ 	if (!error) {
+ 		if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+-		    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
++		    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
++		    (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
+ 			error = reiserfs_chown_xattrs(inode, attr);
+ 
+ 			if (!error) {
+@@ -3133,6 +3174,9 @@ int reiserfs_setattr(struct dentry *dent
+ 					inode->i_uid = attr->ia_uid;
+ 				if (attr->ia_valid & ATTR_GID)
+ 					inode->i_gid = attr->ia_gid;
++				if ((attr->ia_valid & ATTR_TAG) &&
++					IS_TAGGED(inode))
++					inode->i_tag = attr->ia_tag;
+ 				mark_inode_dirty(inode);
+ 				error =
+ 				    journal_end(&th, inode->i_sb, jbegin_count);
+--- a/fs/reiserfs/ioctl.c	2009-06-11 17:13:08.000000000 +0200
++++ a/fs/reiserfs/ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -7,11 +7,27 @@
+ #include <linux/mount.h>
+ #include <linux/reiserfs_fs.h>
+ #include <linux/time.h>
++#include <linux/mount.h>
+ #include <asm/uaccess.h>
+ #include <linux/pagemap.h>
+ #include <linux/smp_lock.h>
+ #include <linux/compat.h>
+ 
++
++int reiserfs_sync_flags(struct inode *inode, int flags, int vflags)
++{
++	__u16 sd_attrs = 0;
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++
++	i_attrs_to_sd_attrs(inode, &sd_attrs);
++	REISERFS_I(inode)->i_attrs = sd_attrs;
++	inode->i_ctime = CURRENT_TIME_SEC;
++	mark_inode_dirty(inode);
++	return 0;
++}
++
+ /*
+ ** reiserfs_ioctl - handler for ioctl for inode
+ ** supported commands:
+@@ -23,7 +39,7 @@
+ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+ 		   unsigned long arg)
+ {
+-	unsigned int flags;
++	unsigned int flags, oldflags;
+ 	int err = 0;
+ 
+ 	switch (cmd) {
+@@ -43,6 +59,7 @@ int reiserfs_ioctl(struct inode *inode, 
+ 
+ 		flags = REISERFS_I(inode)->i_attrs;
+ 		i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
++		flags &= REISERFS_FL_USER_VISIBLE;
+ 		return put_user(flags, (int __user *)arg);
+ 	case REISERFS_IOC_SETFLAGS:{
+ 			if (!reiserfs_attrs(inode->i_sb))
+@@ -60,6 +77,10 @@ int reiserfs_ioctl(struct inode *inode, 
+ 				err = -EFAULT;
+ 				goto setflags_out;
+ 			}
++			if (IS_BARRIER(inode)) {
++				vxwprintk_task(1, "messing with the barrier.");
++				return -EACCES;
++			}
+ 			/*
+ 			 * Is it quota file? Do not allow user to mess with it
+ 			 */
+@@ -84,6 +105,10 @@ int reiserfs_ioctl(struct inode *inode, 
+ 					goto setflags_out;
+ 				}
+ 			}
++
++			oldflags = REISERFS_I(inode)->i_attrs;
++			flags &= REISERFS_FL_USER_MODIFIABLE;
++			flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE;
+ 			sd_attrs_to_i_attrs(flags, inode);
+ 			REISERFS_I(inode)->i_attrs = flags;
+ 			inode->i_ctime = CURRENT_TIME_SEC;
+--- a/fs/reiserfs/namei.c	2009-06-11 17:13:08.000000000 +0200
++++ a/fs/reiserfs/namei.c	2011-06-10 13:03:02.000000000 +0200
+@@ -17,6 +17,7 @@
+ #include <linux/reiserfs_acl.h>
+ #include <linux/reiserfs_xattr.h>
+ #include <linux/quotaops.h>
++#include <linux/vs_tag.h>
+ 
+ #define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
+ #define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
+@@ -354,6 +355,7 @@ static struct dentry *reiserfs_lookup(st
+ 	if (retval == IO_ERROR) {
+ 		return ERR_PTR(-EIO);
+ 	}
++		dx_propagate_tag(nd, inode);
+ 
+ 	return d_splice_alias(inode, dentry);
+ }
+@@ -570,6 +572,7 @@ static int new_inode_init(struct inode *
+ 	} else {
+ 		inode->i_gid = current_fsgid();
+ 	}
++	inode->i_tag = dx_current_fstag(inode->i_sb);
+ 	vfs_dq_init(inode);
+ 	return 0;
+ }
+@@ -1515,6 +1518,7 @@ const struct inode_operations reiserfs_d
+ 	.listxattr = reiserfs_listxattr,
+ 	.removexattr = reiserfs_removexattr,
+ 	.permission = reiserfs_permission,
++	.sync_flags = reiserfs_sync_flags,
+ };
+ 
+ /*
+--- a/fs/reiserfs/super.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/reiserfs/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -884,6 +884,14 @@ static int reiserfs_parse_options(struct
+ 		{"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
+ 		{"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
+ #endif
++#ifndef CONFIG_TAGGING_NONE
++		{"tagxid",.setmask = 1 << REISERFS_TAGGED},
++		{"tag",.setmask = 1 << REISERFS_TAGGED},
++		{"notag",.clrmask = 1 << REISERFS_TAGGED},
++#endif
++#ifdef CONFIG_PROPAGATE
++		{"tag",.arg_required = 'T',.values = NULL},
++#endif
+ #ifdef CONFIG_REISERFS_FS_POSIX_ACL
+ 		{"acl",.setmask = 1 << REISERFS_POSIXACL},
+ 		{"noacl",.clrmask = 1 << REISERFS_POSIXACL},
+@@ -1190,6 +1198,14 @@ static int reiserfs_remount(struct super
+ 	handle_quota_files(s, qf_names, &qfmt);
+ #endif
+ 
++	if ((mount_options & (1 << REISERFS_TAGGED)) &&
++		!(s->s_flags & MS_TAGGED)) {
++		reiserfs_warning(s, "super-vs01",
++			"reiserfs: tagging not permitted on remount.");
++		err = -EINVAL;
++		goto out_err;
++	}
++
+ 	handle_attrs(s);
+ 
+ 	/* Add options that are safe here */
+@@ -1652,6 +1668,10 @@ static int reiserfs_fill_super(struct su
+ 		goto error;
+ 	}
+ 
++	/* map mount option tagxid */
++	if (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TAGGED))
++		s->s_flags |= MS_TAGGED;
++
+ 	rs = SB_DISK_SUPER_BLOCK(s);
+ 	/* Let's do basic sanity check to verify that underlying device is not
+ 	   smaller than the filesystem. If the check fails then abort and scream,
+--- a/fs/reiserfs/xattr.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/reiserfs/xattr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -39,6 +39,7 @@
+ #include <linux/namei.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
++#include <linux/mount.h>
+ #include <linux/file.h>
+ #include <linux/pagemap.h>
+ #include <linux/xattr.h>
+--- a/fs/stat.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/stat.c	2011-06-10 13:03:02.000000000 +0200
+@@ -26,6 +26,7 @@ void generic_fillattr(struct inode *inod
+ 	stat->nlink = inode->i_nlink;
+ 	stat->uid = inode->i_uid;
+ 	stat->gid = inode->i_gid;
++	stat->tag = inode->i_tag;
+ 	stat->rdev = inode->i_rdev;
+ 	stat->atime = inode->i_atime;
+ 	stat->mtime = inode->i_mtime;
+--- a/fs/super.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -37,6 +37,9 @@
+ #include <linux/kobject.h>
+ #include <linux/mutex.h>
+ #include <linux/file.h>
++#include <linux/devpts_fs.h>
++#include <linux/proc_fs.h>
++#include <linux/vs_context.h>
+ #include <asm/uaccess.h>
+ #include "internal.h"
+ 
+@@ -914,12 +917,18 @@ struct vfsmount *
+ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
+ {
+ 	struct vfsmount *mnt;
++	struct super_block *sb;
+ 	char *secdata = NULL;
+ 	int error;
+ 
+ 	if (!type)
+ 		return ERR_PTR(-ENODEV);
+ 
++	error = -EPERM;
++	if ((type->fs_flags & FS_BINARY_MOUNTDATA) &&
++		!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT))
++		goto out;
++
+ 	error = -ENOMEM;
+ 	mnt = alloc_vfsmnt(name);
+ 	if (!mnt)
+@@ -938,9 +947,17 @@ vfs_kern_mount(struct file_system_type *
+ 	error = type->get_sb(type, flags, name, data, mnt);
+ 	if (error < 0)
+ 		goto out_free_secdata;
+-	BUG_ON(!mnt->mnt_sb);
+ 
+- 	error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata);
++	sb = mnt->mnt_sb;
++	BUG_ON(!sb);
++
++	error = -EPERM;
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) && !sb->s_bdev &&
++		(sb->s_magic != PROC_SUPER_MAGIC) &&
++		(sb->s_magic != DEVPTS_SUPER_MAGIC))
++		goto out_sb;
++
++	error = security_sb_kern_mount(sb, flags, secdata);
+  	if (error)
+  		goto out_sb;
+ 
+--- a/fs/sysfs/mount.c	2009-06-11 17:13:08.000000000 +0200
++++ a/fs/sysfs/mount.c	2011-06-10 13:03:02.000000000 +0200
+@@ -47,7 +47,7 @@ static int sysfs_fill_super(struct super
+ 
+ 	sb->s_blocksize = PAGE_CACHE_SIZE;
+ 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+-	sb->s_magic = SYSFS_MAGIC;
++	sb->s_magic = SYSFS_SUPER_MAGIC;
+ 	sb->s_op = &sysfs_ops;
+ 	sb->s_time_gran = 1;
+ 	sysfs_sb = sb;
+--- a/fs/utimes.c	2009-03-24 14:22:37.000000000 +0100
++++ a/fs/utimes.c	2011-06-10 13:03:02.000000000 +0200
+@@ -8,6 +8,8 @@
+ #include <linux/stat.h>
+ #include <linux/utime.h>
+ #include <linux/syscalls.h>
++#include <linux/mount.h>
++#include <linux/vs_cowbl.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+ 
+--- a/fs/xattr.c	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/xattr.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/module.h>
+ #include <linux/fsnotify.h>
+ #include <linux/audit.h>
++#include <linux/mount.h>
+ #include <asm/uaccess.h>
+ 
+ 
+@@ -49,7 +50,7 @@ xattr_permission(struct inode *inode, co
+ 	 * The trusted.* namespace can only be accessed by a privileged user.
+ 	 */
+ 	if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+-		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);
++		return (vx_capable(CAP_SYS_ADMIN, VXC_FS_TRUSTED) ? 0 : -EPERM);
+ 
+ 	/* In user.* namespace, only regular files and directories can have
+ 	 * extended attributes. For sticky directories, only the owner and
+--- a/fs/xfs/linux-2.6/xfs_ioctl.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/linux-2.6/xfs_ioctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -34,7 +34,6 @@
+ #include "xfs_dir2_sf.h"
+ #include "xfs_dinode.h"
+ #include "xfs_inode.h"
+-#include "xfs_ioctl.h"
+ #include "xfs_btree.h"
+ #include "xfs_ialloc.h"
+ #include "xfs_rtalloc.h"
+@@ -746,6 +745,10 @@ xfs_merge_ioc_xflags(
+ 		xflags |= XFS_XFLAG_IMMUTABLE;
+ 	else
+ 		xflags &= ~XFS_XFLAG_IMMUTABLE;
++	if (flags & FS_IXUNLINK_FL)
++		xflags |= XFS_XFLAG_IXUNLINK;
++	else
++		xflags &= ~XFS_XFLAG_IXUNLINK;
+ 	if (flags & FS_APPEND_FL)
+ 		xflags |= XFS_XFLAG_APPEND;
+ 	else
+@@ -774,6 +777,8 @@ xfs_di2lxflags(
+ 
+ 	if (di_flags & XFS_DIFLAG_IMMUTABLE)
+ 		flags |= FS_IMMUTABLE_FL;
++	if (di_flags & XFS_DIFLAG_IXUNLINK)
++		flags |= FS_IXUNLINK_FL;
+ 	if (di_flags & XFS_DIFLAG_APPEND)
+ 		flags |= FS_APPEND_FL;
+ 	if (di_flags & XFS_DIFLAG_SYNC)
+@@ -834,6 +839,8 @@ xfs_set_diflags(
+ 	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
+ 	if (xflags & XFS_XFLAG_IMMUTABLE)
+ 		di_flags |= XFS_DIFLAG_IMMUTABLE;
++	if (xflags & XFS_XFLAG_IXUNLINK)
++		di_flags |= XFS_DIFLAG_IXUNLINK;
+ 	if (xflags & XFS_XFLAG_APPEND)
+ 		di_flags |= XFS_DIFLAG_APPEND;
+ 	if (xflags & XFS_XFLAG_SYNC)
+@@ -876,6 +883,10 @@ xfs_diflags_to_linux(
+ 		inode->i_flags |= S_IMMUTABLE;
+ 	else
+ 		inode->i_flags &= ~S_IMMUTABLE;
++	if (xflags & XFS_XFLAG_IXUNLINK)
++		inode->i_flags |= S_IXUNLINK;
++	else
++		inode->i_flags &= ~S_IXUNLINK;
+ 	if (xflags & XFS_XFLAG_APPEND)
+ 		inode->i_flags |= S_APPEND;
+ 	else
+@@ -1352,10 +1363,18 @@ xfs_file_ioctl(
+ 	case XFS_IOC_FSGETXATTRA:
+ 		return xfs_ioc_fsgetxattr(ip, 1, arg);
+ 	case XFS_IOC_FSSETXATTR:
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -XFS_ERROR(EACCES);
++		}
+ 		return xfs_ioc_fssetxattr(ip, filp, arg);
+ 	case XFS_IOC_GETXFLAGS:
+ 		return xfs_ioc_getxflags(ip, arg);
+ 	case XFS_IOC_SETXFLAGS:
++		if (IS_BARRIER(inode)) {
++			vxwprintk_task(1, "messing with the barrier.");
++			return -XFS_ERROR(EACCES);
++		}
+ 		return xfs_ioc_setxflags(ip, filp, arg);
+ 
+ 	case XFS_IOC_FSSETDM: {
+--- a/fs/xfs/linux-2.6/xfs_ioctl.h	2009-03-24 14:22:37.000000000 +0100
++++ a/fs/xfs/linux-2.6/xfs_ioctl.h	2011-06-10 13:03:02.000000000 +0200
+@@ -70,6 +70,12 @@ xfs_handle_to_dentry(
+ 	void __user		*uhandle,
+ 	u32			hlen);
+ 
++extern int
++xfs_sync_flags(
++	struct inode		*inode,
++	int			flags,
++	int			vflags);
++
+ extern long
+ xfs_file_ioctl(
+ 	struct file		*filp,
+--- a/fs/xfs/linux-2.6/xfs_iops.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/linux-2.6/xfs_iops.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include "xfs_attr_sf.h"
+ #include "xfs_dinode.h"
+ #include "xfs_inode.h"
++#include "xfs_ioctl.h"
+ #include "xfs_bmap.h"
+ #include "xfs_btree.h"
+ #include "xfs_ialloc.h"
+@@ -55,6 +56,7 @@
+ #include <linux/security.h>
+ #include <linux/falloc.h>
+ #include <linux/fiemap.h>
++#include <linux/vs_tag.h>
+ 
+ /*
+  * Bring the timestamps in the XFS inode uptodate.
+@@ -495,6 +497,7 @@ xfs_vn_getattr(
+ 	stat->nlink = ip->i_d.di_nlink;
+ 	stat->uid = ip->i_d.di_uid;
+ 	stat->gid = ip->i_d.di_gid;
++	stat->tag = ip->i_d.di_tag;
+ 	stat->ino = ip->i_ino;
+ 	stat->atime = inode->i_atime;
+ 	stat->mtime = inode->i_mtime;
+@@ -686,6 +689,7 @@ static const struct inode_operations xfs
+ 	.listxattr		= xfs_vn_listxattr,
+ 	.fallocate		= xfs_vn_fallocate,
+ 	.fiemap			= xfs_vn_fiemap,
++	.sync_flags		= xfs_sync_flags,
+ };
+ 
+ static const struct inode_operations xfs_dir_inode_operations = {
+@@ -711,6 +715,7 @@ static const struct inode_operations xfs
+ 	.getxattr		= generic_getxattr,
+ 	.removexattr		= generic_removexattr,
+ 	.listxattr		= xfs_vn_listxattr,
++	.sync_flags		= xfs_sync_flags,
+ };
+ 
+ static const struct inode_operations xfs_dir_ci_inode_operations = {
+@@ -760,6 +765,10 @@ xfs_diflags_to_iflags(
+ 		inode->i_flags |= S_IMMUTABLE;
+ 	else
+ 		inode->i_flags &= ~S_IMMUTABLE;
++	if (ip->i_d.di_flags & XFS_DIFLAG_IXUNLINK)
++		inode->i_flags |= S_IXUNLINK;
++	else
++		inode->i_flags &= ~S_IXUNLINK;
+ 	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+ 		inode->i_flags |= S_APPEND;
+ 	else
+@@ -772,6 +781,15 @@ xfs_diflags_to_iflags(
+ 		inode->i_flags |= S_NOATIME;
+ 	else
+ 		inode->i_flags &= ~S_NOATIME;
++
++	if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER)
++		inode->i_vflags |= V_BARRIER;
++	else
++		inode->i_vflags &= ~V_BARRIER;
++	if (ip->i_d.di_vflags & XFS_DIVFLAG_COW)
++		inode->i_vflags |= V_COW;
++	else
++		inode->i_vflags &= ~V_COW;
+ }
+ 
+ /*
+@@ -800,6 +818,7 @@ xfs_setup_inode(
+ 	inode->i_nlink	= ip->i_d.di_nlink;
+ 	inode->i_uid	= ip->i_d.di_uid;
+ 	inode->i_gid	= ip->i_d.di_gid;
++	inode->i_tag    = ip->i_d.di_tag;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+ 	case S_IFBLK:
+--- a/fs/xfs/linux-2.6/xfs_linux.h	2009-09-10 15:26:24.000000000 +0200
++++ a/fs/xfs/linux-2.6/xfs_linux.h	2011-06-10 13:03:02.000000000 +0200
+@@ -119,6 +119,7 @@
+ 
+ #define current_cpu()		(raw_smp_processor_id())
+ #define current_pid()		(current->pid)
++#define current_fstag(cred,vp)	(dx_current_fstag((vp)->i_sb))
+ #define current_test_flags(f)	(current->flags & (f))
+ #define current_set_flags_nested(sp, f)		\
+ 		(*(sp) = current->flags, current->flags |= (f))
+--- a/fs/xfs/linux-2.6/xfs_super.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/linux-2.6/xfs_super.c	2011-06-10 13:03:02.000000000 +0200
+@@ -117,6 +117,9 @@ mempool_t *xfs_ioend_pool;
+ #define MNTOPT_DMAPI	"dmapi"		/* DMI enabled (DMAPI / XDSM) */
+ #define MNTOPT_XDSM	"xdsm"		/* DMI enabled (DMAPI / XDSM) */
+ #define MNTOPT_DMI	"dmi"		/* DMI enabled (DMAPI / XDSM) */
++#define MNTOPT_TAGXID	"tagxid"	/* context tagging for inodes */
++#define MNTOPT_TAGGED	"tag"		/* context tagging for inodes */
++#define MNTOPT_NOTAGTAG	"notag"		/* do not use context tagging */
+ 
+ /*
+  * Table driven mount option parser.
+@@ -125,10 +128,14 @@ mempool_t *xfs_ioend_pool;
+  * in the future, too.
+  */
+ enum {
++	Opt_tag, Opt_notag,
+ 	Opt_barrier, Opt_nobarrier, Opt_err
+ };
+ 
+ static const match_table_t tokens = {
++	{Opt_tag, "tagxid"},
++	{Opt_tag, "tag"},
++	{Opt_notag, "notag"},
+ 	{Opt_barrier, "barrier"},
+ 	{Opt_nobarrier, "nobarrier"},
+ 	{Opt_err, NULL}
+@@ -382,6 +389,19 @@ xfs_parseargs(
+ 		} else if (!strcmp(this_char, "irixsgid")) {
+ 			cmn_err(CE_WARN,
+ 	"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
++#ifndef CONFIG_TAGGING_NONE
++		} else if (!strcmp(this_char, MNTOPT_TAGGED)) {
++			mp->m_flags |= XFS_MOUNT_TAGGED;
++		} else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) {
++			mp->m_flags &= ~XFS_MOUNT_TAGGED;
++		} else if (!strcmp(this_char, MNTOPT_TAGXID)) {
++			mp->m_flags |= XFS_MOUNT_TAGGED;
++#endif
++#ifdef CONFIG_PROPAGATE
++		} else if (!strcmp(this_char, MNTOPT_TAGGED)) {
++			/* use value */
++			mp->m_flags |= XFS_MOUNT_TAGGED;
++#endif
+ 		} else {
+ 			cmn_err(CE_WARN,
+ 				"XFS: unknown mount option [%s].", this_char);
+@@ -1295,6 +1315,16 @@ xfs_fs_remount(
+ 		case Opt_nobarrier:
+ 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ 			break;
++		case Opt_tag:
++			if (!(sb->s_flags & MS_TAGGED)) {
++				printk(KERN_INFO
++					"XFS: %s: tagging not permitted on remount.\n",
++					sb->s_id);
++				return -EINVAL;
++			}
++			break;
++		case Opt_notag:
++			break;
+ 		default:
+ 			/*
+ 			 * Logically we would return an error here to prevent
+@@ -1530,6 +1560,9 @@ xfs_fs_fill_super(
+ 
+ 	XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname);
+ 
++	if (mp->m_flags & XFS_MOUNT_TAGGED)
++		sb->s_flags |= MS_TAGGED;
++
+ 	sb->s_magic = XFS_SB_MAGIC;
+ 	sb->s_blocksize = mp->m_sb.sb_blocksize;
+ 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
+--- a/fs/xfs/xfs_dinode.h	2009-06-11 17:13:09.000000000 +0200
++++ a/fs/xfs/xfs_dinode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -50,7 +50,9 @@ typedef struct xfs_dinode {
+ 	__be32		di_gid;		/* owner's group id */
+ 	__be32		di_nlink;	/* number of links to file */
+ 	__be16		di_projid;	/* owner's project id */
+-	__u8		di_pad[8];	/* unused, zeroed space */
++	__be16		di_tag;		/* context tagging */
++	__be16		di_vflags;	/* vserver specific flags */
++	__u8		di_pad[4];	/* unused, zeroed space */
+ 	__be16		di_flushiter;	/* incremented on flush */
+ 	xfs_timestamp_t	di_atime;	/* time last accessed */
+ 	xfs_timestamp_t	di_mtime;	/* time last modified */
+@@ -183,6 +185,8 @@ static inline void xfs_dinode_put_rdev(s
+ #define XFS_DIFLAG_EXTSZINHERIT_BIT 12	/* inherit inode extent size */
+ #define XFS_DIFLAG_NODEFRAG_BIT     13	/* do not reorganize/defragment */
+ #define XFS_DIFLAG_FILESTREAM_BIT   14  /* use filestream allocator */
++#define XFS_DIFLAG_IXUNLINK_BIT     15	/* Immutable inver on unlink */
++
+ #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
+ #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
+ #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
+@@ -198,6 +202,7 @@ static inline void xfs_dinode_put_rdev(s
+ #define XFS_DIFLAG_EXTSZINHERIT  (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
+ #define XFS_DIFLAG_NODEFRAG      (1 << XFS_DIFLAG_NODEFRAG_BIT)
+ #define XFS_DIFLAG_FILESTREAM    (1 << XFS_DIFLAG_FILESTREAM_BIT)
++#define XFS_DIFLAG_IXUNLINK      (1 << XFS_DIFLAG_IXUNLINK_BIT)
+ 
+ #ifdef CONFIG_XFS_RT
+ #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+@@ -210,6 +215,10 @@ static inline void xfs_dinode_put_rdev(s
+ 	 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
+ 	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
+ 	 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
+-	 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
++	 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM | \
++	 XFS_DIFLAG_IXUNLINK)
++
++#define XFS_DIVFLAG_BARRIER	0x01
++#define XFS_DIVFLAG_COW		0x02
+ 
+ #endif	/* __XFS_DINODE_H__ */
+--- a/fs/xfs/xfs_fs.h	2009-12-03 20:02:53.000000000 +0100
++++ a/fs/xfs/xfs_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -67,6 +67,9 @@ struct fsxattr {
+ #define XFS_XFLAG_EXTSZINHERIT	0x00001000	/* inherit inode extent size */
+ #define XFS_XFLAG_NODEFRAG	0x00002000  	/* do not defragment */
+ #define XFS_XFLAG_FILESTREAM	0x00004000	/* use filestream allocator */
++#define XFS_XFLAG_IXUNLINK	0x00008000	/* immutable invert on unlink */
++#define XFS_XFLAG_BARRIER	0x10000000	/* chroot() barrier */
++#define XFS_XFLAG_COW		0x20000000	/* copy on write mark */
+ #define XFS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this	*/
+ 
+ /*
+@@ -292,7 +295,8 @@ typedef struct xfs_bstat {
+ 	__s32		bs_extents;	/* number of extents		*/
+ 	__u32		bs_gen;		/* generation count		*/
+ 	__u16		bs_projid;	/* project id			*/
+-	unsigned char	bs_pad[14];	/* pad space, unused		*/
++	__u16		bs_tag;		/* context tagging		*/
++	unsigned char	bs_pad[12];	/* pad space, unused		*/
+ 	__u32		bs_dmevmask;	/* DMIG event mask		*/
+ 	__u16		bs_dmstate;	/* DMIG state info		*/
+ 	__u16		bs_aextents;	/* attribute number of extents	*/
+--- a/fs/xfs/xfs_ialloc.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_ialloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -41,7 +41,6 @@
+ #include "xfs_error.h"
+ #include "xfs_bmap.h"
+ 
+-
+ /*
+  * Allocation group level functions.
+  */
+--- a/fs/xfs/xfs_inode.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_inode.c	2011-06-10 13:03:02.000000000 +0200
+@@ -249,6 +249,7 @@ xfs_inotobp(
+ 	return 0;
+ }
+ 
++#include <linux/vs_tag.h>
+ 
+ /*
+  * This routine is called to map an inode to the buffer containing
+@@ -654,15 +655,25 @@ xfs_iformat_btree(
+ STATIC void
+ xfs_dinode_from_disk(
+ 	xfs_icdinode_t		*to,
+-	xfs_dinode_t		*from)
++	xfs_dinode_t		*from,
++	int tagged)
+ {
++	uint32_t uid, gid, tag;
++
+ 	to->di_magic = be16_to_cpu(from->di_magic);
+ 	to->di_mode = be16_to_cpu(from->di_mode);
+ 	to->di_version = from ->di_version;
+ 	to->di_format = from->di_format;
+ 	to->di_onlink = be16_to_cpu(from->di_onlink);
+-	to->di_uid = be32_to_cpu(from->di_uid);
+-	to->di_gid = be32_to_cpu(from->di_gid);
++
++	uid = be32_to_cpu(from->di_uid);
++	gid = be32_to_cpu(from->di_gid);
++	tag = be16_to_cpu(from->di_tag);
++
++	to->di_uid = INOTAG_UID(tagged, uid, gid);
++	to->di_gid = INOTAG_GID(tagged, uid, gid);
++	to->di_tag = INOTAG_TAG(tagged, uid, gid, tag);
++
+ 	to->di_nlink = be32_to_cpu(from->di_nlink);
+ 	to->di_projid = be16_to_cpu(from->di_projid);
+ 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+@@ -683,21 +694,26 @@ xfs_dinode_from_disk(
+ 	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
+ 	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
+ 	to->di_flags	= be16_to_cpu(from->di_flags);
++	to->di_vflags	= be16_to_cpu(from->di_vflags);
+ 	to->di_gen	= be32_to_cpu(from->di_gen);
+ }
+ 
+ void
+ xfs_dinode_to_disk(
+ 	xfs_dinode_t		*to,
+-	xfs_icdinode_t		*from)
++	xfs_icdinode_t		*from,
++	int tagged)
+ {
+ 	to->di_magic = cpu_to_be16(from->di_magic);
+ 	to->di_mode = cpu_to_be16(from->di_mode);
+ 	to->di_version = from ->di_version;
+ 	to->di_format = from->di_format;
+ 	to->di_onlink = cpu_to_be16(from->di_onlink);
+-	to->di_uid = cpu_to_be32(from->di_uid);
+-	to->di_gid = cpu_to_be32(from->di_gid);
++
++	to->di_uid = cpu_to_be32(TAGINO_UID(tagged, from->di_uid, from->di_tag));
++	to->di_gid = cpu_to_be32(TAGINO_GID(tagged, from->di_gid, from->di_tag));
++	to->di_tag = cpu_to_be16(TAGINO_TAG(tagged, from->di_tag));
++
+ 	to->di_nlink = cpu_to_be32(from->di_nlink);
+ 	to->di_projid = cpu_to_be16(from->di_projid);
+ 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+@@ -718,12 +734,14 @@ xfs_dinode_to_disk(
+ 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
+ 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
+ 	to->di_flags = cpu_to_be16(from->di_flags);
++	to->di_vflags = cpu_to_be16(from->di_vflags);
+ 	to->di_gen = cpu_to_be32(from->di_gen);
+ }
+ 
+ STATIC uint
+ _xfs_dic2xflags(
+-	__uint16_t		di_flags)
++	__uint16_t		di_flags,
++	__uint16_t		di_vflags)
+ {
+ 	uint			flags = 0;
+ 
+@@ -734,6 +752,8 @@ _xfs_dic2xflags(
+ 			flags |= XFS_XFLAG_PREALLOC;
+ 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
+ 			flags |= XFS_XFLAG_IMMUTABLE;
++		if (di_flags & XFS_DIFLAG_IXUNLINK)
++			flags |= XFS_XFLAG_IXUNLINK;
+ 		if (di_flags & XFS_DIFLAG_APPEND)
+ 			flags |= XFS_XFLAG_APPEND;
+ 		if (di_flags & XFS_DIFLAG_SYNC)
+@@ -758,6 +778,10 @@ _xfs_dic2xflags(
+ 			flags |= XFS_XFLAG_FILESTREAM;
+ 	}
+ 
++	if (di_vflags & XFS_DIVFLAG_BARRIER)
++		flags |= FS_BARRIER_FL;
++	if (di_vflags & XFS_DIVFLAG_COW)
++		flags |= FS_COW_FL;
+ 	return flags;
+ }
+ 
+@@ -767,7 +791,7 @@ xfs_ip2xflags(
+ {
+ 	xfs_icdinode_t		*dic = &ip->i_d;
+ 
+-	return _xfs_dic2xflags(dic->di_flags) |
++	return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) |
+ 				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
+ }
+ 
+@@ -775,7 +799,8 @@ uint
+ xfs_dic2xflags(
+ 	xfs_dinode_t		*dip)
+ {
+-	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
++	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
++				be16_to_cpu(dip->di_vflags)) |
+ 				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
+ }
+ 
+@@ -808,7 +833,6 @@ xfs_iread(
+ 	if (error)
+ 		return error;
+ 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
+-
+ 	/*
+ 	 * If we got something that isn't an inode it means someone
+ 	 * (nfs or dmi) has a stale handle.
+@@ -833,7 +857,8 @@ xfs_iread(
+ 	 * Otherwise, just get the truly permanent information.
+ 	 */
+ 	if (dip->di_mode) {
+-		xfs_dinode_from_disk(&ip->i_d, dip);
++		xfs_dinode_from_disk(&ip->i_d, dip,
++			mp->m_flags & XFS_MOUNT_TAGGED);
+ 		error = xfs_iformat(ip, dip);
+ 		if (error)  {
+ #ifdef DEBUG
+@@ -1033,6 +1058,7 @@ xfs_ialloc(
+ 	ASSERT(ip->i_d.di_nlink == nlink);
+ 	ip->i_d.di_uid = current_fsuid();
+ 	ip->i_d.di_gid = current_fsgid();
++	ip->i_d.di_tag = current_fstag(cr, &ip->i_vnode);
+ 	ip->i_d.di_projid = prid;
+ 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
+ 
+@@ -1093,6 +1119,7 @@ xfs_ialloc(
+ 	ip->i_d.di_dmevmask = 0;
+ 	ip->i_d.di_dmstate = 0;
+ 	ip->i_d.di_flags = 0;
++	ip->i_d.di_vflags = 0;
+ 	flags = XFS_ILOG_CORE;
+ 	switch (mode & S_IFMT) {
+ 	case S_IFIFO:
+@@ -2169,6 +2196,7 @@ xfs_ifree(
+ 	}
+ 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
+ 	ip->i_d.di_flags = 0;
++	ip->i_d.di_vflags = 0;
+ 	ip->i_d.di_dmevmask = 0;
+ 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
+ 	ip->i_df.if_ext_max =
+@@ -3149,7 +3177,8 @@ xfs_iflush_int(
+ 	 * because if the inode is dirty at all the core must
+ 	 * be.
+ 	 */
+-	xfs_dinode_to_disk(dip, &ip->i_d);
++	xfs_dinode_to_disk(dip, &ip->i_d,
++		mp->m_flags & XFS_MOUNT_TAGGED);
+ 
+ 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
+ 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
+--- a/fs/xfs/xfs_inode.h	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_inode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -135,7 +135,9 @@ typedef struct xfs_icdinode {
+ 	__uint32_t	di_gid;		/* owner's group id */
+ 	__uint32_t	di_nlink;	/* number of links to file */
+ 	__uint16_t	di_projid;	/* owner's project id */
+-	__uint8_t	di_pad[8];	/* unused, zeroed space */
++	__uint16_t	di_tag;		/* context tagging */
++	__uint16_t	di_vflags;	/* vserver specific flags */
++	__uint8_t	di_pad[4];	/* unused, zeroed space */
+ 	__uint16_t	di_flushiter;	/* incremented on flush */
+ 	xfs_ictimestamp_t di_atime;	/* time last accessed */
+ 	xfs_ictimestamp_t di_mtime;	/* time last modified */
+@@ -569,7 +571,7 @@ int		xfs_itobp(struct xfs_mount *, struc
+ int		xfs_iread(struct xfs_mount *, struct xfs_trans *,
+ 			  struct xfs_inode *, uint);
+ void		xfs_dinode_to_disk(struct xfs_dinode *,
+-				   struct xfs_icdinode *);
++				   struct xfs_icdinode *, int);
+ void		xfs_idestroy_fork(struct xfs_inode *, int);
+ void		xfs_idata_realloc(struct xfs_inode *, int, int);
+ void		xfs_iroot_realloc(struct xfs_inode *, int, int);
+--- a/fs/xfs/xfs_itable.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_itable.c	2011-06-10 13:03:02.000000000 +0200
+@@ -100,6 +100,7 @@ xfs_bulkstat_one_int(
+ 	buf->bs_mode = dic->di_mode;
+ 	buf->bs_uid = dic->di_uid;
+ 	buf->bs_gid = dic->di_gid;
++	buf->bs_tag = dic->di_tag;
+ 	buf->bs_size = dic->di_size;
+ 
+ 	/*
+--- a/fs/xfs/xfs_log_recover.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_log_recover.c	2011-06-10 13:03:02.000000000 +0200
+@@ -2467,7 +2467,8 @@ xlog_recover_do_inode_trans(
+ 	}
+ 
+ 	/* The core is in in-core format */
+-	xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
++	xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr,
++		mp->m_flags & XFS_MOUNT_TAGGED);
+ 
+ 	/* the rest is in on-disk format */
+ 	if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
+--- a/fs/xfs/xfs_mount.h	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_mount.h	2011-06-10 13:03:02.000000000 +0200
+@@ -285,6 +285,7 @@ typedef struct xfs_mount {
+ 						   allocator */
+ #define XFS_MOUNT_NOATTR2	(1ULL << 25)	/* disable use of attr2 format */
+ 
++#define XFS_MOUNT_TAGGED	(1ULL << 31)	/* context tagging */
+ 
+ /*
+  * Default minimum read and write sizes.
+--- a/fs/xfs/xfs_vnodeops.c	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_vnodeops.c	2011-06-10 13:03:02.000000000 +0200
+@@ -54,6 +54,80 @@
+ #include "xfs_filestream.h"
+ #include "xfs_vnodeops.h"
+ 
++
++STATIC void
++xfs_get_inode_flags(
++	xfs_inode_t	*ip)
++{
++	struct inode 	*inode = VFS_I(ip);
++	unsigned int 	flags = inode->i_flags;
++	unsigned int 	vflags = inode->i_vflags;
++
++	if (flags & S_IMMUTABLE)
++		ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE;
++	else
++		ip->i_d.di_flags &= ~XFS_DIFLAG_IMMUTABLE;
++	if (flags & S_IXUNLINK)
++		ip->i_d.di_flags |= XFS_DIFLAG_IXUNLINK;
++	else
++		ip->i_d.di_flags &= ~XFS_DIFLAG_IXUNLINK;
++
++	if (vflags & V_BARRIER)
++		ip->i_d.di_vflags |= XFS_DIVFLAG_BARRIER;
++	else
++		ip->i_d.di_vflags &= ~XFS_DIVFLAG_BARRIER;
++	if (vflags & V_COW)
++		ip->i_d.di_vflags |= XFS_DIVFLAG_COW;
++	else
++		ip->i_d.di_vflags &= ~XFS_DIVFLAG_COW;
++}
++
++int
++xfs_sync_flags(
++	struct inode		*inode,
++	int			flags,
++	int			vflags)
++{
++	struct xfs_inode	*ip = XFS_I(inode);
++	struct xfs_mount	*mp = ip->i_mount;
++	struct xfs_trans        *tp;
++	unsigned int		lock_flags = 0;
++	int			code;
++
++	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
++	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
++	if (code)
++		goto error_out;
++
++	lock_flags = XFS_ILOCK_EXCL;
++	xfs_ilock(ip, lock_flags);
++
++	xfs_trans_ijoin(tp, ip, lock_flags);
++	xfs_trans_ihold(tp, ip);
++
++	inode->i_flags = flags;
++	inode->i_vflags = vflags;
++	xfs_get_inode_flags(ip);
++
++	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
++	xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
++
++	XFS_STATS_INC(xs_ig_attrchg);
++
++	if (mp->m_flags & XFS_MOUNT_WSYNC)
++		xfs_trans_set_sync(tp);
++	code = xfs_trans_commit(tp, 0);
++	xfs_iunlock(ip, lock_flags);
++	return code;
++
++error_out:
++	xfs_trans_cancel(tp, 0);
++	if (lock_flags)
++		xfs_iunlock(ip, lock_flags);
++	return code;
++}
++
++
+ int
+ xfs_setattr(
+ 	struct xfs_inode	*ip,
+@@ -69,6 +143,7 @@ xfs_setattr(
+ 	uint			commit_flags=0;
+ 	uid_t			uid=0, iuid=0;
+ 	gid_t			gid=0, igid=0;
++	tag_t			tag=0, itag=0;
+ 	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
+ 	int			need_iolock = 1;
+ 
+@@ -161,7 +236,7 @@ xfs_setattr(
+ 	/*
+ 	 * Change file ownership.  Must be the owner or privileged.
+ 	 */
+-	if (mask & (ATTR_UID|ATTR_GID)) {
++	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
+ 		/*
+ 		 * These IDs could have changed since we last looked at them.
+ 		 * But, we're assured that if the ownership did change
+@@ -170,8 +245,10 @@ xfs_setattr(
+ 		 */
+ 		iuid = ip->i_d.di_uid;
+ 		igid = ip->i_d.di_gid;
++		itag = ip->i_d.di_tag;
+ 		gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
+ 		uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
++		tag = (mask & ATTR_TAG) ? iattr->ia_tag : itag;
+ 
+ 		/*
+ 		 * Do a quota reservation only if uid/gid is actually
+@@ -179,7 +256,8 @@ xfs_setattr(
+ 		 */
+ 		if (XFS_IS_QUOTA_RUNNING(mp) &&
+ 		    ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
+-		     (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
++		     (XFS_IS_GQUOTA_ON(mp) && igid != gid) ||
++		     (XFS_IS_GQUOTA_ON(mp) && itag != tag))) {
+ 			ASSERT(tp);
+ 			code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
+ 						capable(CAP_FOWNER) ?
+@@ -340,7 +418,7 @@ xfs_setattr(
+ 	/*
+ 	 * Change file ownership.  Must be the owner or privileged.
+ 	 */
+-	if (mask & (ATTR_UID|ATTR_GID)) {
++	if (mask & (ATTR_UID|ATTR_GID|ATTR_TAG)) {
+ 		/*
+ 		 * CAP_FSETID overrides the following restrictions:
+ 		 *
+@@ -356,6 +434,10 @@ xfs_setattr(
+ 		 * Change the ownerships and register quota modifications
+ 		 * in the transaction.
+ 		 */
++		if (itag != tag) {
++			ip->i_d.di_tag = tag;
++			inode->i_tag = tag;
++		}
+ 		if (iuid != uid) {
+ 			if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
+ 				ASSERT(mask & ATTR_UID);
+--- a/fs/xfs/xfs_vnodeops.h	2011-05-29 23:42:27.000000000 +0200
++++ a/fs/xfs/xfs_vnodeops.h	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@ struct xfs_inode;
+ struct xfs_iomap;
+ 
+ 
++int xfs_sync_xflags(struct xfs_inode *ip);
+ int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
+ #define	XFS_ATTR_DMI		0x01	/* invocation from a DMI function */
+ #define	XFS_ATTR_NONBLOCK	0x02	/* return EAGAIN if operation would block */
+--- a/include/asm-generic/tlb.h	2009-09-10 15:26:24.000000000 +0200
++++ a/include/asm-generic/tlb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@
+ #define _ASM_GENERIC__TLB_H
+ 
+ #include <linux/swap.h>
++#include <linux/vs_memory.h>
+ #include <asm/pgalloc.h>
+ #include <asm/tlbflush.h>
+ 
+--- a/include/linux/capability.h	2009-12-03 20:02:54.000000000 +0100
++++ a/include/linux/capability.h	2011-06-10 13:03:02.000000000 +0200
+@@ -285,6 +285,7 @@ struct cpu_vfs_cap_data {
+    arbitrary SCSI commands */
+ /* Allow setting encryption key on loopback filesystem */
+ /* Allow setting zone reclaim policy */
++/* Allow the selection of a security context */
+ 
+ #define CAP_SYS_ADMIN        21
+ 
+@@ -357,7 +358,13 @@ struct cpu_vfs_cap_data {
+ 
+ #define CAP_MAC_ADMIN        33
+ 
+-#define CAP_LAST_CAP         CAP_MAC_ADMIN
++/* Allow context manipulations */
++/* Allow changing context info on files */
++
++#define CAP_CONTEXT	     34
++
++
++#define CAP_LAST_CAP         CAP_CONTEXT
+ 
+ #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
+ 
+--- a/include/linux/devpts_fs.h	2008-12-25 00:26:37.000000000 +0100
++++ a/include/linux/devpts_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -45,5 +45,4 @@ static inline void devpts_pty_kill(struc
+ 
+ #endif
+ 
+-
+ #endif /* _LINUX_DEVPTS_FS_H */
+--- a/include/linux/ext2_fs.h	2009-03-24 14:22:41.000000000 +0100
++++ a/include/linux/ext2_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -189,8 +189,12 @@ struct ext2_group_desc
+ #define EXT2_NOTAIL_FL			FS_NOTAIL_FL	/* file tail should not be merged */
+ #define EXT2_DIRSYNC_FL			FS_DIRSYNC_FL	/* dirsync behaviour (directories only) */
+ #define EXT2_TOPDIR_FL			FS_TOPDIR_FL	/* Top of directory hierarchies*/
++#define EXT2_IXUNLINK_FL		FS_IXUNLINK_FL	/* Immutable invert on unlink */
+ #define EXT2_RESERVED_FL		FS_RESERVED_FL	/* reserved for ext2 lib */
+ 
++#define EXT2_BARRIER_FL			FS_BARRIER_FL	/* Barrier for chroot() */
++#define EXT2_COW_FL			FS_COW_FL	/* Copy on Write marker */
++
+ #define EXT2_FL_USER_VISIBLE		FS_FL_USER_VISIBLE	/* User visible flags */
+ #define EXT2_FL_USER_MODIFIABLE		FS_FL_USER_MODIFIABLE	/* User modifiable flags */
+ 
+@@ -274,7 +278,8 @@ struct ext2_inode {
+ 			__u16	i_pad1;
+ 			__le16	l_i_uid_high;	/* these 2 fields    */
+ 			__le16	l_i_gid_high;	/* were reserved2[0] */
+-			__u32	l_i_reserved2;
++			__le16	l_i_tag;	/* Context Tag */
++			__u16	l_i_reserved2;
+ 		} linux2;
+ 		struct {
+ 			__u8	h_i_frag;	/* Fragment number */
+@@ -303,6 +308,7 @@ struct ext2_inode {
+ #define i_gid_low	i_gid
+ #define i_uid_high	osd2.linux2.l_i_uid_high
+ #define i_gid_high	osd2.linux2.l_i_gid_high
++#define i_raw_tag	osd2.linux2.l_i_tag
+ #define i_reserved2	osd2.linux2.l_i_reserved2
+ #endif
+ 
+@@ -347,6 +353,7 @@ struct ext2_inode {
+ #define EXT2_MOUNT_USRQUOTA		0x020000  /* user quota */
+ #define EXT2_MOUNT_GRPQUOTA		0x040000  /* group quota */
+ #define EXT2_MOUNT_RESERVATION		0x080000  /* Preallocation */
++#define EXT2_MOUNT_TAGGED		(1<<24)	  /* Enable Context Tags */
+ 
+ 
+ #define clear_opt(o, opt)		o &= ~EXT2_MOUNT_##opt
+--- a/include/linux/ext3_fs.h	2009-09-10 15:26:25.000000000 +0200
++++ a/include/linux/ext3_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -173,10 +173,14 @@ struct ext3_group_desc
+ #define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
+ #define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
+ #define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
++#define EXT3_IXUNLINK_FL		0x08000000 /* Immutable invert on unlink */
+ #define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
+ 
+-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
+-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
++#define EXT3_BARRIER_FL			0x04000000 /* Barrier for chroot() */
++#define EXT3_COW_FL			0x20000000 /* Copy on Write marker */
++
++#define EXT3_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
++#define EXT3_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
+ 
+ /* Flags that should be inherited by new inodes from their parent. */
+ #define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
+@@ -320,7 +324,8 @@ struct ext3_inode {
+ 			__u16	i_pad1;
+ 			__le16	l_i_uid_high;	/* these 2 fields    */
+ 			__le16	l_i_gid_high;	/* were reserved2[0] */
+-			__u32	l_i_reserved2;
++			__le16	l_i_tag;	/* Context Tag */
++			__u16	l_i_reserved2;
+ 		} linux2;
+ 		struct {
+ 			__u8	h_i_frag;	/* Fragment number */
+@@ -351,6 +356,7 @@ struct ext3_inode {
+ #define i_gid_low	i_gid
+ #define i_uid_high	osd2.linux2.l_i_uid_high
+ #define i_gid_high	osd2.linux2.l_i_gid_high
++#define i_raw_tag	osd2.linux2.l_i_tag
+ #define i_reserved2	osd2.linux2.l_i_reserved2
+ 
+ #elif defined(__GNU__)
+@@ -414,6 +420,7 @@ struct ext3_inode {
+ #define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
+ #define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
+ 						  * error in ordered mode */
++#define EXT3_MOUNT_TAGGED		(1<<24) /* Enable Context Tags */
+ 
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+@@ -892,6 +899,7 @@ extern void ext3_get_inode_flags(struct 
+ extern void ext3_set_aops(struct inode *inode);
+ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 		       u64 start, u64 len);
++extern int ext3_sync_flags(struct inode *, int, int);
+ 
+ /* ioctl.c */
+ extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
+--- a/include/linux/fs.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/fs.h	2011-06-10 18:58:46.000000000 +0200
+@@ -208,6 +208,9 @@ struct inodes_stat_t {
+ #define MS_KERNMOUNT	(1<<22) /* this is a kern_mount call */
+ #define MS_I_VERSION	(1<<23) /* Update inode I_version field */
+ #define MS_STRICTATIME	(1<<24) /* Always perform atime updates */
++#define MS_TAGGED	(1<<25) /* use generic inode tagging */
++#define MS_TAGID	(1<<26) /* use specific tag for this mount */
++#define MS_NOTAGCHECK	(1<<27) /* don't check tags */
+ #define MS_ACTIVE	(1<<30)
+ #define MS_NOUSER	(1<<31)
+ 
+@@ -234,6 +237,14 @@ struct inodes_stat_t {
+ #define S_NOCMTIME	128	/* Do not update file c/mtime */
+ #define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
+ #define S_PRIVATE	512	/* Inode is fs-internal */
++#define S_IXUNLINK	1024	/* Immutable Invert on unlink */
++
++/* Linux-VServer related Inode flags */
++
++#define V_VALID		1
++#define V_XATTR		2
++#define V_BARRIER	4	/* Barrier for chroot() */
++#define V_COW		8	/* Copy on Write */
+ 
+ /*
+  * Note that nosuid etc flags are inode-specific: setting some file-system
+@@ -256,12 +267,15 @@ struct inodes_stat_t {
+ #define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+ 					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
+ #define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
+-#define IS_NOATIME(inode)   __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+-#define IS_I_VERSION(inode)   __IS_FLG(inode, MS_I_VERSION)
++#define IS_NOATIME(inode)	__IS_FLG(inode, MS_RDONLY|MS_NOATIME)
++#define IS_I_VERSION(inode)	__IS_FLG(inode, MS_I_VERSION)
++#define IS_TAGGED(inode)	__IS_FLG(inode, MS_TAGGED)
+ 
+ #define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
+ #define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
+ #define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
++#define IS_IXUNLINK(inode)	((inode)->i_flags & S_IXUNLINK)
++#define IS_IXORUNLINK(inode)	((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
+ #define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
+ 
+ #define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
+@@ -269,6 +283,16 @@ struct inodes_stat_t {
+ #define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
+ #define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
+ 
++#define IS_BARRIER(inode)	(S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
++
++#ifdef CONFIG_VSERVER_COWBL
++#  define IS_COW(inode)		(IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
++#  define IS_COW_LINK(inode)	(S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
++#else
++#  define IS_COW(inode)		(0)
++#  define IS_COW_LINK(inode)	(0)
++#endif
++
+ /* the read-only stuff doesn't really belong here, but any other place is
+    probably as bad and I don't want to create yet another include file. */
+ 
+@@ -350,11 +374,14 @@ struct inodes_stat_t {
+ #define FS_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
+ #define FS_EXTENT_FL			0x00080000 /* Extents */
+ #define FS_DIRECTIO_FL			0x00100000 /* Use direct i/o */
++#define FS_IXUNLINK_FL			0x08000000 /* Immutable invert on unlink */
+ #define FS_RESERVED_FL			0x80000000 /* reserved for ext2 lib */
+ 
+-#define FS_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
+-#define FS_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
++#define FS_BARRIER_FL			0x04000000 /* Barrier for chroot() */
++#define FS_COW_FL			0x20000000 /* Copy on Write marker */
+ 
++#define FS_FL_USER_VISIBLE		0x0103DFFF /* User visible flags */
++#define FS_FL_USER_MODIFIABLE		0x010380FF /* User modifiable flags */
+ 
+ #define SYNC_FILE_RANGE_WAIT_BEFORE	1
+ #define SYNC_FILE_RANGE_WRITE		2
+@@ -436,6 +463,7 @@ typedef void (dio_iodone_t)(struct kiocb
+ #define ATTR_KILL_PRIV	(1 << 14)
+ #define ATTR_OPEN	(1 << 15) /* Truncating from open(O_TRUNC) */
+ #define ATTR_TIMES_SET	(1 << 16)
++#define ATTR_TAG	(1 << 17)
+ 
+ /*
+  * This is the Inode Attributes structure, used for notify_change().  It
+@@ -451,6 +479,7 @@ struct iattr {
+ 	umode_t		ia_mode;
+ 	uid_t		ia_uid;
+ 	gid_t		ia_gid;
++	tag_t		ia_tag;
+ 	loff_t		ia_size;
+ 	struct timespec	ia_atime;
+ 	struct timespec	ia_mtime;
+@@ -464,6 +493,9 @@ struct iattr {
+ 	struct file	*ia_file;
+ };
+ 
++#define ATTR_FLAG_BARRIER	512	/* Barrier for chroot() */
++#define ATTR_FLAG_IXUNLINK	1024	/* Immutable invert on unlink */
++
+ /*
+  * Includes for diskquotas.
+  */
+@@ -729,7 +761,9 @@ struct inode {
+ 	unsigned int		i_nlink;
+ 	uid_t			i_uid;
+ 	gid_t			i_gid;
++	tag_t			i_tag;
+ 	dev_t			i_rdev;
++	dev_t			i_mdev;
+ 	u64			i_version;
+ 	loff_t			i_size;
+ #ifdef __NEED_I_SIZE_ORDERED
+@@ -776,7 +810,8 @@ struct inode {
+ 	unsigned long		i_state;
+ 	unsigned long		dirtied_when;	/* jiffies of first dirtying */
+ 
+-	unsigned int		i_flags;
++	unsigned short		i_flags;
++	unsigned short		i_vflags;
+ 
+ 	atomic_t		i_writecount;
+ #ifdef CONFIG_SECURITY
+@@ -864,12 +899,12 @@ static inline void i_size_write(struct i
+ 
+ static inline unsigned iminor(const struct inode *inode)
+ {
+-	return MINOR(inode->i_rdev);
++	return MINOR(inode->i_mdev);
+ }
+ 
+ static inline unsigned imajor(const struct inode *inode)
+ {
+-	return MAJOR(inode->i_rdev);
++	return MAJOR(inode->i_mdev);
+ }
+ 
+ extern struct block_device *I_BDEV(struct inode *inode);
+@@ -928,6 +963,7 @@ struct file {
+ 	loff_t			f_pos;
+ 	struct fown_struct	f_owner;
+ 	const struct cred	*f_cred;
++	xid_t			f_xid;
+ 	struct file_ra_state	f_ra;
+ 
+ 	u64			f_version;
+@@ -1069,6 +1105,7 @@ struct file_lock {
+ 	struct file *fl_file;
+ 	loff_t fl_start;
+ 	loff_t fl_end;
++	xid_t fl_xid;
+ 
+ 	struct fasync_struct *	fl_fasync; /* for lease break notifications */
+ 	unsigned long fl_break_time;	/* for nonblocking lease breaks */
+@@ -1536,6 +1573,7 @@ struct inode_operations {
+ 	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ 	int (*removexattr) (struct dentry *, const char *);
++	int (*sync_flags) (struct inode *, int, int);
+ 	void (*truncate_range)(struct inode *, loff_t, loff_t);
+ 	long (*fallocate)(struct inode *inode, int mode, loff_t offset,
+ 			  loff_t len);
+@@ -1556,6 +1594,7 @@ extern ssize_t vfs_readv(struct file *, 
+ 		unsigned long, loff_t *);
+ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
+ 		unsigned long, loff_t *);
++ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
+ 
+ struct super_operations {
+    	struct inode *(*alloc_inode)(struct super_block *sb);
+@@ -2354,6 +2393,7 @@ extern int dcache_dir_open(struct inode 
+ extern int dcache_dir_close(struct inode *, struct file *);
+ extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+ extern int dcache_readdir(struct file *, void *, filldir_t);
++extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
+ extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+ extern int simple_statfs(struct dentry *, struct kstatfs *);
+ extern int simple_link(struct dentry *, struct inode *, struct dentry *);
+--- a/include/linux/gfs2_ondisk.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/gfs2_ondisk.h	2011-06-10 13:03:02.000000000 +0200
+@@ -235,6 +235,9 @@ enum {
+ 	gfs2fl_NoAtime		= 7,
+ 	gfs2fl_Sync		= 8,
+ 	gfs2fl_System		= 9,
++	gfs2fl_IXUnlink		= 16,
++	gfs2fl_Barrier		= 17,
++	gfs2fl_Cow		= 18,
+ 	gfs2fl_TruncInProg	= 29,
+ 	gfs2fl_InheritDirectio	= 30,
+ 	gfs2fl_InheritJdata	= 31,
+@@ -251,6 +254,9 @@ enum {
+ #define GFS2_DIF_NOATIME		0x00000080
+ #define GFS2_DIF_SYNC			0x00000100
+ #define GFS2_DIF_SYSTEM			0x00000200 /* New in gfs2 */
++#define GFS2_DIF_IXUNLINK		0x00010000
++#define GFS2_DIF_BARRIER		0x00020000
++#define GFS2_DIF_COW			0x00040000
+ #define GFS2_DIF_TRUNC_IN_PROG		0x20000000 /* New in gfs2 */
+ #define GFS2_DIF_INHERIT_DIRECTIO	0x40000000
+ #define GFS2_DIF_INHERIT_JDATA		0x80000000
+--- a/include/linux/if_tun.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/if_tun.h	2011-06-10 13:03:02.000000000 +0200
+@@ -48,6 +48,7 @@
+ #define TUNGETIFF      _IOR('T', 210, unsigned int)
+ #define TUNGETSNDBUF   _IOR('T', 211, int)
+ #define TUNSETSNDBUF   _IOW('T', 212, int)
++#define TUNSETNID     _IOW('T', 215, int)
+ 
+ /* TUNSETIFF ifr flags */
+ #define IFF_TUN		0x0001
+--- a/include/linux/init_task.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/init_task.h	2011-06-10 13:03:02.000000000 +0200
+@@ -184,6 +184,10 @@ extern struct cred init_cred;
+ 	INIT_FTRACE_GRAPH						\
+ 	INIT_TRACE_RECURSION						\
+ 	INIT_TASK_RCU_PREEMPT(tsk)					\
++	.xid		= 0,						\
++	.vx_info	= NULL,						\
++	.nid		= 0,						\
++	.nx_info	= NULL,						\
+ }
+ 
+ 
+--- a/include/linux/ipc.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/ipc.h	2011-06-10 13:03:02.000000000 +0200
+@@ -91,6 +91,7 @@ struct kern_ipc_perm
+ 	key_t		key;
+ 	uid_t		uid;
+ 	gid_t		gid;
++	xid_t		xid;
+ 	uid_t		cuid;
+ 	gid_t		cgid;
+ 	mode_t		mode; 
+--- a/include/linux/loop.h	2009-09-10 15:26:25.000000000 +0200
++++ a/include/linux/loop.h	2011-06-10 13:03:02.000000000 +0200
+@@ -45,6 +45,7 @@ struct loop_device {
+ 	struct loop_func_table *lo_encryption;
+ 	__u32           lo_init[2];
+ 	uid_t		lo_key_owner;	/* Who set the key */
++	xid_t		lo_xid;
+ 	int		(*ioctl)(struct loop_device *, int cmd, 
+ 				 unsigned long arg); 
+ 
+--- a/include/linux/magic.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/magic.h	2011-06-10 13:03:02.000000000 +0200
+@@ -3,7 +3,7 @@
+ 
+ #define ADFS_SUPER_MAGIC	0xadf5
+ #define AFFS_SUPER_MAGIC	0xadff
+-#define AFS_SUPER_MAGIC                0x5346414F
++#define AFS_SUPER_MAGIC		0x5346414F
+ #define AUTOFS_SUPER_MAGIC	0x0187
+ #define CODA_SUPER_MAGIC	0x73757245
+ #define CRAMFS_MAGIC		0x28cd3d45	/* some random number */
+@@ -38,6 +38,7 @@
+ #define NFS_SUPER_MAGIC		0x6969
+ #define OPENPROM_SUPER_MAGIC	0x9fa1
+ #define PROC_SUPER_MAGIC	0x9fa0
++#define DEVPTS_SUPER_MAGIC	0x1cd1
+ #define QNX4_SUPER_MAGIC	0x002f		/* qnx4 fs detection */
+ 
+ #define REISERFS_SUPER_MAGIC	0x52654973	/* used by gcc */
+--- a/include/linux/major.h	2009-09-10 15:26:25.000000000 +0200
++++ a/include/linux/major.h	2011-06-10 13:03:02.000000000 +0200
+@@ -15,6 +15,7 @@
+ #define HD_MAJOR		IDE0_MAJOR
+ #define PTY_SLAVE_MAJOR		3
+ #define TTY_MAJOR		4
++#define VROOT_MAJOR		4
+ #define TTYAUX_MAJOR		5
+ #define LP_MAJOR		6
+ #define VCS_MAJOR		7
+--- a/include/linux/memcontrol.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/memcontrol.h	2011-06-10 13:03:02.000000000 +0200
+@@ -70,6 +70,13 @@ int task_in_mem_cgroup(struct task_struc
+ 
+ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+ 
++extern u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member);
++extern u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member);
++
++extern s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem);
++extern s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem);
++extern s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem);
++
+ static inline
+ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
+ {
+--- a/include/linux/mm_types.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/mm_types.h	2011-06-10 13:03:02.000000000 +0200
+@@ -246,6 +246,7 @@ struct mm_struct {
+ 
+ 	/* Architecture-specific MM context */
+ 	mm_context_t context;
++	struct vx_info *mm_vx_info;
+ 
+ 	/* Swap token stuff */
+ 	/*
+--- a/include/linux/mount.h	2009-09-10 15:26:25.000000000 +0200
++++ a/include/linux/mount.h	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,9 @@ struct mnt_namespace;
+ #define MNT_UNBINDABLE	0x2000	/* if the vfsmount is a unbindable mount */
+ #define MNT_PNODE_MASK	0x3000	/* propagation flag mask */
+ 
++#define MNT_TAGID	0x10000
++#define MNT_NOTAG	0x20000
++
+ struct vfsmount {
+ 	struct list_head mnt_hash;
+ 	struct vfsmount *mnt_parent;	/* fs we are mounted on */
+@@ -70,6 +73,7 @@ struct vfsmount {
+ #else
+ 	int mnt_writers;
+ #endif
++	tag_t mnt_tag;			/* tagging used for vfsmount */
+ };
+ 
+ static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
+--- a/include/linux/net.h	2009-12-03 20:02:55.000000000 +0100
++++ a/include/linux/net.h	2011-06-10 13:03:02.000000000 +0200
+@@ -69,6 +69,7 @@ struct net;
+ #define SOCK_NOSPACE		2
+ #define SOCK_PASSCRED		3
+ #define SOCK_PASSSEC		4
++#define SOCK_USER_SOCKET	5
+ 
+ #ifndef ARCH_HAS_SOCKET_TYPES
+ /**
+--- a/include/linux/nfs_mount.h	2009-03-24 14:22:43.000000000 +0100
++++ a/include/linux/nfs_mount.h	2011-06-10 13:03:02.000000000 +0200
+@@ -63,7 +63,8 @@ struct nfs_mount_data {
+ #define NFS_MOUNT_SECFLAVOUR	0x2000	/* 5 */
+ #define NFS_MOUNT_NORDIRPLUS	0x4000	/* 5 */
+ #define NFS_MOUNT_UNSHARED	0x8000	/* 5 */
+-#define NFS_MOUNT_FLAGMASK	0xFFFF
++#define NFS_MOUNT_TAGGED	0x10000	/* context tagging */
++#define NFS_MOUNT_FLAGMASK	0x1FFFF
+ 
+ /* The following are for internal use only */
+ #define NFS_MOUNT_LOOKUP_CACHE_NONEG	0x10000
+--- a/include/linux/nsproxy.h	2009-06-11 17:13:17.000000000 +0200
++++ a/include/linux/nsproxy.h	2011-06-10 13:03:02.000000000 +0200
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/spinlock.h>
+ #include <linux/sched.h>
++#include <linux/vserver/debug.h>
+ 
+ struct mnt_namespace;
+ struct uts_namespace;
+@@ -63,22 +64,33 @@ static inline struct nsproxy *task_nspro
+ }
+ 
+ int copy_namespaces(unsigned long flags, struct task_struct *tsk);
++struct nsproxy *copy_nsproxy(struct nsproxy *orig);
+ void exit_task_namespaces(struct task_struct *tsk);
+ void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+ void free_nsproxy(struct nsproxy *ns);
+ int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
+ 	struct fs_struct *);
+ 
+-static inline void put_nsproxy(struct nsproxy *ns)
++#define	get_nsproxy(n)	__get_nsproxy(n, __FILE__, __LINE__)
++
++static inline void __get_nsproxy(struct nsproxy *ns,
++	const char *_file, int _line)
+ {
+-	if (atomic_dec_and_test(&ns->count)) {
+-		free_nsproxy(ns);
+-	}
++	vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
++		ns, atomic_read(&ns->count), _file, _line);
++	atomic_inc(&ns->count);
+ }
+ 
+-static inline void get_nsproxy(struct nsproxy *ns)
++#define	put_nsproxy(n)	__put_nsproxy(n, __FILE__, __LINE__)
++
++static inline void __put_nsproxy(struct nsproxy *ns,
++	const char *_file, int _line)
+ {
+-	atomic_inc(&ns->count);
++	vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
++		ns, atomic_read(&ns->count), _file, _line);
++	if (atomic_dec_and_test(&ns->count)) {
++		free_nsproxy(ns);
++	}
+ }
+ 
+ #ifdef CONFIG_CGROUP_NS
+--- a/include/linux/pid.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/pid.h	2011-06-10 13:03:02.000000000 +0200
+@@ -8,7 +8,8 @@ enum pid_type
+ 	PIDTYPE_PID,
+ 	PIDTYPE_PGID,
+ 	PIDTYPE_SID,
+-	PIDTYPE_MAX
++	PIDTYPE_MAX,
++	PIDTYPE_REALPID
+ };
+ 
+ /*
+@@ -160,6 +161,7 @@ static inline pid_t pid_nr(struct pid *p
+ }
+ 
+ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
++pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
+ pid_t pid_vnr(struct pid *pid);
+ 
+ #define do_each_pid_task(pid, type, task)				\
+--- a/include/linux/proc_fs.h	2009-12-03 20:02:56.000000000 +0100
++++ a/include/linux/proc_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -56,6 +56,7 @@ struct proc_dir_entry {
+ 	nlink_t nlink;
+ 	uid_t uid;
+ 	gid_t gid;
++	int vx_flags;
+ 	loff_t size;
+ 	const struct inode_operations *proc_iops;
+ 	/*
+@@ -250,12 +251,18 @@ kclist_add(struct kcore_list *new, void 
+ extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+ #endif
+ 
++struct vx_info;
++struct nx_info;
++
+ union proc_op {
+ 	int (*proc_get_link)(struct inode *, struct path *);
+ 	int (*proc_read)(struct task_struct *task, char *page);
+ 	int (*proc_show)(struct seq_file *m,
+ 		struct pid_namespace *ns, struct pid *pid,
+ 		struct task_struct *task);
++	int (*proc_vs_read)(char *page);
++	int (*proc_vxi_read)(struct vx_info *vxi, char *page);
++	int (*proc_nxi_read)(struct nx_info *nxi, char *page);
+ };
+ 
+ struct ctl_table_header;
+@@ -263,6 +270,7 @@ struct ctl_table;
+ 
+ struct proc_inode {
+ 	struct pid *pid;
++	int vx_flags;
+ 	int fd;
+ 	union proc_op op;
+ 	struct proc_dir_entry *pde;
+--- a/include/linux/quotaops.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/quotaops.h	2011-06-10 13:03:02.000000000 +0200
+@@ -8,6 +8,7 @@
+ #define _LINUX_QUOTAOPS_
+ 
+ #include <linux/fs.h>
++#include <linux/vs_dlimit.h>
+ 
+ static inline struct quota_info *sb_dqopt(struct super_block *sb)
+ {
+@@ -157,10 +158,14 @@ static inline void vfs_dq_init(struct in
+  * a transaction (deadlocks possible otherwise) */
+ static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
+ {
++	if (dl_alloc_space(inode, nr))
++		return 1;
+ 	if (sb_any_quota_active(inode->i_sb)) {
+ 		/* Used space is updated in alloc_space() */
+-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
++		if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA) {
++			dl_free_space(inode, nr);
+ 			return 1;
++		}
+ 	}
+ 	else
+ 		inode_add_bytes(inode, nr);
+@@ -177,10 +182,14 @@ static inline int vfs_dq_prealloc_space(
+ 
+ static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
+ {
++	if (dl_alloc_space(inode, nr))
++		return 1;
+ 	if (sb_any_quota_active(inode->i_sb)) {
+ 		/* Used space is updated in alloc_space() */
+-		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
++		if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA) {
++			dl_free_space(inode, nr);
+ 			return 1;
++		}
+ 	}
+ 	else
+ 		inode_add_bytes(inode, nr);
+@@ -197,10 +206,14 @@ static inline int vfs_dq_alloc_space(str
+ 
+ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+ {
++	if (dl_reserve_space(inode, nr))
++		return 1;
+ 	if (sb_any_quota_active(inode->i_sb)) {
+ 		/* Used space is updated in alloc_space() */
+-		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
++		if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA) {
++			dl_release_space(inode, nr);
+ 			return 1;
++		}
+ 	}
+ 	else
+ 		inode_add_rsv_space(inode, nr);
+@@ -209,10 +222,14 @@ static inline int vfs_dq_reserve_space(s
+ 
+ static inline int vfs_dq_alloc_inode(struct inode *inode)
+ {
++	if (dl_alloc_inode(inode))
++		return 1;
+ 	if (sb_any_quota_active(inode->i_sb)) {
+ 		vfs_dq_init(inode);
+-		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
++		if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) {
++			dl_free_inode(inode);
+ 			return 1;
++		}
+ 	}
+ 	return 0;
+ }
+@@ -222,9 +239,13 @@ static inline int vfs_dq_alloc_inode(str
+  */
+ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+ {
++	if (dl_claim_space(inode, nr))
++		return 1;
+ 	if (sb_any_quota_active(inode->i_sb)) {
+-		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
++		if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) {
++			dl_release_space(inode, nr);
+ 			return 1;
++		}
+ 	} else
+ 		inode_claim_rsv_space(inode, nr);
+ 
+@@ -242,6 +263,7 @@ void vfs_dq_release_reservation_space(st
+ 		inode->i_sb->dq_op->release_rsv(inode, nr);
+ 	else
+ 		inode_sub_rsv_space(inode, nr);
++	dl_release_space(inode, nr);
+ }
+ 
+ static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
+@@ -250,6 +272,7 @@ static inline void vfs_dq_free_space_nod
+ 		inode->i_sb->dq_op->free_space(inode, nr);
+ 	else
+ 		inode_sub_bytes(inode, nr);
++	dl_free_space(inode, nr);
+ }
+ 
+ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
+@@ -262,6 +285,7 @@ static inline void vfs_dq_free_inode(str
+ {
+ 	if (sb_any_quota_active(inode->i_sb))
+ 		inode->i_sb->dq_op->free_inode(inode, 1);
++	dl_free_inode(inode);
+ }
+ 
+ /* Cannot be called inside a transaction */
+@@ -365,6 +389,8 @@ static inline int vfs_dq_transfer(struct
+ 
+ static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
+ {
++	if (dl_alloc_space(inode, nr))
++		return 1;
+ 	inode_add_bytes(inode, nr);
+ 	return 0;
+ }
+@@ -378,6 +404,8 @@ static inline int vfs_dq_prealloc_space(
+ 
+ static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
+ {
++	if (dl_alloc_space(inode, nr))
++		return 1;
+ 	inode_add_bytes(inode, nr);
+ 	return 0;
+ }
+@@ -391,22 +419,28 @@ static inline int vfs_dq_alloc_space(str
+ 
+ static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
+ {
++	if (dl_reserve_space(inode, nr))
++		return 1;
+ 	return 0;
+ }
+ 
+ static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
+ {
++	if (dl_claim_space(inode, nr))
++		return 1;
+ 	return vfs_dq_alloc_space(inode, nr);
+ }
+ 
+ static inline
+ int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
+ {
++	dl_release_space(inode, nr);
+ 	return 0;
+ }
+ 
+ static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
+ {
++	dl_free_space(inode, nr);
+ 	inode_sub_bytes(inode, nr);
+ }
+ 
+--- a/include/linux/reboot.h	2008-12-25 00:26:37.000000000 +0100
++++ a/include/linux/reboot.h	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,7 @@
+ #define	LINUX_REBOOT_CMD_RESTART2	0xA1B2C3D4
+ #define	LINUX_REBOOT_CMD_SW_SUSPEND	0xD000FCE2
+ #define	LINUX_REBOOT_CMD_KEXEC		0x45584543
++#define	LINUX_REBOOT_CMD_OOM		0xDEADBEEF
+ 
+ 
+ #ifdef __KERNEL__
+--- a/include/linux/reiserfs_fs.h	2009-09-10 15:26:26.000000000 +0200
++++ a/include/linux/reiserfs_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -899,6 +899,11 @@ struct stat_data_v1 {
+ #define REISERFS_COMPR_FL     FS_COMPR_FL
+ #define REISERFS_NOTAIL_FL    FS_NOTAIL_FL
+ 
++/* unfortunately reiserfs sdattr is only 16 bit */
++#define REISERFS_IXUNLINK_FL  (FS_IXUNLINK_FL >> 16)
++#define REISERFS_BARRIER_FL   (FS_BARRIER_FL >> 16)
++#define REISERFS_COW_FL       (FS_COW_FL >> 16)
++
+ /* persistent flags that file inherits from the parent directory */
+ #define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL |	\
+ 				REISERFS_SYNC_FL |	\
+@@ -908,6 +913,9 @@ struct stat_data_v1 {
+ 				REISERFS_COMPR_FL |	\
+ 				REISERFS_NOTAIL_FL )
+ 
++#define REISERFS_FL_USER_VISIBLE	0x80FF
++#define REISERFS_FL_USER_MODIFIABLE	0x80FF
++
+ /* Stat Data on disk (reiserfs version of UFS disk inode minus the
+    address blocks) */
+ struct stat_data {
+@@ -1989,6 +1997,7 @@ static inline void reiserfs_update_sd(st
+ void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
+ void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
+ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
++int reiserfs_sync_flags(struct inode *inode, int, int);
+ 
+ /* namei.c */
+ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
+--- a/include/linux/reiserfs_fs_sb.h	2009-09-10 15:26:26.000000000 +0200
++++ a/include/linux/reiserfs_fs_sb.h	2011-06-10 13:03:02.000000000 +0200
+@@ -456,6 +456,7 @@ enum reiserfs_mount_options {
+ 	REISERFS_EXPOSE_PRIVROOT,
+ 	REISERFS_BARRIER_NONE,
+ 	REISERFS_BARRIER_FLUSH,
++	REISERFS_TAGGED,
+ 
+ 	/* Actions on error */
+ 	REISERFS_ERROR_PANIC,
+--- a/include/linux/sched.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/sched.h	2011-06-10 13:03:02.000000000 +0200
+@@ -389,25 +389,28 @@ extern void arch_unmap_area_topdown(stru
+  * The mm counters are not protected by its page_table_lock,
+  * so must be incremented atomically.
+  */
+-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
++#define __set_mm_counter(mm, member, value) \
++	atomic_long_set(&(mm)->_##member, value)
++#define get_mm_counter(mm, member) \
++	((unsigned long)atomic_long_read(&(mm)->_##member))
+ 
+ #else  /* !USE_SPLIT_PTLOCKS */
+ /*
+  * The mm counters are protected by its page_table_lock,
+  * so can be incremented directly.
+  */
+-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
++#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
+ #define get_mm_counter(mm, member) ((mm)->_##member)
+-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
+-#define inc_mm_counter(mm, member) (mm)->_##member++
+-#define dec_mm_counter(mm, member) (mm)->_##member--
+ 
+ #endif /* !USE_SPLIT_PTLOCKS */
+ 
++#define set_mm_counter(mm, member, value) \
++	vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
++#define add_mm_counter(mm, member, value) \
++	vx_ ## member ## pages_add((mm), (value))
++#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
++#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
++
+ #define get_mm_rss(mm)					\
+ 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
+ #define update_hiwater_rss(mm)	do {			\
+@@ -1185,6 +1188,12 @@ struct sched_entity {
+ 	u64			nr_wakeups_affine_attempts;
+ 	u64			nr_wakeups_passive;
+ 	u64			nr_wakeups_idle;
++#ifdef CONFIG_CFS_HARD_LIMITS
++	u64			throttle_start;
++	u64			throttle_max;
++	u64			throttle_count;
++	u64			throttle_sum;
++#endif
+ #endif
+ 
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+@@ -1395,6 +1404,14 @@ struct task_struct {
+ #endif
+ 	seccomp_t seccomp;
+ 
++/* vserver context data */
++	struct vx_info *vx_info;
++	struct nx_info *nx_info;
++
++	xid_t xid;
++	nid_t nid;
++	tag_t tag;
++
+ /* Thread group tracking */
+    	u32 parent_exec_id;
+    	u32 self_exec_id;
+@@ -1619,6 +1636,11 @@ struct pid_namespace;
+ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ 			struct pid_namespace *ns);
+ 
++#include <linux/vserver/base.h>
++#include <linux/vserver/context.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/pid.h>
++
+ static inline pid_t task_pid_nr(struct task_struct *tsk)
+ {
+ 	return tsk->pid;
+@@ -1632,7 +1654,8 @@ static inline pid_t task_pid_nr_ns(struc
+ 
+ static inline pid_t task_pid_vnr(struct task_struct *tsk)
+ {
+-	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
++	// return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
++	return vx_map_pid(__task_pid_nr_ns(tsk, PIDTYPE_PID, NULL));
+ }
+ 
+ 
+@@ -1645,7 +1668,7 @@ pid_t task_tgid_nr_ns(struct task_struct
+ 
+ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+ {
+-	return pid_vnr(task_tgid(tsk));
++	return vx_map_tgid(pid_vnr(task_tgid(tsk)));
+ }
+ 
+ 
+--- a/include/linux/shmem_fs.h	2009-12-03 20:02:56.000000000 +0100
++++ a/include/linux/shmem_fs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -8,6 +8,9 @@
+ 
+ #define SHMEM_NR_DIRECT 16
+ 
++#define TMPFS_SUPER_MAGIC	0x01021994
++
++
+ struct shmem_inode_info {
+ 	spinlock_t		lock;
+ 	unsigned long		flags;
+--- a/include/linux/stat.h	2008-12-25 00:26:37.000000000 +0100
++++ a/include/linux/stat.h	2011-06-10 13:03:02.000000000 +0200
+@@ -66,6 +66,7 @@ struct kstat {
+ 	unsigned int	nlink;
+ 	uid_t		uid;
+ 	gid_t		gid;
++	tag_t		tag;
+ 	dev_t		rdev;
+ 	loff_t		size;
+ 	struct timespec  atime;
+--- a/include/linux/sunrpc/auth.h	2009-12-03 20:02:56.000000000 +0100
++++ a/include/linux/sunrpc/auth.h	2011-06-10 13:03:02.000000000 +0200
+@@ -25,6 +25,7 @@
+ struct auth_cred {
+ 	uid_t	uid;
+ 	gid_t	gid;
++	tag_t	tag;
+ 	struct group_info *group_info;
+ 	unsigned char machine_cred : 1;
+ };
+--- a/include/linux/sunrpc/clnt.h	2009-12-03 20:02:56.000000000 +0100
++++ a/include/linux/sunrpc/clnt.h	2011-06-10 13:03:02.000000000 +0200
+@@ -49,7 +49,8 @@ struct rpc_clnt {
+ 	unsigned int		cl_softrtry : 1,/* soft timeouts */
+ 				cl_discrtry : 1,/* disconnect before retry */
+ 				cl_autobind : 1,/* use getport() */
+-				cl_chatty   : 1;/* be verbose */
++				cl_chatty   : 1,/* be verbose */
++				cl_tag      : 1;/* context tagging */
+ 
+ 	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
+ 	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
+--- a/include/linux/syscalls.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/syscalls.h	2011-06-10 13:03:02.000000000 +0200
+@@ -548,6 +548,8 @@ asmlinkage long sys_symlink(const char _
+ asmlinkage long sys_unlink(const char __user *pathname);
+ asmlinkage long sys_rename(const char __user *oldname,
+ 				const char __user *newname);
++asmlinkage long sys_copyfile(const char __user *from, const char __user *to,
++				umode_t mode);
+ asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
+ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
+ 
+--- a/include/linux/sysctl.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/sysctl.h	2011-06-10 13:03:02.000000000 +0200
+@@ -69,6 +69,7 @@ enum
+ 	CTL_ABI=9,		/* Binary emulation */
+ 	CTL_CPU=10,		/* CPU stuff (speed scaling, etc) */
+ 	CTL_ARLAN=254,		/* arlan wireless driver */
++	CTL_VSERVER=4242,	/* Linux-VServer debug */
+ 	CTL_S390DBF=5677,	/* s390 debug */
+ 	CTL_SUNRPC=7249,	/* sunrpc debug */
+ 	CTL_PM=9899,		/* frv power management */
+@@ -103,6 +104,7 @@ enum
+ 
+ 	KERN_PANIC=15,		/* int: panic timeout */
+ 	KERN_REALROOTDEV=16,	/* real root device to mount after initrd */
++	KERN_VSHELPER=17,	/* string: path to vshelper policy agent */
+ 
+ 	KERN_SPARC_REBOOT=21,	/* reboot command on Sparc */
+ 	KERN_CTLALTDEL=22,	/* int: allow ctl-alt-del to reboot */
+--- a/include/linux/sysfs.h	2008-12-25 00:26:37.000000000 +0100
++++ a/include/linux/sysfs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -17,6 +17,8 @@
+ #include <linux/list.h>
+ #include <asm/atomic.h>
+ 
++#define SYSFS_SUPER_MAGIC	0x62656572
++
+ struct kobject;
+ struct module;
+ 
+--- a/include/linux/time.h	2011-05-29 23:42:27.000000000 +0200
++++ a/include/linux/time.h	2011-06-10 13:03:02.000000000 +0200
+@@ -238,6 +238,9 @@ static __always_inline void timespec_add
+ 	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+ 	a->tv_nsec = ns;
+ }
++
++#include <linux/vs_time.h>
++
+ #endif /* __KERNEL__ */
+ 
+ #define NFDBITS			__NFDBITS
+--- a/include/linux/types.h	2009-09-10 15:26:26.000000000 +0200
++++ a/include/linux/types.h	2011-06-10 13:03:02.000000000 +0200
+@@ -37,6 +37,9 @@ typedef __kernel_uid32_t	uid_t;
+ typedef __kernel_gid32_t	gid_t;
+ typedef __kernel_uid16_t        uid16_t;
+ typedef __kernel_gid16_t        gid16_t;
++typedef unsigned int		xid_t;
++typedef unsigned int		nid_t;
++typedef unsigned int		tag_t;
+ 
+ typedef unsigned long		uintptr_t;
+ 
+--- a/include/linux/vroot.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vroot.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,51 @@
++
++/*
++ * include/linux/vroot.h
++ *
++ * written by Herbert Pötzl, 9/11/2002
++ * ported to 2.6 by Herbert Pötzl, 30/12/2004
++ *
++ * Copyright (C) 2002-2007 by Herbert Pötzl.
++ * Redistribution of this file is permitted under the
++ * GNU General Public License.
++ */
++
++#ifndef _LINUX_VROOT_H
++#define _LINUX_VROOT_H
++
++
++#ifdef __KERNEL__
++
++/* Possible states of device */
++enum {
++	Vr_unbound,
++	Vr_bound,
++};
++
++struct vroot_device {
++	int		vr_number;
++	int		vr_refcnt;
++
++	struct semaphore	vr_ctl_mutex;
++	struct block_device    *vr_device;
++	int			vr_state;
++};
++
++
++typedef struct block_device *(vroot_grb_func)(struct block_device *);
++
++extern int register_vroot_grb(vroot_grb_func *);
++extern int unregister_vroot_grb(vroot_grb_func *);
++
++#endif /* __KERNEL__ */
++
++#define MAX_VROOT_DEFAULT	8
++
++/*
++ * IOCTL commands --- we will commandeer 0x56 ('V')
++ */
++
++#define VROOT_SET_DEV		0x5600
++#define VROOT_CLR_DEV		0x5601
++
++#endif /* _LINUX_VROOT_H */
+--- a/include/linux/vs_base.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_base.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,10 @@
++#ifndef _VS_BASE_H
++#define _VS_BASE_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_context.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_context.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,242 @@
++#ifndef _VS_CONTEXT_H
++#define _VS_CONTEXT_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/context.h"
++#include "vserver/history.h"
++#include "vserver/debug.h"
++
++#include <linux/sched.h>
++
++
++#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__)
++
++static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
++	const char *_file, int _line, void *_here)
++{
++	if (!vxi)
++		return NULL;
++
++	vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
++		vxi, vxi ? vxi->vx_id : 0,
++		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++		_file, _line);
++	__vxh_get_vx_info(vxi, _here);
++
++	atomic_inc(&vxi->vx_usecnt);
++	return vxi;
++}
++
++
++extern void free_vx_info(struct vx_info *);
++
++#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__)
++
++static inline void __put_vx_info(struct vx_info *vxi,
++	const char *_file, int _line, void *_here)
++{
++	if (!vxi)
++		return;
++
++	vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
++		vxi, vxi ? vxi->vx_id : 0,
++		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++		_file, _line);
++	__vxh_put_vx_info(vxi, _here);
++
++	if (atomic_dec_and_test(&vxi->vx_usecnt))
++		free_vx_info(vxi);
++}
++
++
++#define init_vx_info(p, i) \
++	__init_vx_info(p, i, __FILE__, __LINE__, __HERE__)
++
++static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
++	const char *_file, int _line, void *_here)
++{
++	if (vxi) {
++		vxlprintk(VXD_CBIT(xid, 3),
++			"init_vx_info(%p[#%d.%d])",
++			vxi, vxi ? vxi->vx_id : 0,
++			vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++			_file, _line);
++		__vxh_init_vx_info(vxi, vxp, _here);
++
++		atomic_inc(&vxi->vx_usecnt);
++	}
++	*vxp = vxi;
++}
++
++
++#define set_vx_info(p, i) \
++	__set_vx_info(p, i, __FILE__, __LINE__, __HERE__)
++
++static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
++	const char *_file, int _line, void *_here)
++{
++	struct vx_info *vxo;
++
++	if (!vxi)
++		return;
++
++	vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])",
++		vxi, vxi ? vxi->vx_id : 0,
++		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++		_file, _line);
++	__vxh_set_vx_info(vxi, vxp, _here);
++
++	atomic_inc(&vxi->vx_usecnt);
++	vxo = xchg(vxp, vxi);
++	BUG_ON(vxo);
++}
++
++
++#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__)
++
++static inline void __clr_vx_info(struct vx_info **vxp,
++	const char *_file, int _line, void *_here)
++{
++	struct vx_info *vxo;
++
++	vxo = xchg(vxp, NULL);
++	if (!vxo)
++		return;
++
++	vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])",
++		vxo, vxo ? vxo->vx_id : 0,
++		vxo ? atomic_read(&vxo->vx_usecnt) : 0,
++		_file, _line);
++	__vxh_clr_vx_info(vxo, vxp, _here);
++
++	if (atomic_dec_and_test(&vxo->vx_usecnt))
++		free_vx_info(vxo);
++}
++
++
++#define claim_vx_info(v, p) \
++	__claim_vx_info(v, p, __FILE__, __LINE__, __HERE__)
++
++static inline void __claim_vx_info(struct vx_info *vxi,
++	struct task_struct *task,
++	const char *_file, int _line, void *_here)
++{
++	vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p",
++		vxi, vxi ? vxi->vx_id : 0,
++		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++		vxi ? atomic_read(&vxi->vx_tasks) : 0,
++		task, _file, _line);
++	__vxh_claim_vx_info(vxi, task, _here);
++
++	atomic_inc(&vxi->vx_tasks);
++}
++
++
++extern void unhash_vx_info(struct vx_info *);
++
++#define release_vx_info(v, p) \
++	__release_vx_info(v, p, __FILE__, __LINE__, __HERE__)
++
++static inline void __release_vx_info(struct vx_info *vxi,
++	struct task_struct *task,
++	const char *_file, int _line, void *_here)
++{
++	vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p",
++		vxi, vxi ? vxi->vx_id : 0,
++		vxi ? atomic_read(&vxi->vx_usecnt) : 0,
++		vxi ? atomic_read(&vxi->vx_tasks) : 0,
++		task, _file, _line);
++	__vxh_release_vx_info(vxi, task, _here);
++
++	might_sleep();
++
++	if (atomic_dec_and_test(&vxi->vx_tasks))
++		unhash_vx_info(vxi);
++}
++
++
++#define task_get_vx_info(p) \
++	__task_get_vx_info(p, __FILE__, __LINE__, __HERE__)
++
++static inline struct vx_info *__task_get_vx_info(struct task_struct *p,
++	const char *_file, int _line, void *_here)
++{
++	struct vx_info *vxi;
++
++	task_lock(p);
++	vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
++		p, _file, _line);
++	vxi = __get_vx_info(p->vx_info, _file, _line, _here);
++	task_unlock(p);
++	return vxi;
++}
++
++
++static inline void __wakeup_vx_info(struct vx_info *vxi)
++{
++	if (waitqueue_active(&vxi->vx_wait))
++		wake_up_interruptible(&vxi->vx_wait);
++}
++
++
++#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__)
++
++static inline void __enter_vx_info(struct vx_info *vxi,
++	struct vx_info_save *vxis, const char *_file, int _line)
++{
++	vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]",
++		vxi, vxi ? vxi->vx_id : 0, vxis, current,
++		current->xid, current->vx_info, _file, _line);
++	vxis->vxi = xchg(&current->vx_info, vxi);
++	vxis->xid = current->xid;
++	current->xid = vxi ? vxi->vx_id : 0;
++}
++
++#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__)
++
++static inline void __leave_vx_info(struct vx_info_save *vxis,
++	const char *_file, int _line)
++{
++	vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]",
++		vxis, vxis->xid, vxis->vxi, current,
++		current->xid, current->vx_info, _file, _line);
++	(void)xchg(&current->vx_info, vxis->vxi);
++	current->xid = vxis->xid;
++}
++
++
++static inline void __enter_vx_admin(struct vx_info_save *vxis)
++{
++	vxis->vxi = xchg(&current->vx_info, NULL);
++	vxis->xid = xchg(&current->xid, (xid_t)0);
++}
++
++static inline void __leave_vx_admin(struct vx_info_save *vxis)
++{
++	(void)xchg(&current->xid, vxis->xid);
++	(void)xchg(&current->vx_info, vxis->vxi);
++}
++
++#define task_is_init(p) \
++	__task_is_init(p, __FILE__, __LINE__, __HERE__)
++
++static inline int __task_is_init(struct task_struct *p,
++	const char *_file, int _line, void *_here)
++{
++	int is_init = is_global_init(p);
++
++	task_lock(p);
++	if (p->vx_info)
++		is_init = p->vx_info->vx_initpid == p->pid;
++	task_unlock(p);
++	return is_init;
++}
++
++extern void exit_vx_info(struct task_struct *, int);
++extern void exit_vx_info_early(struct task_struct *, int);
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_cowbl.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_cowbl.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,47 @@
++#ifndef _VS_COWBL_H
++#define _VS_COWBL_H
++
++#include <linux/fs.h>
++#include <linux/dcache.h>
++#include <linux/namei.h>
++
++extern struct dentry *cow_break_link(const char *pathname);
++
++static inline int cow_check_and_break(struct path *path)
++{
++	struct inode *inode = path->dentry->d_inode;
++	int error = 0;
++
++	/* do we need this check? */
++	if (IS_RDONLY(inode))
++		return -EROFS;
++
++	if (IS_COW(inode)) {
++		if (IS_COW_LINK(inode)) {
++			struct dentry *new_dentry, *old_dentry = path->dentry;
++			char *pp, *buf;
++
++			buf = kmalloc(PATH_MAX, GFP_KERNEL);
++			if (!buf) {
++				return -ENOMEM;
++			}
++			pp = d_path(path, buf, PATH_MAX);
++			new_dentry = cow_break_link(pp);
++			kfree(buf);
++			if (!IS_ERR(new_dentry)) {
++				path->dentry = new_dentry;
++				dput(old_dentry);
++			} else
++				error = PTR_ERR(new_dentry);
++		} else {
++			inode->i_flags &= ~(S_IXUNLINK | S_IMMUTABLE);
++			inode->i_ctime = CURRENT_TIME;
++			mark_inode_dirty(inode);
++		}
++	}
++	return error;
++}
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_cvirt.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_cvirt.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,50 @@
++#ifndef _VS_CVIRT_H
++#define _VS_CVIRT_H
++
++#include "vserver/cvirt.h"
++#include "vserver/context.h"
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++
++static inline void vx_activate_task(struct task_struct *p)
++{
++	struct vx_info *vxi;
++
++	if ((vxi = p->vx_info)) {
++		vx_update_load(vxi);
++		atomic_inc(&vxi->cvirt.nr_running);
++	}
++}
++
++static inline void vx_deactivate_task(struct task_struct *p)
++{
++	struct vx_info *vxi;
++
++	if ((vxi = p->vx_info)) {
++		vx_update_load(vxi);
++		atomic_dec(&vxi->cvirt.nr_running);
++	}
++}
++
++static inline void vx_uninterruptible_inc(struct task_struct *p)
++{
++	struct vx_info *vxi;
++
++	if ((vxi = p->vx_info))
++		atomic_inc(&vxi->cvirt.nr_uninterruptible);
++}
++
++static inline void vx_uninterruptible_dec(struct task_struct *p)
++{
++	struct vx_info *vxi;
++
++	if ((vxi = p->vx_info))
++		atomic_dec(&vxi->cvirt.nr_uninterruptible);
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_device.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_device.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,45 @@
++#ifndef _VS_DEVICE_H
++#define _VS_DEVICE_H
++
++#include "vserver/base.h"
++#include "vserver/device.h"
++#include "vserver/debug.h"
++
++
++#ifdef CONFIG_VSERVER_DEVICE
++
++int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t);
++
++#define vs_device_perm(v, d, m, p) \
++	((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p))
++
++#else
++
++static inline
++int vs_map_device(struct vx_info *vxi,
++	dev_t device, dev_t *target, umode_t mode)
++{
++	if (target)
++		*target = device;
++	return ~0;
++}
++
++#define vs_device_perm(v, d, m, p) ((p) == (p))
++
++#endif
++
++
++#define vs_map_chrdev(d, t, p) \
++	((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p))
++#define vs_map_blkdev(d, t, p) \
++	((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p))
++
++#define vs_chrdev_perm(d, p) \
++	vs_device_perm(current_vx_info(), d, S_IFCHR, p)
++#define vs_blkdev_perm(d, p) \
++	vs_device_perm(current_vx_info(), d, S_IFBLK, p)
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_dlimit.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_dlimit.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,215 @@
++#ifndef _VS_DLIMIT_H
++#define _VS_DLIMIT_H
++
++#include <linux/fs.h>
++
++#include "vserver/dlimit.h"
++#include "vserver/base.h"
++#include "vserver/debug.h"
++
++
++#define get_dl_info(i)	__get_dl_info(i, __FILE__, __LINE__)
++
++static inline struct dl_info *__get_dl_info(struct dl_info *dli,
++	const char *_file, int _line)
++{
++	if (!dli)
++		return NULL;
++	vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
++		dli, dli ? dli->dl_tag : 0,
++		dli ? atomic_read(&dli->dl_usecnt) : 0,
++		_file, _line);
++	atomic_inc(&dli->dl_usecnt);
++	return dli;
++}
++
++
++#define free_dl_info(i) \
++	call_rcu(&(i)->dl_rcu, rcu_free_dl_info)
++
++#define put_dl_info(i)	__put_dl_info(i, __FILE__, __LINE__)
++
++static inline void __put_dl_info(struct dl_info *dli,
++	const char *_file, int _line)
++{
++	if (!dli)
++		return;
++	vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
++		dli, dli ? dli->dl_tag : 0,
++		dli ? atomic_read(&dli->dl_usecnt) : 0,
++		_file, _line);
++	if (atomic_dec_and_test(&dli->dl_usecnt))
++		free_dl_info(dli);
++}
++
++
++#define __dlimit_char(d)	((d) ? '*' : ' ')
++
++static inline int __dl_alloc_space(struct super_block *sb,
++	tag_t tag, dlsize_t nr, const char *file, int line)
++{
++	struct dl_info *dli = NULL;
++	int ret = 0;
++
++	if (nr == 0)
++		goto out;
++	dli = locate_dl_info(sb, tag);
++	if (!dli)
++		goto out;
++
++	spin_lock(&dli->dl_lock);
++	ret = (dli->dl_space_used + nr > dli->dl_space_total);
++	if (!ret)
++		dli->dl_space_used += nr;
++	spin_unlock(&dli->dl_lock);
++	put_dl_info(dli);
++out:
++	vxlprintk(VXD_CBIT(dlim, 1),
++		"ALLOC (%p,#%d)%c %lld bytes (%d)",
++		sb, tag, __dlimit_char(dli), (long long)nr,
++		ret, file, line);
++	return ret;
++}
++
++static inline void __dl_free_space(struct super_block *sb,
++	tag_t tag, dlsize_t nr, const char *_file, int _line)
++{
++	struct dl_info *dli = NULL;
++
++	if (nr == 0)
++		goto out;
++	dli = locate_dl_info(sb, tag);
++	if (!dli)
++		goto out;
++
++	spin_lock(&dli->dl_lock);
++	if (dli->dl_space_used > nr)
++		dli->dl_space_used -= nr;
++	else
++		dli->dl_space_used = 0;
++	spin_unlock(&dli->dl_lock);
++	put_dl_info(dli);
++out:
++	vxlprintk(VXD_CBIT(dlim, 1),
++		"FREE  (%p,#%d)%c %lld bytes",
++		sb, tag, __dlimit_char(dli), (long long)nr,
++		_file, _line);
++}
++
++static inline int __dl_alloc_inode(struct super_block *sb,
++	tag_t tag, const char *_file, int _line)
++{
++	struct dl_info *dli;
++	int ret = 0;
++
++	dli = locate_dl_info(sb, tag);
++	if (!dli)
++		goto out;
++
++	spin_lock(&dli->dl_lock);
++	dli->dl_inodes_used++;
++	ret = (dli->dl_inodes_used > dli->dl_inodes_total);
++	spin_unlock(&dli->dl_lock);
++	put_dl_info(dli);
++out:
++	vxlprintk(VXD_CBIT(dlim, 0),
++		"ALLOC (%p,#%d)%c inode (%d)",
++		sb, tag, __dlimit_char(dli), ret, _file, _line);
++	return ret;
++}
++
++static inline void __dl_free_inode(struct super_block *sb,
++	tag_t tag, const char *_file, int _line)
++{
++	struct dl_info *dli;
++
++	dli = locate_dl_info(sb, tag);
++	if (!dli)
++		goto out;
++
++	spin_lock(&dli->dl_lock);
++	if (dli->dl_inodes_used > 1)
++		dli->dl_inodes_used--;
++	else
++		dli->dl_inodes_used = 0;
++	spin_unlock(&dli->dl_lock);
++	put_dl_info(dli);
++out:
++	vxlprintk(VXD_CBIT(dlim, 0),
++		"FREE  (%p,#%d)%c inode",
++		sb, tag, __dlimit_char(dli), _file, _line);
++}
++
++static inline void __dl_adjust_block(struct super_block *sb, tag_t tag,
++	unsigned long long *free_blocks, unsigned long long *root_blocks,
++	const char *_file, int _line)
++{
++	struct dl_info *dli;
++	uint64_t broot, bfree;
++
++	dli = locate_dl_info(sb, tag);
++	if (!dli)
++		return;
++
++	spin_lock(&dli->dl_lock);
++	broot = (dli->dl_space_total -
++		(dli->dl_space_total >> 10) * dli->dl_nrlmult)
++		>> sb->s_blocksize_bits;
++	bfree = (dli->dl_space_total - dli->dl_space_used)
++			>> sb->s_blocksize_bits;
++	spin_unlock(&dli->dl_lock);
++
++	vxlprintk(VXD_CBIT(dlim, 2),
++		"ADJUST: %lld,%lld on %lld,%lld [mult=%d]",
++		(long long)bfree, (long long)broot,
++		*free_blocks, *root_blocks, dli->dl_nrlmult,
++		_file, _line);
++	if (free_blocks) {
++		if (*free_blocks > bfree)
++			*free_blocks = bfree;
++	}
++	if (root_blocks) {
++		if (*root_blocks > broot)
++			*root_blocks = broot;
++	}
++	put_dl_info(dli);
++}
++
++#define dl_prealloc_space(in, bytes) \
++	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++		__FILE__, __LINE__ )
++
++#define dl_alloc_space(in, bytes) \
++	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++		__FILE__, __LINE__ )
++
++#define dl_reserve_space(in, bytes) \
++	__dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++		__FILE__, __LINE__ )
++
++#define dl_claim_space(in, bytes) (0)
++
++#define dl_release_space(in, bytes) \
++	__dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++		__FILE__, __LINE__ )
++
++#define dl_free_space(in, bytes) \
++	__dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
++		__FILE__, __LINE__ )
++
++
++
++#define dl_alloc_inode(in) \
++	__dl_alloc_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
++
++#define dl_free_inode(in) \
++	__dl_free_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
++
++
++#define dl_adjust_block(sb, tag, fb, rb) \
++	__dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ )
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_inet.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_inet.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,342 @@
++#ifndef _VS_INET_H
++#define _VS_INET_H
++
++#include "vserver/base.h"
++#include "vserver/network.h"
++#include "vserver/debug.h"
++
++#define IPI_LOOPBACK	htonl(INADDR_LOOPBACK)
++
++#define NXAV4(a)	NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \
++			NIPQUAD((a)->mask), (a)->type
++#define NXAV4_FMT	"[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]"
++
++
++static inline
++int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask)
++{
++	__be32 ip = nxa->ip[0].s_addr;
++	__be32 mask = nxa->mask.s_addr;
++	__be32 bcast = ip | ~mask;
++	int ret = 0;
++
++	switch (nxa->type & tmask) {
++	case NXA_TYPE_MASK:
++		ret = (ip == (addr & mask));
++		break;
++	case NXA_TYPE_ADDR:
++		ret = 3;
++		if (addr == ip)
++			break;
++		/* fall through to broadcast */
++	case NXA_MOD_BCAST:
++		ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast));
++		break;
++	case NXA_TYPE_RANGE:
++		ret = ((nxa->ip[0].s_addr <= addr) &&
++			(nxa->ip[1].s_addr > addr));
++		break;
++	case NXA_TYPE_ANY:
++		ret = 2;
++		break;
++	}
++
++	vxdprintk(VXD_CBIT(net, 0),
++		"v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d",
++		nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret);
++	return ret;
++}
++
++static inline
++int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask)
++{
++	struct nx_addr_v4 *nxa;
++	int ret = 1;
++
++	if (!nxi)
++		goto out;
++
++	ret = 2;
++	/* allow 127.0.0.1 when remapping lback */
++	if ((tmask & NXA_LOOPBACK) &&
++		(addr == IPI_LOOPBACK) &&
++		nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++		goto out;
++	ret = 3;
++	/* check for lback address */
++	if ((tmask & NXA_MOD_LBACK) &&
++		(nxi->v4_lback.s_addr == addr))
++		goto out;
++	ret = 4;
++	/* check for broadcast address */
++	if ((tmask & NXA_MOD_BCAST) &&
++		(nxi->v4_bcast.s_addr == addr))
++		goto out;
++	ret = 5;
++	/* check for v4 addresses */
++	for (nxa = &nxi->v4; nxa; nxa = nxa->next)
++		if (v4_addr_match(nxa, addr, tmask))
++			goto out;
++	ret = 0;
++out:
++	vxdprintk(VXD_CBIT(net, 0),
++		"v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d",
++		nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret);
++	return ret;
++}
++
++static inline
++int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask)
++{
++	/* FIXME: needs full range checks */
++	return v4_addr_match(nxa, addr->ip[0].s_addr, mask);
++}
++
++static inline
++int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask)
++{
++	struct nx_addr_v4 *ptr;
++
++	for (ptr = &nxi->v4; ptr; ptr = ptr->next)
++		if (v4_nx_addr_match(ptr, nxa, mask))
++			return 1;
++	return 0;
++}
++
++#include <net/inet_sock.h>
++
++/*
++ *	Check if a given address matches for a socket
++ *
++ *	nxi:		the socket's nx_info if any
++ *	addr:		to be verified address
++ */
++static inline
++int v4_sock_addr_match (
++	struct nx_info *nxi,
++	struct inet_sock *inet,
++	__be32 addr)
++{
++	__be32 saddr = inet->rcv_saddr;
++	__be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST;
++
++	if (addr && (saddr == addr || bcast == addr))
++		return 1;
++	if (!saddr)
++		return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND);
++	return 0;
++}
++
++
++/* inet related checks and helpers */
++
++
++struct in_ifaddr;
++struct net_device;
++struct sock;
++
++#ifdef CONFIG_INET
++
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <net/inet_sock.h>
++#include <net/inet_timewait_sock.h>
++
++
++int dev_in_nx_info(struct net_device *, struct nx_info *);
++int v4_dev_in_nx_info(struct net_device *, struct nx_info *);
++int nx_v4_addr_conflict(struct nx_info *, struct nx_info *);
++
++
++/*
++ *	check if address is covered by socket
++ *
++ *	sk:	the socket to check against
++ *	addr:	the address in question (must be != 0)
++ */
++
++static inline
++int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa)
++{
++	struct nx_info *nxi = sk->sk_nx_info;
++	__be32 saddr = inet_rcv_saddr(sk);
++
++	vxdprintk(VXD_CBIT(net, 5),
++		"__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
++		sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket,
++		(sk->sk_socket?sk->sk_socket->flags:0));
++
++	if (saddr) {		/* direct address match */
++		return v4_addr_match(nxa, saddr, -1);
++	} else if (nxi) {	/* match against nx_info */
++		return v4_nx_addr_in_nx_info(nxi, nxa, -1);
++	} else {		/* unrestricted any socket */
++		return 1;
++	}
++}
++
++
++
++static inline
++int nx_dev_visible(struct nx_info *nxi, struct net_device *dev)
++{
++	vxdprintk(VXD_CBIT(net, 1), "nx_dev_visible(%p[#%u],%p »%s«) %d",
++		nxi, nxi ? nxi->nx_id : 0, dev, dev->name,
++		nxi ? dev_in_nx_info(dev, nxi) : 0);
++
++	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++		return 1;
++	if (dev_in_nx_info(dev, nxi))
++		return 1;
++	return 0;
++}
++
++
++static inline
++int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
++{
++	if (!nxi)
++		return 1;
++	if (!ifa)
++		return 0;
++	return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW);
++}
++
++static inline
++int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa)
++{
++	vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d",
++		nxi, nxi ? nxi->nx_id : 0, ifa,
++		nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0);
++
++	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++		return 1;
++	if (v4_ifa_in_nx_info(ifa, nxi))
++		return 1;
++	return 0;
++}
++
++
++struct nx_v4_sock_addr {
++	__be32 saddr;	/* Address used for validation */
++	__be32 baddr;	/* Address used for socket bind */
++};
++
++static inline
++int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr,
++	struct nx_v4_sock_addr *nsa)
++{
++	struct sock *sk = &inet->sk;
++	struct nx_info *nxi = sk->sk_nx_info;
++	__be32 saddr = addr->sin_addr.s_addr;
++	__be32 baddr = saddr;
++
++	vxdprintk(VXD_CBIT(net, 3),
++		"inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT,
++		sk, sk->sk_nx_info, sk->sk_socket,
++		(sk->sk_socket ? sk->sk_socket->flags : 0),
++		NIPQUAD(saddr));
++
++	if (nxi) {
++		if (saddr == INADDR_ANY) {
++			if (nx_info_flags(nxi, NXF_SINGLE_IP, 0))
++				baddr = nxi->v4.ip[0].s_addr;
++		} else if (saddr == IPI_LOOPBACK) {
++			if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++				baddr = nxi->v4_lback.s_addr;
++		} else {	/* normal address bind */
++			if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND))
++				return -EADDRNOTAVAIL;
++		}
++	}
++
++	vxdprintk(VXD_CBIT(net, 3),
++		"inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT,
++		sk, NIPQUAD(saddr), NIPQUAD(baddr));
++
++	nsa->saddr = saddr;
++	nsa->baddr = baddr;
++	return 0;
++}
++
++static inline
++void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa)
++{
++	inet->saddr = nsa->baddr;
++	inet->rcv_saddr = nsa->baddr;
++}
++
++
++/*
++ *      helper to simplify inet_lookup_listener
++ *
++ *      nxi:	the socket's nx_info if any
++ *      addr:	to be verified address
++ *      saddr:	socket address
++ */
++static inline int v4_inet_addr_match (
++	struct nx_info *nxi,
++	__be32 addr,
++	__be32 saddr)
++{
++	if (addr && (saddr == addr))
++		return 1;
++	if (!saddr)
++		return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1;
++	return 0;
++}
++
++static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr)
++{
++	if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) &&
++		(addr == nxi->v4_lback.s_addr))
++		return IPI_LOOPBACK;
++	return addr;
++}
++
++static inline
++int nx_info_has_v4(struct nx_info *nxi)
++{
++	if (!nxi)
++		return 1;
++	if (NX_IPV4(nxi))
++		return 1;
++	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
++		return 1;
++	return 0;
++}
++
++#else /* CONFIG_INET */
++
++static inline
++int nx_dev_visible(struct nx_info *n, struct net_device *d)
++{
++	return 1;
++}
++
++static inline
++int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
++{
++	return 1;
++}
++
++static inline
++int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
++{
++	return 1;
++}
++
++static inline
++int nx_info_has_v4(struct nx_info *nxi)
++{
++	return 0;
++}
++
++#endif /* CONFIG_INET */
++
++#define current_nx_info_has_v4() \
++	nx_info_has_v4(current_nx_info())
++
++#else
++// #warning duplicate inclusion
++#endif
+--- a/include/linux/vs_inet6.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_inet6.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,246 @@
++#ifndef _VS_INET6_H
++#define _VS_INET6_H
++
++#include "vserver/base.h"
++#include "vserver/network.h"
++#include "vserver/debug.h"
++
++#include <net/ipv6.h>
++
++#define NXAV6(a)	&(a)->ip, &(a)->mask, (a)->prefix, (a)->type
++#define NXAV6_FMT	"[%pI6/%pI6/%d:%04x]"
++
++
++#ifdef	CONFIG_IPV6
++
++static inline
++int v6_addr_match(struct nx_addr_v6 *nxa,
++	const struct in6_addr *addr, uint16_t mask)
++{
++	int ret = 0;
++
++	switch (nxa->type & mask) {
++	case NXA_TYPE_MASK:
++		ret = ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr);
++		break;
++	case NXA_TYPE_ADDR:
++		ret = ipv6_addr_equal(&nxa->ip, addr);
++		break;
++	case NXA_TYPE_ANY:
++		ret = 1;
++		break;
++	}
++	vxdprintk(VXD_CBIT(net, 0),
++		"v6_addr_match(%p" NXAV6_FMT ",%pI6,%04x) = %d",
++		nxa, NXAV6(nxa), addr, mask, ret);
++	return ret;
++}
++
++static inline
++int v6_addr_in_nx_info(struct nx_info *nxi,
++	const struct in6_addr *addr, uint16_t mask)
++{
++	struct nx_addr_v6 *nxa;
++	int ret = 1;
++
++	if (!nxi)
++		goto out;
++	for (nxa = &nxi->v6; nxa; nxa = nxa->next)
++		if (v6_addr_match(nxa, addr, mask))
++			goto out;
++	ret = 0;
++out:
++	vxdprintk(VXD_CBIT(net, 0),
++		"v6_addr_in_nx_info(%p[#%u],%pI6,%04x) = %d",
++		nxi, nxi ? nxi->nx_id : 0, addr, mask, ret);
++	return ret;
++}
++
++static inline
++int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask)
++{
++	/* FIXME: needs full range checks */
++	return v6_addr_match(nxa, &addr->ip, mask);
++}
++
++static inline
++int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask)
++{
++	struct nx_addr_v6 *ptr;
++
++	for (ptr = &nxi->v6; ptr; ptr = ptr->next)
++		if (v6_nx_addr_match(ptr, nxa, mask))
++			return 1;
++	return 0;
++}
++
++
++/*
++ *	Check if a given address matches for a socket
++ *
++ *	nxi:		the socket's nx_info if any
++ *	addr:		to be verified address
++ */
++static inline
++int v6_sock_addr_match (
++	struct nx_info *nxi,
++	struct inet_sock *inet,
++	struct in6_addr *addr)
++{
++	struct sock *sk = &inet->sk;
++	struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++	if (!ipv6_addr_any(addr) &&
++		ipv6_addr_equal(saddr, addr))
++		return 1;
++	if (ipv6_addr_any(saddr))
++		return v6_addr_in_nx_info(nxi, addr, -1);
++	return 0;
++}
++
++/*
++ *	check if address is covered by socket
++ *
++ *	sk:	the socket to check against
++ *	addr:	the address in question (must be != 0)
++ */
++
++static inline
++int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa)
++{
++	struct nx_info *nxi = sk->sk_nx_info;
++	struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++	vxdprintk(VXD_CBIT(net, 5),
++		"__v6_addr_in_socket(%p," NXAV6_FMT ") %p:%pI6 %p;%lx",
++		sk, NXAV6(nxa), nxi, saddr, sk->sk_socket,
++		(sk->sk_socket?sk->sk_socket->flags:0));
++
++	if (!ipv6_addr_any(saddr)) {	/* direct address match */
++		return v6_addr_match(nxa, saddr, -1);
++	} else if (nxi) {		/* match against nx_info */
++		return v6_nx_addr_in_nx_info(nxi, nxa, -1);
++	} else {			/* unrestricted any socket */
++		return 1;
++	}
++}
++
++
++/* inet related checks and helpers */
++
++
++struct in_ifaddr;
++struct net_device;
++struct sock;
++
++
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <net/inet_timewait_sock.h>
++
++
++int dev_in_nx_info(struct net_device *, struct nx_info *);
++int v6_dev_in_nx_info(struct net_device *, struct nx_info *);
++int nx_v6_addr_conflict(struct nx_info *, struct nx_info *);
++
++
++
++static inline
++int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi)
++{
++	if (!nxi)
++		return 1;
++	if (!ifa)
++		return 0;
++	return v6_addr_in_nx_info(nxi, &ifa->addr, -1);
++}
++
++static inline
++int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa)
++{
++	vxdprintk(VXD_CBIT(net, 1), "nx_v6_ifa_visible(%p[#%u],%p) %d",
++		nxi, nxi ? nxi->nx_id : 0, ifa,
++		nxi ? v6_ifa_in_nx_info(ifa, nxi) : 0);
++
++	if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
++		return 1;
++	if (v6_ifa_in_nx_info(ifa, nxi))
++		return 1;
++	return 0;
++}
++
++
++struct nx_v6_sock_addr {
++	struct in6_addr saddr;	/* Address used for validation */
++	struct in6_addr baddr;	/* Address used for socket bind */
++};
++
++static inline
++int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr,
++	struct nx_v6_sock_addr *nsa)
++{
++	// struct sock *sk = &inet->sk;
++	// struct nx_info *nxi = sk->sk_nx_info;
++	struct in6_addr saddr = addr->sin6_addr;
++	struct in6_addr baddr = saddr;
++
++	nsa->saddr = saddr;
++	nsa->baddr = baddr;
++	return 0;
++}
++
++static inline
++void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa)
++{
++	// struct sock *sk = &inet->sk;
++	// struct in6_addr *saddr = inet6_rcv_saddr(sk);
++
++	// *saddr = nsa->baddr;
++	// inet->saddr = nsa->baddr;
++}
++
++static inline
++int nx_info_has_v6(struct nx_info *nxi)
++{
++	if (!nxi)
++		return 1;
++	if (NX_IPV6(nxi))
++		return 1;
++	return 0;
++}
++
++#else /* CONFIG_IPV6 */
++
++static inline
++int nx_v6_dev_visible(struct nx_info *n, struct net_device *d)
++{
++	return 1;
++}
++
++
++static inline
++int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
++{
++	return 1;
++}
++
++static inline
++int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
++{
++	return 1;
++}
++
++static inline
++int nx_info_has_v6(struct nx_info *nxi)
++{
++	return 0;
++}
++
++#endif /* CONFIG_IPV6 */
++
++#define current_nx_info_has_v6() \
++	nx_info_has_v6(current_nx_info())
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_limit.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_limit.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,140 @@
++#ifndef _VS_LIMIT_H
++#define _VS_LIMIT_H
++
++#include "vserver/limit.h"
++#include "vserver/base.h"
++#include "vserver/context.h"
++#include "vserver/debug.h"
++#include "vserver/context.h"
++#include "vserver/limit_int.h"
++
++
++#define vx_acc_cres(v, d, p, r) \
++	__vx_acc_cres(v, r, d, p, __FILE__, __LINE__)
++
++#define vx_acc_cres_cond(x, d, p, r) \
++	__vx_acc_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
++	r, d, p, __FILE__, __LINE__)
++
++
++#define vx_add_cres(v, a, p, r) \
++	__vx_add_cres(v, r, a, p, __FILE__, __LINE__)
++#define vx_sub_cres(v, a, p, r)		vx_add_cres(v, -(a), p, r)
++
++#define vx_add_cres_cond(x, a, p, r) \
++	__vx_add_cres(((x) == vx_current_xid()) ? current_vx_info() : 0, \
++	r, a, p, __FILE__, __LINE__)
++#define vx_sub_cres_cond(x, a, p, r)	vx_add_cres_cond(x, -(a), p, r)
++
++
++/* process and file limits */
++
++#define vx_nproc_inc(p) \
++	vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC)
++
++#define vx_nproc_dec(p) \
++	vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC)
++
++#define vx_files_inc(f) \
++	vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE)
++
++#define vx_files_dec(f) \
++	vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE)
++
++#define vx_locks_inc(l) \
++	vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS)
++
++#define vx_locks_dec(l) \
++	vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS)
++
++#define vx_openfd_inc(f) \
++	vx_acc_cres(current_vx_info(), 1, (void *)(long)(f), VLIMIT_OPENFD)
++
++#define vx_openfd_dec(f) \
++	vx_acc_cres(current_vx_info(),-1, (void *)(long)(f), VLIMIT_OPENFD)
++
++
++#define vx_cres_avail(v, n, r) \
++	__vx_cres_avail(v, r, n, __FILE__, __LINE__)
++
++
++#define vx_nproc_avail(n) \
++	vx_cres_avail(current_vx_info(), n, RLIMIT_NPROC)
++
++#define vx_files_avail(n) \
++	vx_cres_avail(current_vx_info(), n, RLIMIT_NOFILE)
++
++#define vx_locks_avail(n) \
++	vx_cres_avail(current_vx_info(), n, RLIMIT_LOCKS)
++
++#define vx_openfd_avail(n) \
++	vx_cres_avail(current_vx_info(), n, VLIMIT_OPENFD)
++
++
++/* dentry limits */
++
++#define vx_dentry_inc(d) do {						\
++	if (atomic_read(&d->d_count) == 1)				\
++		vx_acc_cres(current_vx_info(), 1, d, VLIMIT_DENTRY);	\
++	} while (0)
++
++#define vx_dentry_dec(d) do {						\
++	if (atomic_read(&d->d_count) == 0)				\
++		vx_acc_cres(current_vx_info(),-1, d, VLIMIT_DENTRY);	\
++	} while (0)
++
++#define vx_dentry_avail(n) \
++	vx_cres_avail(current_vx_info(), n, VLIMIT_DENTRY)
++
++
++/* socket limits */
++
++#define vx_sock_inc(s) \
++	vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK)
++
++#define vx_sock_dec(s) \
++	vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK)
++
++#define vx_sock_avail(n) \
++	vx_cres_avail(current_vx_info(), n, VLIMIT_NSOCK)
++
++
++/* ipc resource limits */
++
++#define vx_ipcmsg_add(v, u, a) \
++	vx_add_cres(v, a, u, RLIMIT_MSGQUEUE)
++
++#define vx_ipcmsg_sub(v, u, a) \
++	vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE)
++
++#define vx_ipcmsg_avail(v, a) \
++	vx_cres_avail(v, a, RLIMIT_MSGQUEUE)
++
++
++#define vx_ipcshm_add(v, k, a) \
++	vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
++
++#define vx_ipcshm_sub(v, k, a) \
++	vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
++
++#define vx_ipcshm_avail(v, a) \
++	vx_cres_avail(v, a, VLIMIT_SHMEM)
++
++
++#define vx_semary_inc(a) \
++	vx_acc_cres(current_vx_info(), 1, a, VLIMIT_SEMARY)
++
++#define vx_semary_dec(a) \
++	vx_acc_cres(current_vx_info(), -1, a, VLIMIT_SEMARY)
++
++
++#define vx_nsems_add(a,n) \
++	vx_add_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
++
++#define vx_nsems_sub(a,n) \
++	vx_sub_cres(current_vx_info(), n, a, VLIMIT_NSEMS)
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_memory.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_memory.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,159 @@
++#ifndef _VS_MEMORY_H
++#define _VS_MEMORY_H
++
++#include "vserver/limit.h"
++#include "vserver/base.h"
++#include "vserver/context.h"
++#include "vserver/debug.h"
++#include "vserver/context.h"
++#include "vserver/limit_int.h"
++
++
++#define __acc_add_long(a, v)	(*(v) += (a))
++#define __acc_inc_long(v)	(++*(v))
++#define __acc_dec_long(v)	(--*(v))
++
++#if	NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++#define __acc_add_atomic(a, v)	atomic_long_add(a, v)
++#define __acc_inc_atomic(v)	atomic_long_inc(v)
++#define __acc_dec_atomic(v)	atomic_long_dec(v)
++#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++#define __acc_add_atomic(a, v)	__acc_add_long(a, v)
++#define __acc_inc_atomic(v)	__acc_inc_long(v)
++#define __acc_dec_atomic(v)	__acc_dec_long(v)
++#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
++
++
++#define vx_acc_page(m, d, v, r) do {					\
++	if ((d) > 0)							\
++		__acc_inc_long(&(m)->v);				\
++	else								\
++		__acc_dec_long(&(m)->v);				\
++	__vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__);	\
++} while (0)
++
++#define vx_acc_page_atomic(m, d, v, r) do {				\
++	if ((d) > 0)							\
++		__acc_inc_atomic(&(m)->v);				\
++	else								\
++		__acc_dec_atomic(&(m)->v);				\
++	__vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__);	\
++} while (0)
++
++
++#define vx_acc_pages(m, p, v, r) do {					\
++	unsigned long __p = (p);					\
++	__acc_add_long(__p, &(m)->v);					\
++	__vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__);	\
++} while (0)
++
++#define vx_acc_pages_atomic(m, p, v, r) do {				\
++	unsigned long __p = (p);					\
++	__acc_add_atomic(__p, &(m)->v);					\
++	__vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__);	\
++} while (0)
++
++
++
++#define vx_acc_vmpage(m, d) \
++	vx_acc_page(m, d, total_vm,  RLIMIT_AS)
++#define vx_acc_vmlpage(m, d) \
++	vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK)
++#define vx_acc_file_rsspage(m, d) \
++	vx_acc_page_atomic(m, d, _file_rss, VLIMIT_MAPPED)
++#define vx_acc_anon_rsspage(m, d) \
++	vx_acc_page_atomic(m, d, _anon_rss, VLIMIT_ANON)
++
++#define vx_acc_vmpages(m, p) \
++	vx_acc_pages(m, p, total_vm,  RLIMIT_AS)
++#define vx_acc_vmlpages(m, p) \
++	vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK)
++#define vx_acc_file_rsspages(m, p) \
++	vx_acc_pages_atomic(m, p, _file_rss, VLIMIT_MAPPED)
++#define vx_acc_anon_rsspages(m, p) \
++	vx_acc_pages_atomic(m, p, _anon_rss, VLIMIT_ANON)
++
++#define vx_pages_add(s, r, p)	__vx_add_cres(s, r, p, 0, __FILE__, __LINE__)
++#define vx_pages_sub(s, r, p)	vx_pages_add(s, r, -(p))
++
++#define vx_vmpages_inc(m)		vx_acc_vmpage(m, 1)
++#define vx_vmpages_dec(m)		vx_acc_vmpage(m, -1)
++#define vx_vmpages_add(m, p)		vx_acc_vmpages(m, p)
++#define vx_vmpages_sub(m, p)		vx_acc_vmpages(m, -(p))
++
++#define vx_vmlocked_inc(m)		vx_acc_vmlpage(m, 1)
++#define vx_vmlocked_dec(m)		vx_acc_vmlpage(m, -1)
++#define vx_vmlocked_add(m, p)		vx_acc_vmlpages(m, p)
++#define vx_vmlocked_sub(m, p)		vx_acc_vmlpages(m, -(p))
++
++#define vx_file_rsspages_inc(m)		vx_acc_file_rsspage(m, 1)
++#define vx_file_rsspages_dec(m)		vx_acc_file_rsspage(m, -1)
++#define vx_file_rsspages_add(m, p)	vx_acc_file_rsspages(m, p)
++#define vx_file_rsspages_sub(m, p)	vx_acc_file_rsspages(m, -(p))
++
++#define vx_anon_rsspages_inc(m)		vx_acc_anon_rsspage(m, 1)
++#define vx_anon_rsspages_dec(m)		vx_acc_anon_rsspage(m, -1)
++#define vx_anon_rsspages_add(m, p)	vx_acc_anon_rsspages(m, p)
++#define vx_anon_rsspages_sub(m, p)	vx_acc_anon_rsspages(m, -(p))
++
++
++#define vx_pages_avail(m, p, r) \
++	__vx_cres_avail((m)->mm_vx_info, r, p, __FILE__, __LINE__)
++
++#define vx_vmpages_avail(m, p)	vx_pages_avail(m, p, RLIMIT_AS)
++#define vx_vmlocked_avail(m, p)	vx_pages_avail(m, p, RLIMIT_MEMLOCK)
++#define vx_anon_avail(m, p)	vx_pages_avail(m, p, VLIMIT_ANON)
++#define vx_mapped_avail(m, p)	vx_pages_avail(m, p, VLIMIT_MAPPED)
++
++#define vx_rss_avail(m, p) \
++	__vx_cres_array_avail((m)->mm_vx_info, VLA_RSS, p, __FILE__, __LINE__)
++
++
++enum {
++	VXPT_UNKNOWN = 0,
++	VXPT_ANON,
++	VXPT_NONE,
++	VXPT_FILE,
++	VXPT_SWAP,
++	VXPT_WRITE
++};
++
++#if 0
++#define	vx_page_fault(mm, vma, type, ret)
++#else
++
++static inline
++void __vx_page_fault(struct mm_struct *mm,
++	struct vm_area_struct *vma, int type, int ret)
++{
++	struct vx_info *vxi = mm->mm_vx_info;
++	int what;
++/*
++	static char *page_type[6] =
++		{ "UNKNOWN", "ANON", "NONE", "FILE", "SWAP", "WRITE" };
++	static char *page_what[4] =
++		{ "FAULT_OOM", "FAULT_SIGBUS", "FAULT_MINOR", "FAULT_MAJOR" };
++*/
++
++	if (!vxi)
++		return;
++
++	what = (ret & 0x3);
++
++/*	printk("[%d] page[%d][%d] %2x %s %s\n", vxi->vx_id,
++		type, what, ret, page_type[type], page_what[what]);
++*/
++	if (ret & VM_FAULT_WRITE)
++		what |= 0x4;
++	atomic_inc(&vxi->cacct.page[type][what]);
++}
++
++#define	vx_page_fault(mm, vma, type, ret)	__vx_page_fault(mm, vma, type, ret)
++#endif
++
++
++extern unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm);
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_network.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_network.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,169 @@
++#ifndef _NX_VS_NETWORK_H
++#define _NX_VS_NETWORK_H
++
++#include "vserver/context.h"
++#include "vserver/network.h"
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/debug.h"
++
++#include <linux/sched.h>
++
++
++#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
++
++static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
++	const char *_file, int _line)
++{
++	if (!nxi)
++		return NULL;
++
++	vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
++		nxi, nxi ? nxi->nx_id : 0,
++		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++		_file, _line);
++
++	atomic_inc(&nxi->nx_usecnt);
++	return nxi;
++}
++
++
++extern void free_nx_info(struct nx_info *);
++
++#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
++
++static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
++{
++	if (!nxi)
++		return;
++
++	vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
++		nxi, nxi ? nxi->nx_id : 0,
++		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++		_file, _line);
++
++	if (atomic_dec_and_test(&nxi->nx_usecnt))
++		free_nx_info(nxi);
++}
++
++
++#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
++
++static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
++		const char *_file, int _line)
++{
++	if (nxi) {
++		vxlprintk(VXD_CBIT(nid, 3),
++			"init_nx_info(%p[#%d.%d])",
++			nxi, nxi ? nxi->nx_id : 0,
++			nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++			_file, _line);
++
++		atomic_inc(&nxi->nx_usecnt);
++	}
++	*nxp = nxi;
++}
++
++
++#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
++
++static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
++	const char *_file, int _line)
++{
++	struct nx_info *nxo;
++
++	if (!nxi)
++		return;
++
++	vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
++		nxi, nxi ? nxi->nx_id : 0,
++		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++		_file, _line);
++
++	atomic_inc(&nxi->nx_usecnt);
++	nxo = xchg(nxp, nxi);
++	BUG_ON(nxo);
++}
++
++#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
++
++static inline void __clr_nx_info(struct nx_info **nxp,
++	const char *_file, int _line)
++{
++	struct nx_info *nxo;
++
++	nxo = xchg(nxp, NULL);
++	if (!nxo)
++		return;
++
++	vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
++		nxo, nxo ? nxo->nx_id : 0,
++		nxo ? atomic_read(&nxo->nx_usecnt) : 0,
++		_file, _line);
++
++	if (atomic_dec_and_test(&nxo->nx_usecnt))
++		free_nx_info(nxo);
++}
++
++
++#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
++
++static inline void __claim_nx_info(struct nx_info *nxi,
++	struct task_struct *task, const char *_file, int _line)
++{
++	vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
++		nxi, nxi ? nxi->nx_id : 0,
++		nxi?atomic_read(&nxi->nx_usecnt):0,
++		nxi?atomic_read(&nxi->nx_tasks):0,
++		task, _file, _line);
++
++	atomic_inc(&nxi->nx_tasks);
++}
++
++
++extern void unhash_nx_info(struct nx_info *);
++
++#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
++
++static inline void __release_nx_info(struct nx_info *nxi,
++	struct task_struct *task, const char *_file, int _line)
++{
++	vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
++		nxi, nxi ? nxi->nx_id : 0,
++		nxi ? atomic_read(&nxi->nx_usecnt) : 0,
++		nxi ? atomic_read(&nxi->nx_tasks) : 0,
++		task, _file, _line);
++
++	might_sleep();
++
++	if (atomic_dec_and_test(&nxi->nx_tasks))
++		unhash_nx_info(nxi);
++}
++
++
++#define task_get_nx_info(i)	__task_get_nx_info(i, __FILE__, __LINE__)
++
++static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
++	const char *_file, int _line)
++{
++	struct nx_info *nxi;
++
++	task_lock(p);
++	vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
++		p, _file, _line);
++	nxi = __get_nx_info(p->nx_info, _file, _line);
++	task_unlock(p);
++	return nxi;
++}
++
++
++static inline void exit_nx_info(struct task_struct *p)
++{
++	if (p->nx_info)
++		release_nx_info(p->nx_info, p);
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_pid.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_pid.h	2011-06-10 14:04:27.000000000 +0200
+@@ -0,0 +1,50 @@
++#ifndef _VS_PID_H
++#define _VS_PID_H
++
++#include "vserver/base.h"
++#include "vserver/check.h"
++#include "vserver/context.h"
++#include "vserver/debug.h"
++#include "vserver/pid.h"
++#include <linux/pid_namespace.h>
++
++
++#define VXF_FAKE_INIT	(VXF_INFO_INIT | VXF_STATE_INIT)
++
++static inline
++int vx_proc_task_visible(struct task_struct *task)
++{
++	if ((task->pid == 1) &&
++		!vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
++		/* show a blend through init */
++		goto visible;
++	if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
++		goto visible;
++	return 0;
++visible:
++	return 1;
++}
++
++#define find_task_by_real_pid(pid) find_task_by_pid_ns(pid, &init_pid_ns)
++
++
++static inline
++struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
++{
++	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
++
++	if (task && !vx_proc_task_visible(task)) {
++		vxdprintk(VXD_CBIT(misc, 6),
++			"dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
++			task, task->xid, task->pid,
++			current, current->xid, current->pid);
++		put_task_struct(task);
++		task = NULL;
++	}
++	return task;
++}
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_sched.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_sched.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,110 @@
++#ifndef _VS_SCHED_H
++#define _VS_SCHED_H
++
++#include "vserver/base.h"
++#include "vserver/context.h"
++#include "vserver/sched.h"
++
++
++#define VAVAVOOM_RATIO		 50
++
++#define MAX_PRIO_BIAS		 20
++#define MIN_PRIO_BIAS		-20
++
++
++#ifdef CONFIG_VSERVER_HARDCPU
++
++/*
++ * effective_prio - return the priority that is based on the static
++ * priority but is modified by bonuses/penalties.
++ *
++ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
++ * into a -4 ... 0 ... +4 bonus/penalty range.
++ *
++ * Additionally, we scale another amount based on the number of
++ * CPU tokens currently held by the context, if the process is
++ * part of a context (and the appropriate SCHED flag is set).
++ * This ranges from -5 ... 0 ... +15, quadratically.
++ *
++ * So, the total bonus is -9 .. 0 .. +19
++ * We use ~50% of the full 0...39 priority range so that:
++ *
++ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
++ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
++ *    unless that context is far exceeding its CPU allocation.
++ *
++ * Both properties are important to certain workloads.
++ */
++static inline
++int vx_effective_vavavoom(struct _vx_sched_pc *sched_pc, int max_prio)
++{
++	int vavavoom, max;
++
++	/* lots of tokens = lots of vavavoom
++	 *      no tokens = no vavavoom      */
++	if ((vavavoom = sched_pc->tokens) >= 0) {
++		max = sched_pc->tokens_max;
++		vavavoom = max - vavavoom;
++		max = max * max;
++		vavavoom = max_prio * VAVAVOOM_RATIO / 100
++			* (vavavoom*vavavoom - (max >> 2)) / max;
++		return vavavoom;
++	}
++	return 0;
++}
++
++
++static inline
++int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
++{
++	struct vx_info *vxi = p->vx_info;
++	struct _vx_sched_pc *sched_pc;
++
++	if (!vxi)
++		return prio;
++
++	sched_pc = &vx_cpu(vxi, sched_pc);
++	if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0)) {
++		int vavavoom = vx_effective_vavavoom(sched_pc, max_user);
++
++		sched_pc->vavavoom = vavavoom;
++		prio += vavavoom;
++	}
++	prio += sched_pc->prio_bias;
++	return prio;
++}
++
++#else /* !CONFIG_VSERVER_HARDCPU */
++
++static inline
++int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
++{
++	struct vx_info *vxi = p->vx_info;
++
++	if (vxi)
++		prio += vx_cpu(vxi, sched_pc).prio_bias;
++	return prio;
++}
++
++#endif /* CONFIG_VSERVER_HARDCPU */
++
++
++static inline void vx_account_user(struct vx_info *vxi,
++	cputime_t cputime, int nice)
++{
++	if (!vxi)
++		return;
++	vx_cpu(vxi, sched_pc).user_ticks += cputime;
++}
++
++static inline void vx_account_system(struct vx_info *vxi,
++	cputime_t cputime, int idle)
++{
++	if (!vxi)
++		return;
++	vx_cpu(vxi, sched_pc).sys_ticks += cputime;
++}
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_socket.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_socket.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,67 @@
++#ifndef _VS_SOCKET_H
++#define _VS_SOCKET_H
++
++#include "vserver/debug.h"
++#include "vserver/base.h"
++#include "vserver/cacct.h"
++#include "vserver/context.h"
++#include "vserver/tag.h"
++
++
++/* socket accounting */
++
++#include <linux/socket.h>
++
++static inline int vx_sock_type(int family)
++{
++	switch (family) {
++	case PF_UNSPEC:
++		return VXA_SOCK_UNSPEC;
++	case PF_UNIX:
++		return VXA_SOCK_UNIX;
++	case PF_INET:
++		return VXA_SOCK_INET;
++	case PF_INET6:
++		return VXA_SOCK_INET6;
++	case PF_PACKET:
++		return VXA_SOCK_PACKET;
++	default:
++		return VXA_SOCK_OTHER;
++	}
++}
++
++#define vx_acc_sock(v, f, p, s) \
++	__vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
++
++static inline void __vx_acc_sock(struct vx_info *vxi,
++	int family, int pos, int size, char *file, int line)
++{
++	if (vxi) {
++		int type = vx_sock_type(family);
++
++		atomic_long_inc(&vxi->cacct.sock[type][pos].count);
++		atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
++	}
++}
++
++#define vx_sock_recv(sk, s) \
++	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
++#define vx_sock_send(sk, s) \
++	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
++#define vx_sock_fail(sk, s) \
++	vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
++
++
++#define sock_vx_init(s) do {		\
++	(s)->sk_xid = 0;		\
++	(s)->sk_vx_info = NULL;		\
++	} while (0)
++
++#define sock_nx_init(s) do {		\
++	(s)->sk_nid = 0;		\
++	(s)->sk_nx_info = NULL;		\
++	} while (0)
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_tag.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_tag.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,47 @@
++#ifndef _VS_TAG_H
++#define _VS_TAG_H
++
++#include <linux/vserver/tag.h>
++
++/* check conditions */
++
++#define DX_ADMIN	0x0001
++#define DX_WATCH	0x0002
++#define DX_HOSTID	0x0008
++
++#define DX_IDENT	0x0010
++
++#define DX_ARG_MASK	0x0010
++
++
++#define dx_task_tag(t)	((t)->tag)
++
++#define dx_current_tag() dx_task_tag(current)
++
++#define dx_check(c, m)	__dx_check(dx_current_tag(), c, m)
++
++#define dx_weak_check(c, m)	((m) ? dx_check(c, m) : 1)
++
++
++/*
++ * check current context for ADMIN/WATCH and
++ * optionally against supplied argument
++ */
++static inline int __dx_check(tag_t cid, tag_t id, unsigned int mode)
++{
++	if (mode & DX_ARG_MASK) {
++		if ((mode & DX_IDENT) && (id == cid))
++			return 1;
++	}
++	return (((mode & DX_ADMIN) && (cid == 0)) ||
++		((mode & DX_WATCH) && (cid == 1)) ||
++		((mode & DX_HOSTID) && (id == 0)));
++}
++
++struct inode;
++int dx_permission(const struct inode *inode, int mask);
++
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vs_time.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vs_time.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,19 @@
++#ifndef _VS_TIME_H
++#define _VS_TIME_H
++
++
++/* time faking stuff */
++
++#ifdef CONFIG_VSERVER_VTIME
++
++extern void vx_gettimeofday(struct timeval *tv);
++extern int vx_settimeofday(struct timespec *ts);
++
++#else
++#define	vx_gettimeofday(t)	do_gettimeofday(t)
++#define	vx_settimeofday(t)	do_settimeofday(t)
++#endif
++
++#else
++#warning duplicate inclusion
++#endif
+--- a/include/linux/vserver/base.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/base.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,170 @@
++#ifndef _VX_BASE_H
++#define _VX_BASE_H
++
++
++/* context state changes */
++
++enum {
++	VSC_STARTUP = 1,
++	VSC_SHUTDOWN,
++
++	VSC_NETUP,
++	VSC_NETDOWN,
++};
++
++
++
++#define vx_task_xid(t)	((t)->xid)
++
++#define vx_current_xid() vx_task_xid(current)
++
++#define current_vx_info() (current->vx_info)
++
++
++#define nx_task_nid(t)	((t)->nid)
++
++#define nx_current_nid() nx_task_nid(current)
++
++#define current_nx_info() (current->nx_info)
++
++
++/* generic flag merging */
++
++#define vs_check_flags(v, m, f)	(((v) & (m)) ^ (f))
++
++#define vs_mask_flags(v, f, m)	(((v) & ~(m)) | ((f) & (m)))
++
++#define vs_mask_mask(v, f, m)	(((v) & ~(m)) | ((v) & (f) & (m)))
++
++#define vs_check_bit(v, n)	((v) & (1LL << (n)))
++
++
++/* context flags */
++
++#define __vx_flags(v)	((v) ? (v)->vx_flags : 0)
++
++#define vx_current_flags()	__vx_flags(current_vx_info())
++
++#define vx_info_flags(v, m, f) \
++	vs_check_flags(__vx_flags(v), m, f)
++
++#define task_vx_flags(t, m, f) \
++	((t) && vx_info_flags((t)->vx_info, m, f))
++
++#define vx_flags(m, f)	vx_info_flags(current_vx_info(), m, f)
++
++
++/* context caps */
++
++#define __vx_ccaps(v)	((v) ? (v)->vx_ccaps : 0)
++
++#define vx_current_ccaps()	__vx_ccaps(current_vx_info())
++
++#define vx_info_ccaps(v, c)	(__vx_ccaps(v) & (c))
++
++#define vx_ccaps(c)	vx_info_ccaps(current_vx_info(), (c))
++
++
++
++/* network flags */
++
++#define __nx_flags(n)	((n) ? (n)->nx_flags : 0)
++
++#define nx_current_flags()	__nx_flags(current_nx_info())
++
++#define nx_info_flags(n, m, f) \
++	vs_check_flags(__nx_flags(n), m, f)
++
++#define task_nx_flags(t, m, f) \
++	((t) && nx_info_flags((t)->nx_info, m, f))
++
++#define nx_flags(m, f)	nx_info_flags(current_nx_info(), m, f)
++
++
++/* network caps */
++
++#define __nx_ncaps(n)	((n) ? (n)->nx_ncaps : 0)
++
++#define nx_current_ncaps()	__nx_ncaps(current_nx_info())
++
++#define nx_info_ncaps(n, c)	(__nx_ncaps(n) & (c))
++
++#define nx_ncaps(c)	nx_info_ncaps(current_nx_info(), c)
++
++
++/* context mask capabilities */
++
++#define __vx_mcaps(v)	((v) ? (v)->vx_ccaps >> 32UL : ~0 )
++
++#define vx_info_mcaps(v, c)	(__vx_mcaps(v) & (c))
++
++#define vx_mcaps(c)	vx_info_mcaps(current_vx_info(), c)
++
++
++/* context bcap mask */
++
++#define __vx_bcaps(v)		((v)->vx_bcaps)
++
++#define vx_current_bcaps()	__vx_bcaps(current_vx_info())
++
++
++/* mask given bcaps */
++
++#define vx_info_mbcaps(v, c)	((v) ? cap_intersect(__vx_bcaps(v), c) : c)
++
++#define vx_mbcaps(c)		vx_info_mbcaps(current_vx_info(), c)
++
++
++/* masked cap_bset */
++
++#define vx_info_cap_bset(v)	vx_info_mbcaps(v, current->cap_bset)
++
++#define vx_current_cap_bset()	vx_info_cap_bset(current_vx_info())
++
++#if 0
++#define vx_info_mbcap(v, b) \
++	(!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \
++	vx_info_bcaps(v, b) : (b))
++
++#define task_vx_mbcap(t, b) \
++	vx_info_mbcap((t)->vx_info, (t)->b)
++
++#define vx_mbcap(b)	task_vx_mbcap(current, b)
++#endif
++
++#define vx_cap_raised(v, c, f)	cap_raised(vx_info_mbcaps(v, c), f)
++
++#define vx_capable(b, c) (capable(b) || \
++	(cap_raised(current_cap(), b) && vx_ccaps(c)))
++
++#define nx_capable(b, c) (capable(b) || \
++	(cap_raised(current_cap(), b) && nx_ncaps(c)))
++
++#define vx_task_initpid(t, n) \
++	((t)->vx_info && \
++	((t)->vx_info->vx_initpid == (n)))
++
++#define vx_current_initpid(n)	vx_task_initpid(current, n)
++
++
++/* context unshare mask */
++
++#define __vx_umask(v)		((v)->vx_umask)
++
++#define vx_current_umask()	__vx_umask(current_vx_info())
++
++#define vx_can_unshare(b, f) (capable(b) || \
++	(cap_raised(current_cap(), b) && \
++	!((f) & ~vx_current_umask())))
++
++
++#define __vx_state(v)	((v) ? ((v)->vx_state) : 0)
++
++#define vx_info_state(v, m)	(__vx_state(v) & (m))
++
++
++#define __nx_state(n)	((n) ? ((n)->nx_state) : 0)
++
++#define nx_info_state(n, m)	(__nx_state(n) & (m))
++
++#endif
+--- a/include/linux/vserver/cacct.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cacct.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,15 @@
++#ifndef _VX_CACCT_H
++#define _VX_CACCT_H
++
++
++enum sock_acc_field {
++	VXA_SOCK_UNSPEC = 0,
++	VXA_SOCK_UNIX,
++	VXA_SOCK_INET,
++	VXA_SOCK_INET6,
++	VXA_SOCK_PACKET,
++	VXA_SOCK_OTHER,
++	VXA_SOCK_SIZE	/* array size */
++};
++
++#endif	/* _VX_CACCT_H */
+--- a/include/linux/vserver/cacct_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cacct_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,23 @@
++#ifndef _VX_CACCT_CMD_H
++#define _VX_CACCT_CMD_H
++
++
++/* virtual host info name commands */
++
++#define VCMD_sock_stat		VC_CMD(VSTAT, 5, 0)
++
++struct	vcmd_sock_stat_v0 {
++	uint32_t field;
++	uint32_t count[3];
++	uint64_t total[3];
++};
++
++
++#ifdef	__KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_sock_stat(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CACCT_CMD_H */
+--- a/include/linux/vserver/cacct_def.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cacct_def.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,43 @@
++#ifndef _VX_CACCT_DEF_H
++#define _VX_CACCT_DEF_H
++
++#include <asm/atomic.h>
++#include <linux/vserver/cacct.h>
++
++
++struct _vx_sock_acc {
++	atomic_long_t count;
++	atomic_long_t total;
++};
++
++/* context sub struct */
++
++struct _vx_cacct {
++	struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
++	atomic_t slab[8];
++	atomic_t page[6][8];
++};
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
++{
++	int i, j;
++
++	printk("\t_vx_cacct:");
++	for (i = 0; i < 6; i++) {
++		struct _vx_sock_acc *ptr = cacct->sock[i];
++
++		printk("\t [%d] =", i);
++		for (j = 0; j < 3; j++) {
++			printk(" [%d] = %8lu, %8lu", j,
++				atomic_long_read(&ptr[j].count),
++				atomic_long_read(&ptr[j].total));
++		}
++		printk("\n");
++	}
++}
++
++#endif
++
++#endif	/* _VX_CACCT_DEF_H */
+--- a/include/linux/vserver/cacct_int.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cacct_int.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,21 @@
++#ifndef _VX_CACCT_INT_H
++#define _VX_CACCT_INT_H
++
++
++#ifdef	__KERNEL__
++
++static inline
++unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
++{
++	return atomic_long_read(&cacct->sock[type][pos].count);
++}
++
++
++static inline
++unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
++{
++	return atomic_long_read(&cacct->sock[type][pos].total);
++}
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CACCT_INT_H */
+--- a/include/linux/vserver/check.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/check.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,89 @@
++#ifndef _VS_CHECK_H
++#define _VS_CHECK_H
++
++
++#define MAX_S_CONTEXT	65535	/* Arbitrary limit */
++
++#ifdef	CONFIG_VSERVER_DYNAMIC_IDS
++#define MIN_D_CONTEXT	49152	/* dynamic contexts start here */
++#else
++#define MIN_D_CONTEXT	65536
++#endif
++
++/* check conditions */
++
++#define VS_ADMIN	0x0001
++#define VS_WATCH	0x0002
++#define VS_HIDE		0x0004
++#define VS_HOSTID	0x0008
++
++#define VS_IDENT	0x0010
++#define VS_EQUIV	0x0020
++#define VS_PARENT	0x0040
++#define VS_CHILD	0x0080
++
++#define VS_ARG_MASK	0x00F0
++
++#define VS_DYNAMIC	0x0100
++#define VS_STATIC	0x0200
++
++#define VS_ATR_MASK	0x0F00
++
++#ifdef	CONFIG_VSERVER_PRIVACY
++#define VS_ADMIN_P	(0)
++#define VS_WATCH_P	(0)
++#else
++#define VS_ADMIN_P	VS_ADMIN
++#define VS_WATCH_P	VS_WATCH
++#endif
++
++#define VS_HARDIRQ	0x1000
++#define VS_SOFTIRQ	0x2000
++#define VS_IRQ		0x4000
++
++#define VS_IRQ_MASK	0xF000
++
++#include <linux/hardirq.h>
++
++/*
++ * check current context for ADMIN/WATCH and
++ * optionally against supplied argument
++ */
++static inline int __vs_check(int cid, int id, unsigned int mode)
++{
++	if (mode & VS_ARG_MASK) {
++		if ((mode & VS_IDENT) && (id == cid))
++			return 1;
++	}
++	if (mode & VS_ATR_MASK) {
++		if ((mode & VS_DYNAMIC) &&
++			(id >= MIN_D_CONTEXT) &&
++			(id <= MAX_S_CONTEXT))
++			return 1;
++		if ((mode & VS_STATIC) &&
++			(id > 1) && (id < MIN_D_CONTEXT))
++			return 1;
++	}
++	if (mode & VS_IRQ_MASK) {
++		if ((mode & VS_IRQ) && unlikely(in_interrupt()))
++			return 1;
++		if ((mode & VS_HARDIRQ) && unlikely(in_irq()))
++			return 1;
++		if ((mode & VS_SOFTIRQ) && unlikely(in_softirq()))
++			return 1;
++	}
++	return (((mode & VS_ADMIN) && (cid == 0)) ||
++		((mode & VS_WATCH) && (cid == 1)) ||
++		((mode & VS_HOSTID) && (id == 0)));
++}
++
++#define vx_check(c, m)	__vs_check(vx_current_xid(), c, (m) | VS_IRQ)
++
++#define vx_weak_check(c, m)	((m) ? vx_check(c, m) : 1)
++
++
++#define nx_check(c, m)	__vs_check(nx_current_nid(), c, m)
++
++#define nx_weak_check(c, m)	((m) ? nx_check(c, m) : 1)
++
++#endif
+--- a/include/linux/vserver/context.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/context.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,184 @@
++#ifndef _VX_CONTEXT_H
++#define _VX_CONTEXT_H
++
++#include <linux/types.h>
++#include <linux/capability.h>
++
++
++/* context flags */
++
++#define VXF_INFO_SCHED		0x00000002
++#define VXF_INFO_NPROC		0x00000004
++#define VXF_INFO_PRIVATE	0x00000008
++
++#define VXF_INFO_INIT		0x00000010
++#define VXF_INFO_HIDE		0x00000020
++#define VXF_INFO_ULIMIT		0x00000040
++#define VXF_INFO_NSPACE		0x00000080
++
++#define VXF_SCHED_HARD		0x00000100
++#define VXF_SCHED_PRIO		0x00000200
++#define VXF_SCHED_PAUSE		0x00000400
++
++#define VXF_VIRT_MEM		0x00010000
++#define VXF_VIRT_UPTIME		0x00020000
++#define VXF_VIRT_CPU		0x00040000
++#define VXF_VIRT_LOAD		0x00080000
++#define VXF_VIRT_TIME		0x00100000
++
++#define VXF_HIDE_MOUNT		0x01000000
++/* was	VXF_HIDE_NETIF		0x02000000 */
++#define VXF_HIDE_VINFO		0x04000000
++
++#define VXF_STATE_SETUP		(1ULL << 32)
++#define VXF_STATE_INIT		(1ULL << 33)
++#define VXF_STATE_ADMIN		(1ULL << 34)
++
++#define VXF_SC_HELPER		(1ULL << 36)
++#define VXF_REBOOT_KILL		(1ULL << 37)
++#define VXF_PERSISTENT		(1ULL << 38)
++
++#define VXF_FORK_RSS		(1ULL << 48)
++#define VXF_PROLIFIC		(1ULL << 49)
++
++#define VXF_IGNEG_NICE		(1ULL << 52)
++
++#define VXF_ONE_TIME		(0x0007ULL << 32)
++
++#define VXF_INIT_SET		(VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN)
++
++
++/* context migration */
++
++#define VXM_SET_INIT		0x00000001
++#define VXM_SET_REAPER		0x00000002
++
++/* context caps */
++
++#define VXC_CAP_MASK		0x00000000
++
++#define VXC_SET_UTSNAME		0x00000001
++#define VXC_SET_RLIMIT		0x00000002
++#define VXC_FS_SECURITY		0x00000004
++#define VXC_FS_TRUSTED		0x00000008
++#define VXC_TIOCSTI		0x00000010
++
++/* was	VXC_RAW_ICMP		0x00000100 */
++#define VXC_SYSLOG		0x00001000
++#define VXC_OOM_ADJUST		0x00002000
++#define VXC_AUDIT_CONTROL	0x00004000
++
++#define VXC_SECURE_MOUNT	0x00010000
++#define VXC_SECURE_REMOUNT	0x00020000
++#define VXC_BINARY_MOUNT	0x00040000
++
++#define VXC_QUOTA_CTL		0x00100000
++#define VXC_ADMIN_MAPPER	0x00200000
++#define VXC_ADMIN_CLOOP		0x00400000
++
++#define VXC_KTHREAD		0x01000000
++#define VXC_NAMESPACE		0x02000000
++
++
++#ifdef	__KERNEL__
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++
++#include "limit_def.h"
++#include "sched_def.h"
++#include "cvirt_def.h"
++#include "cacct_def.h"
++#include "device_def.h"
++
++#define VX_SPACES	2
++
++struct _vx_info_pc {
++	struct _vx_sched_pc sched_pc;
++	struct _vx_cvirt_pc cvirt_pc;
++};
++
++struct vx_info {
++	struct hlist_node vx_hlist;		/* linked list of contexts */
++	xid_t vx_id;				/* context id */
++	atomic_t vx_usecnt;			/* usage count */
++	atomic_t vx_tasks;			/* tasks count */
++	struct vx_info *vx_parent;		/* parent context */
++	int vx_state;				/* context state */
++
++	unsigned long vx_nsmask[VX_SPACES];	/* assignment mask */
++	struct nsproxy *vx_nsproxy[VX_SPACES];	/* private namespaces */
++	struct fs_struct *vx_fs[VX_SPACES];	/* private namespace fs */
++
++	uint64_t vx_flags;			/* context flags */
++	uint64_t vx_ccaps;			/* context caps (vserver) */
++	kernel_cap_t vx_bcaps;			/* bounding caps (system) */
++	unsigned long vx_umask;			/* unshare mask (guest) */
++
++	struct task_struct *vx_reaper;		/* guest reaper process */
++	pid_t vx_initpid;			/* PID of guest init */
++	int64_t vx_badness_bias;		/* OOM points bias */
++
++	struct _vx_limit limit;			/* vserver limits */
++	struct _vx_sched sched;			/* vserver scheduler */
++	struct _vx_cvirt cvirt;			/* virtual/bias stuff */
++	struct _vx_cacct cacct;			/* context accounting */
++
++	struct _vx_device dmap;			/* default device map targets */
++
++#ifndef CONFIG_SMP
++	struct _vx_info_pc info_pc;		/* per cpu data */
++#else
++	struct _vx_info_pc *ptr_pc;		/* per cpu array */
++#endif
++
++	wait_queue_head_t vx_wait;		/* context exit waitqueue */
++	int reboot_cmd;				/* last sys_reboot() cmd */
++	int exit_code;				/* last process exit code */
++
++	char vx_name[65];			/* vserver name */
++};
++
++#ifndef CONFIG_SMP
++#define	vx_ptr_pc(vxi)		(&(vxi)->info_pc)
++#define	vx_per_cpu(vxi, v, id)	vx_ptr_pc(vxi)->v
++#else
++#define	vx_ptr_pc(vxi)		((vxi)->ptr_pc)
++#define	vx_per_cpu(vxi, v, id)	per_cpu_ptr(vx_ptr_pc(vxi), id)->v
++#endif
++
++#define	vx_cpu(vxi, v)		vx_per_cpu(vxi, v, smp_processor_id())
++
++
++struct vx_info_save {
++	struct vx_info *vxi;
++	xid_t xid;
++};
++
++
++/* status flags */
++
++#define VXS_HASHED	0x0001
++#define VXS_PAUSED	0x0010
++#define VXS_SHUTDOWN	0x0100
++#define VXS_HELPER	0x1000
++#define VXS_RELEASED	0x8000
++
++
++extern void claim_vx_info(struct vx_info *, struct task_struct *);
++extern void release_vx_info(struct vx_info *, struct task_struct *);
++
++extern struct vx_info *lookup_vx_info(int);
++extern struct vx_info *lookup_or_create_vx_info(int);
++
++extern int get_xid_list(int, unsigned int *, int);
++extern int xid_is_hashed(xid_t);
++
++extern int vx_migrate_task(struct task_struct *, struct vx_info *, int);
++
++extern long vs_state_change(struct vx_info *, unsigned int);
++
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CONTEXT_H */
+--- a/include/linux/vserver/context_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/context_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,145 @@
++#ifndef _VX_CONTEXT_CMD_H
++#define _VX_CONTEXT_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_xid		VC_CMD(VINFO, 1, 0)
++
++#ifdef	__KERNEL__
++extern int vc_task_xid(uint32_t);
++
++#endif	/* __KERNEL__ */
++
++#define VCMD_vx_info		VC_CMD(VINFO, 5, 0)
++
++struct	vcmd_vx_info_v0 {
++	uint32_t xid;
++	uint32_t initpid;
++	/* more to come */
++};
++
++#ifdef	__KERNEL__
++extern int vc_vx_info(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++#define VCMD_ctx_stat		VC_CMD(VSTAT, 0, 0)
++
++struct	vcmd_ctx_stat_v0 {
++	uint32_t usecnt;
++	uint32_t tasks;
++	/* more to come */
++};
++
++#ifdef	__KERNEL__
++extern int vc_ctx_stat(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++/* context commands */
++
++#define VCMD_ctx_create_v0	VC_CMD(VPROC, 1, 0)
++#define VCMD_ctx_create		VC_CMD(VPROC, 1, 1)
++
++struct	vcmd_ctx_create {
++	uint64_t flagword;
++};
++
++#define VCMD_ctx_migrate_v0	VC_CMD(PROCMIG, 1, 0)
++#define VCMD_ctx_migrate	VC_CMD(PROCMIG, 1, 1)
++
++struct	vcmd_ctx_migrate {
++	uint64_t flagword;
++};
++
++#ifdef	__KERNEL__
++extern int vc_ctx_create(uint32_t, void __user *);
++extern int vc_ctx_migrate(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* flag commands */
++
++#define VCMD_get_cflags		VC_CMD(FLAGS, 1, 0)
++#define VCMD_set_cflags		VC_CMD(FLAGS, 2, 0)
++
++struct	vcmd_ctx_flags_v0 {
++	uint64_t flagword;
++	uint64_t mask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_cflags(struct vx_info *, void __user *);
++extern int vc_set_cflags(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* context caps commands */
++
++#define VCMD_get_ccaps		VC_CMD(FLAGS, 3, 1)
++#define VCMD_set_ccaps		VC_CMD(FLAGS, 4, 1)
++
++struct	vcmd_ctx_caps_v1 {
++	uint64_t ccaps;
++	uint64_t cmask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_ccaps(struct vx_info *, void __user *);
++extern int vc_set_ccaps(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* bcaps commands */
++
++#define VCMD_get_bcaps		VC_CMD(FLAGS, 9, 0)
++#define VCMD_set_bcaps		VC_CMD(FLAGS, 10, 0)
++
++struct	vcmd_bcaps {
++	uint64_t bcaps;
++	uint64_t bmask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_bcaps(struct vx_info *, void __user *);
++extern int vc_set_bcaps(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* umask commands */
++
++#define VCMD_get_umask		VC_CMD(FLAGS, 13, 0)
++#define VCMD_set_umask		VC_CMD(FLAGS, 14, 0)
++
++struct	vcmd_umask {
++	uint64_t umask;
++	uint64_t mask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_umask(struct vx_info *, void __user *);
++extern int vc_set_umask(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* OOM badness */
++
++#define VCMD_get_badness	VC_CMD(MEMCTRL, 5, 0)
++#define VCMD_set_badness	VC_CMD(MEMCTRL, 6, 0)
++
++struct	vcmd_badness_v0 {
++	int64_t bias;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_badness(struct vx_info *, void __user *);
++extern int vc_set_badness(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CONTEXT_CMD_H */
+--- a/include/linux/vserver/cvirt.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cvirt.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,20 @@
++#ifndef _VX_CVIRT_H
++#define _VX_CVIRT_H
++
++
++#ifdef	__KERNEL__
++
++struct timespec;
++
++void vx_vsi_uptime(struct timespec *, struct timespec *);
++
++
++struct vx_info;
++
++void vx_update_load(struct vx_info *);
++
++
++int vx_do_syslog(int, char __user *, int);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CVIRT_H */
+--- a/include/linux/vserver/cvirt_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cvirt_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,53 @@
++#ifndef _VX_CVIRT_CMD_H
++#define _VX_CVIRT_CMD_H
++
++
++/* virtual host info name commands */
++
++#define VCMD_set_vhi_name	VC_CMD(VHOST, 1, 0)
++#define VCMD_get_vhi_name	VC_CMD(VHOST, 2, 0)
++
++struct	vcmd_vhi_name_v0 {
++	uint32_t field;
++	char name[65];
++};
++
++
++enum vhi_name_field {
++	VHIN_CONTEXT = 0,
++	VHIN_SYSNAME,
++	VHIN_NODENAME,
++	VHIN_RELEASE,
++	VHIN_VERSION,
++	VHIN_MACHINE,
++	VHIN_DOMAINNAME,
++};
++
++
++#ifdef	__KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_set_vhi_name(struct vx_info *, void __user *);
++extern int vc_get_vhi_name(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++#define VCMD_virt_stat		VC_CMD(VSTAT, 3, 0)
++
++struct	vcmd_virt_stat_v0 {
++	uint64_t offset;
++	uint64_t uptime;
++	uint32_t nr_threads;
++	uint32_t nr_running;
++	uint32_t nr_uninterruptible;
++	uint32_t nr_onhold;
++	uint32_t nr_forks;
++	uint32_t load[3];
++};
++
++#ifdef	__KERNEL__
++extern int vc_virt_stat(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_CVIRT_CMD_H */
+--- a/include/linux/vserver/cvirt_def.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/cvirt_def.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,80 @@
++#ifndef _VX_CVIRT_DEF_H
++#define _VX_CVIRT_DEF_H
++
++#include <linux/jiffies.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/time.h>
++#include <asm/atomic.h>
++
++
++struct _vx_usage_stat {
++	uint64_t user;
++	uint64_t nice;
++	uint64_t system;
++	uint64_t softirq;
++	uint64_t irq;
++	uint64_t idle;
++	uint64_t iowait;
++};
++
++struct _vx_syslog {
++	wait_queue_head_t log_wait;
++	spinlock_t logbuf_lock;		/* lock for the log buffer */
++
++	unsigned long log_start;	/* next char to be read by syslog() */
++	unsigned long con_start;	/* next char to be sent to consoles */
++	unsigned long log_end;	/* most-recently-written-char + 1 */
++	unsigned long logged_chars;	/* #chars since last read+clear operation */
++
++	char log_buf[1024];
++};
++
++
++/* context sub struct */
++
++struct _vx_cvirt {
++	atomic_t nr_threads;		/* number of current threads */
++	atomic_t nr_running;		/* number of running threads */
++	atomic_t nr_uninterruptible;	/* number of uninterruptible threads */
++
++	atomic_t nr_onhold;		/* processes on hold */
++	uint32_t onhold_last;		/* jiffies when put on hold */
++
++	struct timeval bias_tv;		/* time offset to the host */
++	struct timespec bias_idle;
++	struct timespec bias_uptime;	/* context creation point */
++	uint64_t bias_clock;		/* offset in clock_t */
++
++	spinlock_t load_lock;		/* lock for the load averages */
++	atomic_t load_updates;		/* nr of load updates done so far */
++	uint32_t load_last;		/* last time load was calculated */
++	uint32_t load[3];		/* load averages 1,5,15 */
++
++	atomic_t total_forks;		/* number of forks so far */
++
++	struct _vx_syslog syslog;
++};
++
++struct _vx_cvirt_pc {
++	struct _vx_usage_stat cpustat;
++};
++
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
++{
++	printk("\t_vx_cvirt:\n");
++	printk("\t threads: %4d, %4d, %4d, %4d\n",
++		atomic_read(&cvirt->nr_threads),
++		atomic_read(&cvirt->nr_running),
++		atomic_read(&cvirt->nr_uninterruptible),
++		atomic_read(&cvirt->nr_onhold));
++	/* add rest here */
++	printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
++}
++
++#endif
++
++#endif	/* _VX_CVIRT_DEF_H */
+--- a/include/linux/vserver/debug.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/debug.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,127 @@
++#ifndef _VX_DEBUG_H
++#define _VX_DEBUG_H
++
++
++#define VXD_CBIT(n, m)	(vx_debug_ ## n & (1 << (m)))
++#define VXD_CMIN(n, m)	(vx_debug_ ## n > (m))
++#define VXD_MASK(n, m)	(vx_debug_ ## n & (m))
++
++#define VXD_DEV(d)	(d), (d)->bd_inode->i_ino,		\
++			imajor((d)->bd_inode), iminor((d)->bd_inode)
++#define VXF_DEV		"%p[%lu,%d:%d]"
++
++
++#define vxd_path(p)						\
++	({ static char _buffer[PATH_MAX];			\
++	   d_path(p, _buffer, sizeof(_buffer)); })
++
++#define vxd_cond_path(n)					\
++	((n) ? vxd_path(&(n)->path) : "<null>" )
++
++
++#ifdef	CONFIG_VSERVER_DEBUG
++
++extern unsigned int vx_debug_switch;
++extern unsigned int vx_debug_xid;
++extern unsigned int vx_debug_nid;
++extern unsigned int vx_debug_tag;
++extern unsigned int vx_debug_net;
++extern unsigned int vx_debug_limit;
++extern unsigned int vx_debug_cres;
++extern unsigned int vx_debug_dlim;
++extern unsigned int vx_debug_quota;
++extern unsigned int vx_debug_cvirt;
++extern unsigned int vx_debug_space;
++extern unsigned int vx_debug_misc;
++
++
++#define VX_LOGLEVEL	"vxD: "
++#define VX_PROC_FMT	"%p: "
++#define VX_PROCESS	current
++
++#define vxdprintk(c, f, x...)					\
++	do {							\
++		if (c)						\
++			printk(VX_LOGLEVEL VX_PROC_FMT f "\n",	\
++				VX_PROCESS , ##x);		\
++	} while (0)
++
++#define vxlprintk(c, f, x...)					\
++	do {							\
++		if (c)						\
++			printk(VX_LOGLEVEL f " @%s:%d\n", x);	\
++	} while (0)
++
++#define vxfprintk(c, f, x...)					\
++	do {							\
++		if (c)						\
++			printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \
++	} while (0)
++
++
++struct vx_info;
++
++void dump_vx_info(struct vx_info *, int);
++void dump_vx_info_inactive(int);
++
++#else	/* CONFIG_VSERVER_DEBUG */
++
++#define vx_debug_switch 0
++#define vx_debug_xid	0
++#define vx_debug_nid	0
++#define vx_debug_tag	0
++#define vx_debug_net	0
++#define vx_debug_limit	0
++#define vx_debug_cres	0
++#define vx_debug_dlim	0
++#define vx_debug_cvirt	0
++
++#define vxdprintk(x...) do { } while (0)
++#define vxlprintk(x...) do { } while (0)
++#define vxfprintk(x...) do { } while (0)
++
++#endif	/* CONFIG_VSERVER_DEBUG */
++
++
++#ifdef	CONFIG_VSERVER_WARN
++
++#define VX_WARNLEVEL	KERN_WARNING "vxW: "
++#define VX_WARN_TASK	"[»%s«,%u:#%u|%u|%u] "
++#define VX_WARN_XID	"[xid #%u] "
++#define VX_WARN_NID	"[nid #%u] "
++#define VX_WARN_TAG	"[tag #%u] "
++
++#define vxwprintk(c, f, x...)					\
++	do {							\
++		if (c)						\
++			printk(VX_WARNLEVEL f "\n", ##x);	\
++	} while (0)
++
++#else	/* CONFIG_VSERVER_WARN */
++
++#define vxwprintk(x...) do { } while (0)
++
++#endif	/* CONFIG_VSERVER_WARN */
++
++#define vxwprintk_task(c, f, x...)				\
++	vxwprintk(c, VX_WARN_TASK f,				\
++		current->comm, current->pid,			\
++		current->xid, current->nid, current->tag, ##x)
++#define vxwprintk_xid(c, f, x...)				\
++	vxwprintk(c, VX_WARN_XID f, current->xid, x)
++#define vxwprintk_nid(c, f, x...)				\
++	vxwprintk(c, VX_WARN_NID f, current->nid, x)
++#define vxwprintk_tag(c, f, x...)				\
++	vxwprintk(c, VX_WARN_TAG f, current->tag, x)
++
++#ifdef	CONFIG_VSERVER_DEBUG
++#define vxd_assert_lock(l)	assert_spin_locked(l)
++#define vxd_assert(c, f, x...)	vxlprintk(!(c), \
++	"assertion [" f "] failed.", ##x, __FILE__, __LINE__)
++#else
++#define vxd_assert_lock(l)	do { } while (0)
++#define vxd_assert(c, f, x...)	do { } while (0)
++#endif
++
++
++#endif /* _VX_DEBUG_H */
+--- a/include/linux/vserver/debug_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/debug_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,58 @@
++#ifndef _VX_DEBUG_CMD_H
++#define _VX_DEBUG_CMD_H
++
++
++/* debug commands */
++
++#define VCMD_dump_history	VC_CMD(DEBUG, 1, 0)
++
++#define VCMD_read_history	VC_CMD(DEBUG, 5, 0)
++#define VCMD_read_monitor	VC_CMD(DEBUG, 6, 0)
++
++struct  vcmd_read_history_v0 {
++	uint32_t index;
++	uint32_t count;
++	char __user *data;
++};
++
++struct  vcmd_read_monitor_v0 {
++	uint32_t index;
++	uint32_t count;
++	char __user *data;
++};
++
++
++#ifdef	__KERNEL__
++
++#ifdef	CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct	vcmd_read_history_v0_x32 {
++	uint32_t index;
++	uint32_t count;
++	compat_uptr_t data_ptr;
++};
++
++struct	vcmd_read_monitor_v0_x32 {
++	uint32_t index;
++	uint32_t count;
++	compat_uptr_t data_ptr;
++};
++
++#endif  /* CONFIG_COMPAT */
++
++extern int vc_dump_history(uint32_t);
++
++extern int vc_read_history(uint32_t, void __user *);
++extern int vc_read_monitor(uint32_t, void __user *);
++
++#ifdef	CONFIG_COMPAT
++
++extern int vc_read_history_x32(uint32_t, void __user *);
++extern int vc_read_monitor_x32(uint32_t, void __user *);
++
++#endif  /* CONFIG_COMPAT */
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_DEBUG_CMD_H */
+--- a/include/linux/vserver/device.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/device.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,15 @@
++#ifndef _VX_DEVICE_H
++#define _VX_DEVICE_H
++
++
++#define DATTR_CREATE	0x00000001
++#define DATTR_OPEN	0x00000002
++
++#define DATTR_REMAP	0x00000010
++
++#define DATTR_MASK	0x00000013
++
++
++#else	/* _VX_DEVICE_H */
++#warning duplicate inclusion
++#endif	/* _VX_DEVICE_H */
+--- a/include/linux/vserver/device_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/device_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,44 @@
++#ifndef _VX_DEVICE_CMD_H
++#define _VX_DEVICE_CMD_H
++
++
++/*  device vserver commands */
++
++#define VCMD_set_mapping	VC_CMD(DEVICE, 1, 0)
++#define VCMD_unset_mapping	VC_CMD(DEVICE, 2, 0)
++
++struct	vcmd_set_mapping_v0 {
++	const char __user *device;
++	const char __user *target;
++	uint32_t flags;
++};
++
++
++#ifdef	__KERNEL__
++
++#ifdef	CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct	vcmd_set_mapping_v0_x32 {
++	compat_uptr_t device_ptr;
++	compat_uptr_t target_ptr;
++	uint32_t flags;
++};
++
++#endif	/* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_set_mapping(struct vx_info *, void __user *);
++extern int vc_unset_mapping(struct vx_info *, void __user *);
++
++#ifdef	CONFIG_COMPAT
++
++extern int vc_set_mapping_x32(struct vx_info *, void __user *);
++extern int vc_unset_mapping_x32(struct vx_info *, void __user *);
++
++#endif	/* CONFIG_COMPAT */
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_DEVICE_CMD_H */
+--- a/include/linux/vserver/device_def.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/device_def.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,17 @@
++#ifndef _VX_DEVICE_DEF_H
++#define _VX_DEVICE_DEF_H
++
++#include <linux/types.h>
++
++struct vx_dmap_target {
++	dev_t target;
++	uint32_t flags;
++};
++
++struct _vx_device {
++#ifdef CONFIG_VSERVER_DEVICE
++	struct vx_dmap_target targets[2];
++#endif
++};
++
++#endif	/* _VX_DEVICE_DEF_H */
+--- a/include/linux/vserver/dlimit.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/dlimit.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,54 @@
++#ifndef _VX_DLIMIT_H
++#define _VX_DLIMIT_H
++
++#include "switch.h"
++
++
++#ifdef	__KERNEL__
++
++/*      keep in sync with CDLIM_INFINITY	*/
++
++#define DLIM_INFINITY		(~0ULL)
++
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++
++struct super_block;
++
++struct dl_info {
++	struct hlist_node dl_hlist;		/* linked list of contexts */
++	struct rcu_head dl_rcu;			/* the rcu head */
++	tag_t dl_tag;				/* context tag */
++	atomic_t dl_usecnt;			/* usage count */
++	atomic_t dl_refcnt;			/* reference count */
++
++	struct super_block *dl_sb;		/* associated superblock */
++
++	spinlock_t dl_lock;			/* protect the values */
++
++	unsigned long long dl_space_used;	/* used space in bytes */
++	unsigned long long dl_space_total;	/* maximum space in bytes */
++	unsigned long dl_inodes_used;		/* used inodes */
++	unsigned long dl_inodes_total;		/* maximum inodes */
++
++	unsigned int dl_nrlmult;		/* non root limit mult */
++};
++
++struct rcu_head;
++
++extern void rcu_free_dl_info(struct rcu_head *);
++extern void unhash_dl_info(struct dl_info *);
++
++extern struct dl_info *locate_dl_info(struct super_block *, tag_t);
++
++
++struct kstatfs;
++
++extern void vx_vsi_statfs(struct super_block *, struct kstatfs *);
++
++typedef uint64_t dlsize_t;
++
++#endif	/* __KERNEL__ */
++#else	/* _VX_DLIMIT_H */
++#warning duplicate inclusion
++#endif	/* _VX_DLIMIT_H */
+--- a/include/linux/vserver/dlimit_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/dlimit_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,109 @@
++#ifndef _VX_DLIMIT_CMD_H
++#define _VX_DLIMIT_CMD_H
++
++
++/*  dlimit vserver commands */
++
++#define VCMD_add_dlimit		VC_CMD(DLIMIT, 1, 0)
++#define VCMD_rem_dlimit		VC_CMD(DLIMIT, 2, 0)
++
++#define VCMD_set_dlimit		VC_CMD(DLIMIT, 5, 0)
++#define VCMD_get_dlimit		VC_CMD(DLIMIT, 6, 0)
++
++struct	vcmd_ctx_dlimit_base_v0 {
++	const char __user *name;
++	uint32_t flags;
++};
++
++struct	vcmd_ctx_dlimit_v0 {
++	const char __user *name;
++	uint32_t space_used;			/* used space in kbytes */
++	uint32_t space_total;			/* maximum space in kbytes */
++	uint32_t inodes_used;			/* used inodes */
++	uint32_t inodes_total;			/* maximum inodes */
++	uint32_t reserved;			/* reserved for root in % */
++	uint32_t flags;
++};
++
++#define CDLIM_UNSET		((uint32_t)0UL)
++#define CDLIM_INFINITY		((uint32_t)~0UL)
++#define CDLIM_KEEP		((uint32_t)~1UL)
++
++#define DLIME_UNIT	0
++#define DLIME_KILO	1
++#define DLIME_MEGA	2
++#define DLIME_GIGA	3
++
++#define DLIMF_SHIFT	0x10
++
++#define DLIMS_USED	0
++#define DLIMS_TOTAL	2
++
++static inline
++uint64_t dlimit_space_32to64(uint32_t val, uint32_t flags, int shift)
++{
++	int exp = (flags & DLIMF_SHIFT) ?
++		(flags >> shift) & DLIME_GIGA : DLIME_KILO;
++	return ((uint64_t)val) << (10 * exp);
++}
++
++static inline
++uint32_t dlimit_space_64to32(uint64_t val, uint32_t *flags, int shift)
++{
++	int exp = 0;
++
++	if (*flags & DLIMF_SHIFT) {
++		while (val > (1LL << 32) && (exp < 3)) {
++			val >>= 10;
++			exp++;
++		}
++		*flags &= ~(DLIME_GIGA << shift);
++		*flags |= exp << shift;
++	} else
++		val >>= 10;
++	return val;
++}
++
++#ifdef	__KERNEL__
++
++#ifdef	CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct	vcmd_ctx_dlimit_base_v0_x32 {
++	compat_uptr_t name_ptr;
++	uint32_t flags;
++};
++
++struct	vcmd_ctx_dlimit_v0_x32 {
++	compat_uptr_t name_ptr;
++	uint32_t space_used;			/* used space in kbytes */
++	uint32_t space_total;			/* maximum space in kbytes */
++	uint32_t inodes_used;			/* used inodes */
++	uint32_t inodes_total;			/* maximum inodes */
++	uint32_t reserved;			/* reserved for root in % */
++	uint32_t flags;
++};
++
++#endif	/* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_add_dlimit(uint32_t, void __user *);
++extern int vc_rem_dlimit(uint32_t, void __user *);
++
++extern int vc_set_dlimit(uint32_t, void __user *);
++extern int vc_get_dlimit(uint32_t, void __user *);
++
++#ifdef	CONFIG_COMPAT
++
++extern int vc_add_dlimit_x32(uint32_t, void __user *);
++extern int vc_rem_dlimit_x32(uint32_t, void __user *);
++
++extern int vc_set_dlimit_x32(uint32_t, void __user *);
++extern int vc_get_dlimit_x32(uint32_t, void __user *);
++
++#endif	/* CONFIG_COMPAT */
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_DLIMIT_CMD_H */
+--- a/include/linux/vserver/global.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/global.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,19 @@
++#ifndef _VX_GLOBAL_H
++#define _VX_GLOBAL_H
++
++
++extern atomic_t vx_global_ctotal;
++extern atomic_t vx_global_cactive;
++
++extern atomic_t nx_global_ctotal;
++extern atomic_t nx_global_cactive;
++
++extern atomic_t vs_global_nsproxy;
++extern atomic_t vs_global_fs;
++extern atomic_t vs_global_mnt_ns;
++extern atomic_t vs_global_uts_ns;
++extern atomic_t vs_global_user_ns;
++extern atomic_t vs_global_pid_ns;
++
++
++#endif /* _VX_GLOBAL_H */
+--- a/include/linux/vserver/history.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/history.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,197 @@
++#ifndef _VX_HISTORY_H
++#define _VX_HISTORY_H
++
++
++enum {
++	VXH_UNUSED = 0,
++	VXH_THROW_OOPS = 1,
++
++	VXH_GET_VX_INFO,
++	VXH_PUT_VX_INFO,
++	VXH_INIT_VX_INFO,
++	VXH_SET_VX_INFO,
++	VXH_CLR_VX_INFO,
++	VXH_CLAIM_VX_INFO,
++	VXH_RELEASE_VX_INFO,
++	VXH_ALLOC_VX_INFO,
++	VXH_DEALLOC_VX_INFO,
++	VXH_HASH_VX_INFO,
++	VXH_UNHASH_VX_INFO,
++	VXH_LOC_VX_INFO,
++	VXH_LOOKUP_VX_INFO,
++	VXH_CREATE_VX_INFO,
++};
++
++struct _vxhe_vxi {
++	struct vx_info *ptr;
++	unsigned xid;
++	unsigned usecnt;
++	unsigned tasks;
++};
++
++struct _vxhe_set_clr {
++	void *data;
++};
++
++struct _vxhe_loc_lookup {
++	unsigned arg;
++};
++
++struct _vx_hist_entry {
++	void *loc;
++	unsigned short seq;
++	unsigned short type;
++	struct _vxhe_vxi vxi;
++	union {
++		struct _vxhe_set_clr sc;
++		struct _vxhe_loc_lookup ll;
++	};
++};
++
++#ifdef	CONFIG_VSERVER_HISTORY
++
++extern unsigned volatile int vxh_active;
++
++struct _vx_hist_entry *vxh_advance(void *loc);
++
++
++static inline
++void	__vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
++{
++	entry->vxi.ptr = vxi;
++	if (vxi) {
++		entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
++		entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
++		entry->vxi.xid = vxi->vx_id;
++	}
++}
++
++
++#define	__HERE__ current_text_addr()
++
++#define __VXH_BODY(__type, __data, __here)	\
++	struct _vx_hist_entry *entry;		\
++						\
++	preempt_disable();			\
++	entry = vxh_advance(__here);		\
++	__data;					\
++	entry->type = __type;			\
++	preempt_enable();
++
++
++	/* pass vxi only */
++
++#define __VXH_SMPL				\
++	__vxh_copy_vxi(entry, vxi)
++
++static inline
++void	__vxh_smpl(struct vx_info *vxi, int __type, void *__here)
++{
++	__VXH_BODY(__type, __VXH_SMPL, __here)
++}
++
++	/* pass vxi and data (void *) */
++
++#define __VXH_DATA				\
++	__vxh_copy_vxi(entry, vxi);		\
++	entry->sc.data = data
++
++static inline
++void	__vxh_data(struct vx_info *vxi, void *data,
++			int __type, void *__here)
++{
++	__VXH_BODY(__type, __VXH_DATA, __here)
++}
++
++	/* pass vxi and arg (long) */
++
++#define __VXH_LONG				\
++	__vxh_copy_vxi(entry, vxi);		\
++	entry->ll.arg = arg
++
++static inline
++void	__vxh_long(struct vx_info *vxi, long arg,
++			int __type, void *__here)
++{
++	__VXH_BODY(__type, __VXH_LONG, __here)
++}
++
++
++static inline
++void	__vxh_throw_oops(void *__here)
++{
++	__VXH_BODY(VXH_THROW_OOPS, {}, __here);
++	/* prevent further acquisition */
++	vxh_active = 0;
++}
++
++
++#define vxh_throw_oops()	__vxh_throw_oops(__HERE__);
++
++#define __vxh_get_vx_info(v, h)	__vxh_smpl(v, VXH_GET_VX_INFO, h);
++#define __vxh_put_vx_info(v, h)	__vxh_smpl(v, VXH_PUT_VX_INFO, h);
++
++#define __vxh_init_vx_info(v, d, h) \
++	__vxh_data(v, d, VXH_INIT_VX_INFO, h);
++#define __vxh_set_vx_info(v, d, h) \
++	__vxh_data(v, d, VXH_SET_VX_INFO, h);
++#define __vxh_clr_vx_info(v, d, h) \
++	__vxh_data(v, d, VXH_CLR_VX_INFO, h);
++
++#define __vxh_claim_vx_info(v, d, h) \
++	__vxh_data(v, d, VXH_CLAIM_VX_INFO, h);
++#define __vxh_release_vx_info(v, d, h) \
++	__vxh_data(v, d, VXH_RELEASE_VX_INFO, h);
++
++#define vxh_alloc_vx_info(v) \
++	__vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
++#define vxh_dealloc_vx_info(v) \
++	__vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
++
++#define vxh_hash_vx_info(v) \
++	__vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
++#define vxh_unhash_vx_info(v) \
++	__vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
++
++#define vxh_loc_vx_info(v, l) \
++	__vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__);
++#define vxh_lookup_vx_info(v, l) \
++	__vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__);
++#define vxh_create_vx_info(v, l) \
++	__vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__);
++
++extern void vxh_dump_history(void);
++
++
++#else  /* CONFIG_VSERVER_HISTORY */
++
++#define	__HERE__	0
++
++#define vxh_throw_oops()		do { } while (0)
++
++#define __vxh_get_vx_info(v, h)		do { } while (0)
++#define __vxh_put_vx_info(v, h)		do { } while (0)
++
++#define __vxh_init_vx_info(v, d, h)	do { } while (0)
++#define __vxh_set_vx_info(v, d, h)	do { } while (0)
++#define __vxh_clr_vx_info(v, d, h)	do { } while (0)
++
++#define __vxh_claim_vx_info(v, d, h)	do { } while (0)
++#define __vxh_release_vx_info(v, d, h)	do { } while (0)
++
++#define vxh_alloc_vx_info(v)		do { } while (0)
++#define vxh_dealloc_vx_info(v)		do { } while (0)
++
++#define vxh_hash_vx_info(v)		do { } while (0)
++#define vxh_unhash_vx_info(v)		do { } while (0)
++
++#define vxh_loc_vx_info(v, l)		do { } while (0)
++#define vxh_lookup_vx_info(v, l)	do { } while (0)
++#define vxh_create_vx_info(v, l)	do { } while (0)
++
++#define vxh_dump_history()		do { } while (0)
++
++
++#endif /* CONFIG_VSERVER_HISTORY */
++
++#endif /* _VX_HISTORY_H */
+--- a/include/linux/vserver/inode.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/inode.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,39 @@
++#ifndef _VX_INODE_H
++#define _VX_INODE_H
++
++
++#define IATTR_TAG	0x01000000
++
++#define IATTR_ADMIN	0x00000001
++#define IATTR_WATCH	0x00000002
++#define IATTR_HIDE	0x00000004
++#define IATTR_FLAGS	0x00000007
++
++#define IATTR_BARRIER	0x00010000
++#define IATTR_IXUNLINK	0x00020000
++#define IATTR_IMMUTABLE 0x00040000
++#define IATTR_COW	0x00080000
++
++#ifdef	__KERNEL__
++
++
++#ifdef	CONFIG_VSERVER_PROC_SECURE
++#define IATTR_PROC_DEFAULT	( IATTR_ADMIN | IATTR_HIDE )
++#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
++#else
++#define IATTR_PROC_DEFAULT	( IATTR_ADMIN )
++#define IATTR_PROC_SYMLINK	( IATTR_ADMIN )
++#endif
++
++#define vx_hide_check(c, m)	(((m) & IATTR_HIDE) ? vx_check(c, m) : 1)
++
++#endif	/* __KERNEL__ */
++
++/* inode ioctls */
++
++#define FIOC_GETXFLG	_IOR('x', 5, long)
++#define FIOC_SETXFLG	_IOW('x', 6, long)
++
++#else	/* _VX_INODE_H */
++#warning duplicate inclusion
++#endif	/* _VX_INODE_H */
+--- a/include/linux/vserver/inode_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/inode_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,59 @@
++#ifndef _VX_INODE_CMD_H
++#define _VX_INODE_CMD_H
++
++
++/*  inode vserver commands */
++
++#define VCMD_get_iattr		VC_CMD(INODE, 1, 1)
++#define VCMD_set_iattr		VC_CMD(INODE, 2, 1)
++
++#define VCMD_fget_iattr		VC_CMD(INODE, 3, 0)
++#define VCMD_fset_iattr		VC_CMD(INODE, 4, 0)
++
++struct	vcmd_ctx_iattr_v1 {
++	const char __user *name;
++	uint32_t tag;
++	uint32_t flags;
++	uint32_t mask;
++};
++
++struct	vcmd_ctx_fiattr_v0 {
++	uint32_t tag;
++	uint32_t flags;
++	uint32_t mask;
++};
++
++
++#ifdef	__KERNEL__
++
++
++#ifdef	CONFIG_COMPAT
++
++#include <asm/compat.h>
++
++struct	vcmd_ctx_iattr_v1_x32 {
++	compat_uptr_t name_ptr;
++	uint32_t tag;
++	uint32_t flags;
++	uint32_t mask;
++};
++
++#endif	/* CONFIG_COMPAT */
++
++#include <linux/compiler.h>
++
++extern int vc_get_iattr(void __user *);
++extern int vc_set_iattr(void __user *);
++
++extern int vc_fget_iattr(uint32_t, void __user *);
++extern int vc_fset_iattr(uint32_t, void __user *);
++
++#ifdef	CONFIG_COMPAT
++
++extern int vc_get_iattr_x32(void __user *);
++extern int vc_set_iattr_x32(void __user *);
++
++#endif	/* CONFIG_COMPAT */
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_INODE_CMD_H */
+--- a/include/linux/vserver/limit.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/limit.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,71 @@
++#ifndef _VX_LIMIT_H
++#define _VX_LIMIT_H
++
++#define VLIMIT_NSOCK	16
++#define VLIMIT_OPENFD	17
++#define VLIMIT_ANON	18
++#define VLIMIT_SHMEM	19
++#define VLIMIT_SEMARY	20
++#define VLIMIT_NSEMS	21
++#define VLIMIT_DENTRY	22
++#define VLIMIT_MAPPED	23
++
++
++#ifdef	__KERNEL__
++
++#define	VLIM_NOCHECK	((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS))
++
++/*	keep in sync with CRLIM_INFINITY */
++
++#define	VLIM_INFINITY	(~0ULL)
++
++#include <asm/atomic.h>
++#include <asm/resource.h>
++
++#ifndef RLIM_INFINITY
++#warning RLIM_INFINITY is undefined
++#endif
++
++#define __rlim_val(l, r, v)	((l)->res[r].v)
++
++#define __rlim_soft(l, r)	__rlim_val(l, r, soft)
++#define __rlim_hard(l, r)	__rlim_val(l, r, hard)
++
++#define __rlim_rcur(l, r)	__rlim_val(l, r, rcur)
++#define __rlim_rmin(l, r)	__rlim_val(l, r, rmin)
++#define __rlim_rmax(l, r)	__rlim_val(l, r, rmax)
++
++#define __rlim_lhit(l, r)	__rlim_val(l, r, lhit)
++#define __rlim_hit(l, r)	atomic_inc(&__rlim_lhit(l, r))
++
++typedef atomic_long_t rlim_atomic_t;
++typedef unsigned long rlim_t;
++
++#define __rlim_get(l, r)	atomic_long_read(&__rlim_rcur(l, r))
++#define __rlim_set(l, r, v)	atomic_long_set(&__rlim_rcur(l, r), v)
++#define __rlim_inc(l, r)	atomic_long_inc(&__rlim_rcur(l, r))
++#define __rlim_dec(l, r)	atomic_long_dec(&__rlim_rcur(l, r))
++#define __rlim_add(l, r, v)	atomic_long_add(v, &__rlim_rcur(l, r))
++#define __rlim_sub(l, r, v)	atomic_long_sub(v, &__rlim_rcur(l, r))
++
++
++#if	(RLIM_INFINITY == VLIM_INFINITY)
++#define	VX_VLIM(r) ((long long)(long)(r))
++#define	VX_RLIM(v) ((rlim_t)(v))
++#else
++#define	VX_VLIM(r) (((r) == RLIM_INFINITY) \
++		? VLIM_INFINITY : (long long)(r))
++#define	VX_RLIM(v) (((v) == VLIM_INFINITY) \
++		? RLIM_INFINITY : (rlim_t)(v))
++#endif
++
++struct sysinfo;
++
++void vx_vsi_meminfo(struct sysinfo *);
++void vx_vsi_swapinfo(struct sysinfo *);
++long vx_vsi_cached(struct sysinfo *);
++
++#define NUM_LIMITS	24
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_LIMIT_H */
+--- a/include/linux/vserver/limit_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/limit_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,71 @@
++#ifndef _VX_LIMIT_CMD_H
++#define _VX_LIMIT_CMD_H
++
++
++/*  rlimit vserver commands */
++
++#define VCMD_get_rlimit		VC_CMD(RLIMIT, 1, 0)
++#define VCMD_set_rlimit		VC_CMD(RLIMIT, 2, 0)
++#define VCMD_get_rlimit_mask	VC_CMD(RLIMIT, 3, 0)
++#define VCMD_reset_hits		VC_CMD(RLIMIT, 7, 0)
++#define VCMD_reset_minmax	VC_CMD(RLIMIT, 9, 0)
++
++struct	vcmd_ctx_rlimit_v0 {
++	uint32_t id;
++	uint64_t minimum;
++	uint64_t softlimit;
++	uint64_t maximum;
++};
++
++struct	vcmd_ctx_rlimit_mask_v0 {
++	uint32_t minimum;
++	uint32_t softlimit;
++	uint32_t maximum;
++};
++
++#define VCMD_rlimit_stat	VC_CMD(VSTAT, 1, 0)
++
++struct	vcmd_rlimit_stat_v0 {
++	uint32_t id;
++	uint32_t hits;
++	uint64_t value;
++	uint64_t minimum;
++	uint64_t maximum;
++};
++
++#define CRLIM_UNSET		(0ULL)
++#define CRLIM_INFINITY		(~0ULL)
++#define CRLIM_KEEP		(~1ULL)
++
++#ifdef	__KERNEL__
++
++#ifdef	CONFIG_IA32_EMULATION
++
++struct	vcmd_ctx_rlimit_v0_x32 {
++	uint32_t id;
++	uint64_t minimum;
++	uint64_t softlimit;
++	uint64_t maximum;
++} __attribute__ ((packed));
++
++#endif	/* CONFIG_IA32_EMULATION */
++
++#include <linux/compiler.h>
++
++extern int vc_get_rlimit_mask(uint32_t, void __user *);
++extern int vc_get_rlimit(struct vx_info *, void __user *);
++extern int vc_set_rlimit(struct vx_info *, void __user *);
++extern int vc_reset_hits(struct vx_info *, void __user *);
++extern int vc_reset_minmax(struct vx_info *, void __user *);
++
++extern int vc_rlimit_stat(struct vx_info *, void __user *);
++
++#ifdef	CONFIG_IA32_EMULATION
++
++extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
++extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
++
++#endif	/* CONFIG_IA32_EMULATION */
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_LIMIT_CMD_H */
+--- a/include/linux/vserver/limit_def.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/limit_def.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,47 @@
++#ifndef _VX_LIMIT_DEF_H
++#define _VX_LIMIT_DEF_H
++
++#include <asm/atomic.h>
++#include <asm/resource.h>
++
++#include "limit.h"
++
++
++struct _vx_res_limit {
++	rlim_t soft;		/* Context soft limit */
++	rlim_t hard;		/* Context hard limit */
++
++	rlim_atomic_t rcur;	/* Current value */
++	rlim_t rmin;		/* Context minimum */
++	rlim_t rmax;		/* Context maximum */
++
++	atomic_t lhit;		/* Limit hits */
++};
++
++/* context sub struct */
++
++struct _vx_limit {
++	struct _vx_res_limit res[NUM_LIMITS];
++};
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_limit(struct _vx_limit *limit)
++{
++	int i;
++
++	printk("\t_vx_limit:");
++	for (i = 0; i < NUM_LIMITS; i++) {
++		printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
++			i, (unsigned long)__rlim_get(limit, i),
++			(unsigned long)__rlim_rmin(limit, i),
++			(unsigned long)__rlim_rmax(limit, i),
++			(long)__rlim_soft(limit, i),
++			(long)__rlim_hard(limit, i),
++			atomic_read(&__rlim_lhit(limit, i)));
++	}
++}
++
++#endif
++
++#endif	/* _VX_LIMIT_DEF_H */
+--- a/include/linux/vserver/limit_int.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/limit_int.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,198 @@
++#ifndef _VX_LIMIT_INT_H
++#define _VX_LIMIT_INT_H
++
++#include "context.h"
++
++#ifdef	__KERNEL__
++
++#define VXD_RCRES_COND(r)	VXD_CBIT(cres, r)
++#define VXD_RLIMIT_COND(r)	VXD_CBIT(limit, r)
++
++extern const char *vlimit_name[NUM_LIMITS];
++
++static inline void __vx_acc_cres(struct vx_info *vxi,
++	int res, int dir, void *_data, char *_file, int _line)
++{
++	if (VXD_RCRES_COND(res))
++		vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
++			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++			(dir > 0) ? "++" : "--", _data, _file, _line);
++	if (!vxi)
++		return;
++
++	if (dir > 0)
++		__rlim_inc(&vxi->limit, res);
++	else
++		__rlim_dec(&vxi->limit, res);
++}
++
++static inline void __vx_add_cres(struct vx_info *vxi,
++	int res, int amount, void *_data, char *_file, int _line)
++{
++	if (VXD_RCRES_COND(res))
++		vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
++			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++			amount, _data, _file, _line);
++	if (amount == 0)
++		return;
++	if (!vxi)
++		return;
++	__rlim_add(&vxi->limit, res, amount);
++}
++
++static inline
++int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value)
++{
++	int cond = (value > __rlim_rmax(limit, res));
++
++	if (cond)
++		__rlim_rmax(limit, res) = value;
++	return cond;
++}
++
++static inline
++int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value)
++{
++	int cond = (value < __rlim_rmin(limit, res));
++
++	if (cond)
++		__rlim_rmin(limit, res) = value;
++	return cond;
++}
++
++static inline
++void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value)
++{
++	if (!__vx_cres_adjust_max(limit, res, value))
++		__vx_cres_adjust_min(limit, res, value);
++}
++
++
++/*	return values:
++	 +1 ... no limit hit
++	 -1 ... over soft limit
++	  0 ... over hard limit		*/
++
++static inline int __vx_cres_avail(struct vx_info *vxi,
++	int res, int num, char *_file, int _line)
++{
++	struct _vx_limit *limit;
++	rlim_t value;
++
++	if (VXD_RLIMIT_COND(res))
++		vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
++			(vxi ? vxi->vx_id : -1), vlimit_name[res], res,
++			(vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
++			(vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
++			(vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
++			num, _file, _line);
++	if (!vxi)
++		return 1;
++
++	limit = &vxi->limit;
++	value = __rlim_get(limit, res);
++
++	if (!__vx_cres_adjust_max(limit, res, value))
++		__vx_cres_adjust_min(limit, res, value);
++
++	if (num == 0)
++		return 1;
++
++	if (__rlim_soft(limit, res) == RLIM_INFINITY)
++		return -1;
++	if (value + num <= __rlim_soft(limit, res))
++		return -1;
++
++	if (__rlim_hard(limit, res) == RLIM_INFINITY)
++		return 1;
++	if (value + num <= __rlim_hard(limit, res))
++		return 1;
++
++	__rlim_hit(limit, res);
++	return 0;
++}
++
++
++static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 };
++
++static inline
++rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array)
++{
++	rlim_t value, sum = 0;
++	int res;
++
++	while ((res = *array++)) {
++		value = __rlim_get(limit, res);
++		__vx_cres_fixup(limit, res, value);
++		sum += value;
++	}
++	return sum;
++}
++
++static inline
++rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array)
++{
++	rlim_t value = __vx_cres_array_sum(limit, array + 1);
++	int res = *array;
++
++	if (value == __rlim_get(limit, res))
++		return value;
++
++	__rlim_set(limit, res, value);
++	/* now adjust min/max */
++	if (!__vx_cres_adjust_max(limit, res, value))
++		__vx_cres_adjust_min(limit, res, value);
++
++	return value;
++}
++
++static inline int __vx_cres_array_avail(struct vx_info *vxi,
++	const int *array, int num, char *_file, int _line)
++{
++	struct _vx_limit *limit;
++	rlim_t value = 0;
++	int res;
++
++	if (num == 0)
++		return 1;
++	if (!vxi)
++		return 1;
++
++	limit = &vxi->limit;
++	res = *array;
++	value = __vx_cres_array_sum(limit, array + 1);
++
++	__rlim_set(limit, res, value);
++	__vx_cres_fixup(limit, res, value);
++
++	return __vx_cres_avail(vxi, res, num, _file, _line);
++}
++
++
++static inline void vx_limit_fixup(struct _vx_limit *limit, int id)
++{
++	rlim_t value;
++	int res;
++
++	/* complex resources first */
++	if ((id < 0) || (id == RLIMIT_RSS))
++		__vx_cres_array_fixup(limit, VLA_RSS);
++
++	for (res = 0; res < NUM_LIMITS; res++) {
++		if ((id > 0) && (res != id))
++			continue;
++
++		value = __rlim_get(limit, res);
++		__vx_cres_fixup(limit, res, value);
++
++		/* not supposed to happen, maybe warn? */
++		if (__rlim_rmax(limit, res) > __rlim_hard(limit, res))
++			__rlim_rmax(limit, res) = __rlim_hard(limit, res);
++	}
++}
++
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_LIMIT_INT_H */
+--- a/include/linux/vserver/monitor.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/monitor.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,96 @@
++#ifndef _VX_MONITOR_H
++#define _VX_MONITOR_H
++
++#include <linux/types.h>
++
++enum {
++	VXM_UNUSED = 0,
++
++	VXM_SYNC = 0x10,
++
++	VXM_UPDATE = 0x20,
++	VXM_UPDATE_1,
++	VXM_UPDATE_2,
++
++	VXM_RQINFO_1 = 0x24,
++	VXM_RQINFO_2,
++
++	VXM_ACTIVATE = 0x40,
++	VXM_DEACTIVATE,
++	VXM_IDLE,
++
++	VXM_HOLD = 0x44,
++	VXM_UNHOLD,
++
++	VXM_MIGRATE = 0x48,
++	VXM_RESCHED,
++
++	/* all other bits are flags */
++	VXM_SCHED = 0x80,
++};
++
++struct _vxm_update_1 {
++	uint32_t tokens_max;
++	uint32_t fill_rate;
++	uint32_t interval;
++};
++
++struct _vxm_update_2 {
++	uint32_t tokens_min;
++	uint32_t fill_rate;
++	uint32_t interval;
++};
++
++struct _vxm_rqinfo_1 {
++	uint16_t running;
++	uint16_t onhold;
++	uint16_t iowait;
++	uint16_t uintr;
++	uint32_t idle_tokens;
++};
++
++struct _vxm_rqinfo_2 {
++	uint32_t norm_time;
++	uint32_t idle_time;
++	uint32_t idle_skip;
++};
++
++struct _vxm_sched {
++	uint32_t tokens;
++	uint32_t norm_time;
++	uint32_t idle_time;
++};
++
++struct _vxm_task {
++	uint16_t pid;
++	uint16_t state;
++};
++
++struct _vxm_event {
++	uint32_t jif;
++	union {
++		uint32_t seq;
++		uint32_t sec;
++	};
++	union {
++		uint32_t tokens;
++		uint32_t nsec;
++		struct _vxm_task tsk;
++	};
++};
++
++struct _vx_mon_entry {
++	uint16_t type;
++	uint16_t xid;
++	union {
++		struct _vxm_event ev;
++		struct _vxm_sched sd;
++		struct _vxm_update_1 u1;
++		struct _vxm_update_2 u2;
++		struct _vxm_rqinfo_1 q1;
++		struct _vxm_rqinfo_2 q2;
++	};
++};
++
++
++#endif /* _VX_MONITOR_H */
+--- a/include/linux/vserver/network.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/network.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,146 @@
++#ifndef _VX_NETWORK_H
++#define _VX_NETWORK_H
++
++#include <linux/types.h>
++
++
++#define MAX_N_CONTEXT	65535	/* Arbitrary limit */
++
++
++/* network flags */
++
++#define NXF_INFO_PRIVATE	0x00000008
++
++#define NXF_SINGLE_IP		0x00000100
++#define NXF_LBACK_REMAP		0x00000200
++#define NXF_LBACK_ALLOW		0x00000400
++
++#define NXF_HIDE_NETIF		0x02000000
++#define NXF_HIDE_LBACK		0x04000000
++
++#define NXF_STATE_SETUP		(1ULL << 32)
++#define NXF_STATE_ADMIN		(1ULL << 34)
++
++#define NXF_SC_HELPER		(1ULL << 36)
++#define NXF_PERSISTENT		(1ULL << 38)
++
++#define NXF_ONE_TIME		(0x0005ULL << 32)
++
++
++#define	NXF_INIT_SET		(__nxf_init_set())
++
++static inline uint64_t __nxf_init_set(void) {
++	return	  NXF_STATE_ADMIN
++#ifdef	CONFIG_VSERVER_AUTO_LBACK
++		| NXF_LBACK_REMAP
++		| NXF_HIDE_LBACK
++#endif
++#ifdef	CONFIG_VSERVER_AUTO_SINGLE
++		| NXF_SINGLE_IP
++#endif
++		| NXF_HIDE_NETIF;
++}
++
++
++/* network caps */
++
++#define NXC_TUN_CREATE		0x00000001
++
++#define NXC_RAW_ICMP		0x00000100
++
++
++/* address types */
++
++#define NXA_TYPE_IPV4		0x0001
++#define NXA_TYPE_IPV6		0x0002
++
++#define NXA_TYPE_NONE		0x0000
++#define NXA_TYPE_ANY		0x00FF
++
++#define NXA_TYPE_ADDR		0x0010
++#define NXA_TYPE_MASK		0x0020
++#define NXA_TYPE_RANGE		0x0040
++
++#define NXA_MASK_ALL		(NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE)
++
++#define NXA_MOD_BCAST		0x0100
++#define NXA_MOD_LBACK		0x0200
++
++#define NXA_LOOPBACK		0x1000
++
++#define NXA_MASK_BIND		(NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK)
++#define NXA_MASK_SHOW		(NXA_MASK_ALL | NXA_LOOPBACK)
++
++#ifdef	__KERNEL__
++
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/rcupdate.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <asm/atomic.h>
++
++struct nx_addr_v4 {
++	struct nx_addr_v4 *next;
++	struct in_addr ip[2];
++	struct in_addr mask;
++	uint16_t type;
++	uint16_t flags;
++};
++
++struct nx_addr_v6 {
++	struct nx_addr_v6 *next;
++	struct in6_addr ip;
++	struct in6_addr mask;
++	uint32_t prefix;
++	uint16_t type;
++	uint16_t flags;
++};
++
++struct nx_info {
++	struct hlist_node nx_hlist;	/* linked list of nxinfos */
++	nid_t nx_id;			/* vnet id */
++	atomic_t nx_usecnt;		/* usage count */
++	atomic_t nx_tasks;		/* tasks count */
++	int nx_state;			/* context state */
++
++	uint64_t nx_flags;		/* network flag word */
++	uint64_t nx_ncaps;		/* network capabilities */
++
++	struct in_addr v4_lback;	/* Loopback address */
++	struct in_addr v4_bcast;	/* Broadcast address */
++	struct nx_addr_v4 v4;		/* First/Single ipv4 address */
++#ifdef	CONFIG_IPV6
++	struct nx_addr_v6 v6;		/* First/Single ipv6 address */
++#endif
++	char nx_name[65];		/* network context name */
++};
++
++
++/* status flags */
++
++#define NXS_HASHED      0x0001
++#define NXS_SHUTDOWN    0x0100
++#define NXS_RELEASED    0x8000
++
++extern struct nx_info *lookup_nx_info(int);
++
++extern int get_nid_list(int, unsigned int *, int);
++extern int nid_is_hashed(nid_t);
++
++extern int nx_migrate_task(struct task_struct *, struct nx_info *);
++
++extern long vs_net_change(struct nx_info *, unsigned int);
++
++struct sock;
++
++
++#define NX_IPV4(n)	((n)->v4.type != NXA_TYPE_NONE)
++#ifdef  CONFIG_IPV6
++#define NX_IPV6(n)	((n)->v6.type != NXA_TYPE_NONE)
++#else
++#define NX_IPV6(n)	(0)
++#endif
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_NETWORK_H */
+--- a/include/linux/vserver/network_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/network_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,150 @@
++#ifndef _VX_NETWORK_CMD_H
++#define _VX_NETWORK_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_nid		VC_CMD(VINFO, 2, 0)
++
++#ifdef	__KERNEL__
++extern int vc_task_nid(uint32_t);
++
++#endif	/* __KERNEL__ */
++
++#define VCMD_nx_info		VC_CMD(VINFO, 6, 0)
++
++struct	vcmd_nx_info_v0 {
++	uint32_t nid;
++	/* more to come */
++};
++
++#ifdef	__KERNEL__
++extern int vc_nx_info(struct nx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++#include <linux/in.h>
++#include <linux/in6.h>
++
++#define VCMD_net_create_v0	VC_CMD(VNET, 1, 0)
++#define VCMD_net_create		VC_CMD(VNET, 1, 1)
++
++struct  vcmd_net_create {
++	uint64_t flagword;
++};
++
++#define VCMD_net_migrate	VC_CMD(NETMIG, 1, 0)
++
++#define VCMD_net_add		VC_CMD(NETALT, 1, 0)
++#define VCMD_net_remove		VC_CMD(NETALT, 2, 0)
++
++struct	vcmd_net_addr_v0 {
++	uint16_t type;
++	uint16_t count;
++	struct in_addr ip[4];
++	struct in_addr mask[4];
++};
++
++#define VCMD_net_add_ipv4	VC_CMD(NETALT, 1, 1)
++#define VCMD_net_remove_ipv4	VC_CMD(NETALT, 2, 1)
++
++struct	vcmd_net_addr_ipv4_v1 {
++	uint16_t type;
++	uint16_t flags;
++	struct in_addr ip;
++	struct in_addr mask;
++};
++
++#define VCMD_net_add_ipv6	VC_CMD(NETALT, 3, 1)
++#define VCMD_net_remove_ipv6	VC_CMD(NETALT, 4, 1)
++
++struct	vcmd_net_addr_ipv6_v1 {
++	uint16_t type;
++	uint16_t flags;
++	uint32_t prefix;
++	struct in6_addr ip;
++	struct in6_addr mask;
++};
++
++#define VCMD_add_match_ipv4	VC_CMD(NETALT, 5, 0)
++#define VCMD_get_match_ipv4	VC_CMD(NETALT, 6, 0)
++
++struct	vcmd_match_ipv4_v0 {
++	uint16_t type;
++	uint16_t flags;
++	uint16_t parent;
++	uint16_t prefix;
++	struct in_addr ip;
++	struct in_addr ip2;
++	struct in_addr mask;
++};
++
++#define VCMD_add_match_ipv6	VC_CMD(NETALT, 7, 0)
++#define VCMD_get_match_ipv6	VC_CMD(NETALT, 8, 0)
++
++struct	vcmd_match_ipv6_v0 {
++	uint16_t type;
++	uint16_t flags;
++	uint16_t parent;
++	uint16_t prefix;
++	struct in6_addr ip;
++	struct in6_addr ip2;
++	struct in6_addr mask;
++};
++
++
++#ifdef	__KERNEL__
++extern int vc_net_create(uint32_t, void __user *);
++extern int vc_net_migrate(struct nx_info *, void __user *);
++
++extern int vc_net_add(struct nx_info *, void __user *);
++extern int vc_net_remove(struct nx_info *, void __user *);
++
++extern int vc_net_add_ipv4(struct nx_info *, void __user *);
++extern int vc_net_remove_ipv4(struct nx_info *, void __user *);
++
++extern int vc_net_add_ipv6(struct nx_info *, void __user *);
++extern int vc_net_remove_ipv6(struct nx_info *, void __user *);
++
++extern int vc_add_match_ipv4(struct nx_info *, void __user *);
++extern int vc_get_match_ipv4(struct nx_info *, void __user *);
++
++extern int vc_add_match_ipv6(struct nx_info *, void __user *);
++extern int vc_get_match_ipv6(struct nx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* flag commands */
++
++#define VCMD_get_nflags		VC_CMD(FLAGS, 5, 0)
++#define VCMD_set_nflags		VC_CMD(FLAGS, 6, 0)
++
++struct	vcmd_net_flags_v0 {
++	uint64_t flagword;
++	uint64_t mask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_nflags(struct nx_info *, void __user *);
++extern int vc_set_nflags(struct nx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++
++/* network caps commands */
++
++#define VCMD_get_ncaps		VC_CMD(FLAGS, 7, 0)
++#define VCMD_set_ncaps		VC_CMD(FLAGS, 8, 0)
++
++struct	vcmd_net_caps_v0 {
++	uint64_t ncaps;
++	uint64_t cmask;
++};
++
++#ifdef	__KERNEL__
++extern int vc_get_ncaps(struct nx_info *, void __user *);
++extern int vc_set_ncaps(struct nx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_NETWORK_CMD_H */
+--- a/include/linux/vserver/percpu.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/percpu.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,14 @@
++#ifndef _VX_PERCPU_H
++#define _VX_PERCPU_H
++
++#include "cvirt_def.h"
++#include "sched_def.h"
++
++struct	_vx_percpu {
++	struct _vx_cvirt_pc cvirt;
++	struct _vx_sched_pc sched;
++};
++
++#define	PERCPU_PERCTX	(sizeof(struct _vx_percpu))
++
++#endif	/* _VX_PERCPU_H */
+--- a/include/linux/vserver/pid.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/pid.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,51 @@
++#ifndef _VSERVER_PID_H
++#define _VSERVER_PID_H
++
++/* pid faking stuff */
++
++#define vx_info_map_pid(v, p) \
++	__vx_info_map_pid((v), (p), __func__, __FILE__, __LINE__)
++#define vx_info_map_tgid(v,p)  vx_info_map_pid(v,p)
++#define vx_map_pid(p) vx_info_map_pid(current_vx_info(), p)
++#define vx_map_tgid(p) vx_map_pid(p)
++
++static inline int __vx_info_map_pid(struct vx_info *vxi, int pid,
++	const char *func, const char *file, int line)
++{
++	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
++		vxfprintk(VXD_CBIT(cvirt, 2),
++			"vx_map_tgid: %p/%llx: %d -> %d",
++			vxi, (long long)vxi->vx_flags, pid,
++			(pid && pid == vxi->vx_initpid) ? 1 : pid,
++			func, file, line);
++		if (pid == 0)
++			return 0;
++		if (pid == vxi->vx_initpid)
++			return 1;
++	}
++	return pid;
++}
++
++#define vx_info_rmap_pid(v, p) \
++	__vx_info_rmap_pid((v), (p), __func__, __FILE__, __LINE__)
++#define vx_rmap_pid(p) vx_info_rmap_pid(current_vx_info(), p)
++#define vx_rmap_tgid(p) vx_rmap_pid(p)
++
++static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid,
++	const char *func, const char *file, int line)
++{
++	if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
++		vxfprintk(VXD_CBIT(cvirt, 2),
++			"vx_rmap_tgid: %p/%llx: %d -> %d",
++			vxi, (long long)vxi->vx_flags, pid,
++			(pid == 1) ? vxi->vx_initpid : pid,
++			func, file, line);
++		if ((pid == 1) && vxi->vx_initpid)
++			return vxi->vx_initpid;
++		if (pid == vxi->vx_initpid)
++			return ~0U;
++	}
++	return pid;
++}
++
++#endif
+--- a/include/linux/vserver/sched.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/sched.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,26 @@
++#ifndef _VX_SCHED_H
++#define _VX_SCHED_H
++
++
++#ifdef	__KERNEL__
++
++struct timespec;
++
++void vx_vsi_uptime(struct timespec *, struct timespec *);
++
++
++struct vx_info;
++
++void vx_update_load(struct vx_info *);
++
++
++int vx_tokens_recalc(struct _vx_sched_pc *,
++	unsigned long *, unsigned long *, int [2]);
++
++void vx_update_sched_param(struct _vx_sched *sched,
++	struct _vx_sched_pc *sched_pc);
++
++#endif	/* __KERNEL__ */
++#else	/* _VX_SCHED_H */
++#warning duplicate inclusion
++#endif	/* _VX_SCHED_H */
+--- a/include/linux/vserver/sched_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/sched_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,108 @@
++#ifndef _VX_SCHED_CMD_H
++#define _VX_SCHED_CMD_H
++
++
++/*  sched vserver commands */
++
++#define VCMD_set_sched_v2	VC_CMD(SCHED, 1, 2)
++#define VCMD_set_sched_v3	VC_CMD(SCHED, 1, 3)
++#define VCMD_set_sched_v4	VC_CMD(SCHED, 1, 4)
++
++struct	vcmd_set_sched_v2 {
++	int32_t fill_rate;
++	int32_t interval;
++	int32_t tokens;
++	int32_t tokens_min;
++	int32_t tokens_max;
++	uint64_t cpu_mask;
++};
++
++struct	vcmd_set_sched_v3 {
++	uint32_t set_mask;
++	int32_t fill_rate;
++	int32_t interval;
++	int32_t tokens;
++	int32_t tokens_min;
++	int32_t tokens_max;
++	int32_t priority_bias;
++};
++
++struct	vcmd_set_sched_v4 {
++	uint32_t set_mask;
++	int32_t fill_rate;
++	int32_t interval;
++	int32_t tokens;
++	int32_t tokens_min;
++	int32_t tokens_max;
++	int32_t prio_bias;
++	int32_t cpu_id;
++	int32_t bucket_id;
++};
++
++#define VCMD_set_sched		VC_CMD(SCHED, 1, 5)
++#define VCMD_get_sched		VC_CMD(SCHED, 2, 5)
++
++struct	vcmd_sched_v5 {
++	uint32_t mask;
++	int32_t cpu_id;
++	int32_t bucket_id;
++	int32_t fill_rate[2];
++	int32_t interval[2];
++	int32_t tokens;
++	int32_t tokens_min;
++	int32_t tokens_max;
++	int32_t prio_bias;
++};
++
++#define VXSM_FILL_RATE		0x0001
++#define VXSM_INTERVAL		0x0002
++#define VXSM_FILL_RATE2		0x0004
++#define VXSM_INTERVAL2		0x0008
++#define VXSM_TOKENS		0x0010
++#define VXSM_TOKENS_MIN		0x0020
++#define VXSM_TOKENS_MAX		0x0040
++#define VXSM_PRIO_BIAS		0x0100
++
++#define VXSM_IDLE_TIME		0x0200
++#define VXSM_FORCE		0x0400
++
++#define	VXSM_V3_MASK		0x0173
++#define	VXSM_SET_MASK		0x01FF
++
++#define VXSM_CPU_ID		0x1000
++#define VXSM_BUCKET_ID		0x2000
++
++#define VXSM_MSEC		0x4000
++
++#define SCHED_KEEP		(-2)	/* only for v2 */
++
++#ifdef	__KERNEL__
++
++#include <linux/compiler.h>
++
++extern int vc_set_sched_v2(struct vx_info *, void __user *);
++extern int vc_set_sched_v3(struct vx_info *, void __user *);
++extern int vc_set_sched_v4(struct vx_info *, void __user *);
++extern int vc_set_sched(struct vx_info *, void __user *);
++extern int vc_get_sched(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++#define VCMD_sched_info		VC_CMD(SCHED, 3, 0)
++
++struct	vcmd_sched_info {
++	int32_t cpu_id;
++	int32_t bucket_id;
++	uint64_t user_msec;
++	uint64_t sys_msec;
++	uint64_t hold_msec;
++	uint32_t token_usec;
++	int32_t vavavoom;
++};
++
++#ifdef	__KERNEL__
++
++extern int vc_sched_info(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_SCHED_CMD_H */
+--- a/include/linux/vserver/sched_def.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/sched_def.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,68 @@
++#ifndef _VX_SCHED_DEF_H
++#define _VX_SCHED_DEF_H
++
++#include <linux/spinlock.h>
++#include <linux/jiffies.h>
++#include <linux/cpumask.h>
++#include <asm/atomic.h>
++#include <asm/param.h>
++
++
++/* context sub struct */
++
++struct _vx_sched {
++	spinlock_t tokens_lock;		/* lock for token bucket */
++
++	int tokens;			/* number of CPU tokens */
++	int fill_rate[2];		/* Fill rate: add X tokens... */
++	int interval[2];		/* Divisor:   per Y jiffies   */
++	int tokens_min;			/* Limit:     minimum for unhold */
++	int tokens_max;			/* Limit:     no more than N tokens */
++
++	int prio_bias;			/* bias offset for priority */
++
++	unsigned update_mask;		/* which features should be updated */
++	cpumask_t update;		/* CPUs which should update */
++};
++
++struct _vx_sched_pc {
++	int tokens;			/* number of CPU tokens */
++	int flags;			/* bucket flags */
++
++	int fill_rate[2];		/* Fill rate: add X tokens... */
++	int interval[2];		/* Divisor:   per Y jiffies   */
++	int tokens_min;			/* Limit:     minimum for unhold */
++	int tokens_max;			/* Limit:     no more than N tokens */
++
++	int prio_bias;			/* bias offset for priority */
++	int vavavoom;			/* last calculated vavavoom */
++
++	unsigned long norm_time;	/* last time accounted */
++	unsigned long idle_time;	/* non linear time for fair sched */
++	unsigned long token_time;	/* token time for accounting */
++	unsigned long onhold;		/* jiffies when put on hold */
++
++	uint64_t user_ticks;		/* token tick events */
++	uint64_t sys_ticks;		/* token tick events */
++	uint64_t hold_ticks;		/* token ticks paused */
++};
++
++
++#define VXSF_ONHOLD	0x0001
++#define VXSF_IDLE_TIME	0x0100
++
++#ifdef CONFIG_VSERVER_DEBUG
++
++static inline void __dump_vx_sched(struct _vx_sched *sched)
++{
++	printk("\t_vx_sched:\n");
++	printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n",
++		sched->fill_rate[0], sched->interval[0],
++		sched->fill_rate[1], sched->interval[1],
++		sched->tokens_min, sched->tokens_max);
++	printk("\t priority = %4d\n", sched->prio_bias);
++}
++
++#endif
++
++#endif	/* _VX_SCHED_DEF_H */
+--- a/include/linux/vserver/signal.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/signal.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,14 @@
++#ifndef _VX_SIGNAL_H
++#define _VX_SIGNAL_H
++
++
++#ifdef	__KERNEL__
++
++struct vx_info;
++
++int vx_info_kill(struct vx_info *, int, int);
++
++#endif	/* __KERNEL__ */
++#else	/* _VX_SIGNAL_H */
++#warning duplicate inclusion
++#endif	/* _VX_SIGNAL_H */
+--- a/include/linux/vserver/signal_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/signal_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,43 @@
++#ifndef _VX_SIGNAL_CMD_H
++#define _VX_SIGNAL_CMD_H
++
++
++/*  signalling vserver commands */
++
++#define VCMD_ctx_kill		VC_CMD(PROCTRL, 1, 0)
++#define VCMD_wait_exit		VC_CMD(EVENT, 99, 0)
++
++struct	vcmd_ctx_kill_v0 {
++	int32_t pid;
++	int32_t sig;
++};
++
++struct	vcmd_wait_exit_v0 {
++	int32_t reboot_cmd;
++	int32_t exit_code;
++};
++
++#ifdef	__KERNEL__
++
++extern int vc_ctx_kill(struct vx_info *, void __user *);
++extern int vc_wait_exit(struct vx_info *, void __user *);
++
++#endif	/* __KERNEL__ */
++
++/*  process alteration commands */
++
++#define VCMD_get_pflags		VC_CMD(PROCALT, 5, 0)
++#define VCMD_set_pflags		VC_CMD(PROCALT, 6, 0)
++
++struct	vcmd_pflags_v0 {
++	uint32_t flagword;
++	uint32_t mask;
++};
++
++#ifdef	__KERNEL__
++
++extern int vc_get_pflags(uint32_t pid, void __user *);
++extern int vc_set_pflags(uint32_t pid, void __user *);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_SIGNAL_CMD_H */
+--- a/include/linux/vserver/space.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/space.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,12 @@
++#ifndef _VX_SPACE_H
++#define _VX_SPACE_H
++
++#include <linux/types.h>
++
++struct vx_info;
++
++int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index);
++
++#else	/* _VX_SPACE_H */
++#warning duplicate inclusion
++#endif	/* _VX_SPACE_H */
+--- a/include/linux/vserver/space_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/space_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,38 @@
++#ifndef _VX_SPACE_CMD_H
++#define _VX_SPACE_CMD_H
++
++
++#define VCMD_enter_space_v0	VC_CMD(PROCALT, 1, 0)
++#define VCMD_enter_space_v1	VC_CMD(PROCALT, 1, 1)
++#define VCMD_enter_space	VC_CMD(PROCALT, 1, 2)
++
++#define VCMD_set_space_v0	VC_CMD(PROCALT, 3, 0)
++#define VCMD_set_space_v1	VC_CMD(PROCALT, 3, 1)
++#define VCMD_set_space		VC_CMD(PROCALT, 3, 2)
++
++#define VCMD_get_space_mask_v0	VC_CMD(PROCALT, 4, 0)
++
++#define VCMD_get_space_mask	VC_CMD(VSPACE, 0, 1)
++#define VCMD_get_space_default	VC_CMD(VSPACE, 1, 0)
++
++
++struct	vcmd_space_mask_v1 {
++	uint64_t mask;
++};
++
++struct	vcmd_space_mask_v2 {
++	uint64_t mask;
++	uint32_t index;
++};
++
++
++#ifdef	__KERNEL__
++
++extern int vc_enter_space_v1(struct vx_info *, void __user *);
++extern int vc_set_space_v1(struct vx_info *, void __user *);
++extern int vc_enter_space(struct vx_info *, void __user *);
++extern int vc_set_space(struct vx_info *, void __user *);
++extern int vc_get_space_mask(void __user *, int);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_SPACE_CMD_H */
+--- a/include/linux/vserver/switch.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/switch.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,98 @@
++#ifndef _VX_SWITCH_H
++#define _VX_SWITCH_H
++
++#include <linux/types.h>
++
++
++#define VC_CATEGORY(c)		(((c) >> 24) & 0x3F)
++#define VC_COMMAND(c)		(((c) >> 16) & 0xFF)
++#define VC_VERSION(c)		((c) & 0xFFF)
++
++#define VC_CMD(c, i, v)		((((VC_CAT_ ## c) & 0x3F) << 24) \
++				| (((i) & 0xFF) << 16) | ((v) & 0xFFF))
++
++/*
++
++  Syscall Matrix V2.8
++
++	 |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
++	 |STATS  |DESTROY|ALTER  |CHANGE |LIMIT  |TEST   | |       |       |
++	 |INFO   |SETUP  |       |MOVE   |       |       | |       |       |
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  SYSTEM |VERSION|VSETUP |VHOST  |       |       |       | |DEVICE |       |
++  HOST   |     00|     01|     02|     03|     04|     05| |     06|     07|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  CPU    |       |VPROC  |PROCALT|PROCMIG|PROCTRL|       | |SCHED. |       |
++  PROCESS|     08|     09|     10|     11|     12|     13| |     14|     15|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  MEMORY |       |       |       |       |MEMCTRL|       | |SWAP   |       |
++	 |     16|     17|     18|     19|     20|     21| |     22|     23|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  NETWORK|       |VNET   |NETALT |NETMIG |NETCTL |       | |SERIAL |       |
++	 |     24|     25|     26|     27|     28|     29| |     30|     31|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  DISK   |       |       |       |TAGMIG |DLIMIT |       | |INODE  |       |
++  VFS    |     32|     33|     34|     35|     36|     37| |     38|     39|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  OTHER  |VSTAT  |       |       |       |       |       | |VINFO  |       |
++	 |     40|     41|     42|     43|     44|     45| |     46|     47|
++  =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
++  SPECIAL|EVENT  |       |       |       |FLAGS  |       | |VSPACE |       |
++	 |     48|     49|     50|     51|     52|     53| |     54|     55|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++  SPECIAL|DEBUG  |       |       |       |RLIMIT |SYSCALL| |       |COMPAT |
++	 |     56|     57|     58|     59|     60|TEST 61| |     62|     63|
++  -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
++
++*/
++
++#define VC_CAT_VERSION		0
++
++#define VC_CAT_VSETUP		1
++#define VC_CAT_VHOST		2
++
++#define VC_CAT_DEVICE		6
++
++#define VC_CAT_VPROC		9
++#define VC_CAT_PROCALT		10
++#define VC_CAT_PROCMIG		11
++#define VC_CAT_PROCTRL		12
++
++#define VC_CAT_SCHED		14
++#define VC_CAT_MEMCTRL		20
++
++#define VC_CAT_VNET		25
++#define VC_CAT_NETALT		26
++#define VC_CAT_NETMIG		27
++#define VC_CAT_NETCTRL		28
++
++#define VC_CAT_TAGMIG		35
++#define VC_CAT_DLIMIT		36
++#define VC_CAT_INODE		38
++
++#define VC_CAT_VSTAT		40
++#define VC_CAT_VINFO		46
++#define VC_CAT_EVENT		48
++
++#define VC_CAT_FLAGS		52
++#define VC_CAT_VSPACE		54
++#define VC_CAT_DEBUG		56
++#define VC_CAT_RLIMIT		60
++
++#define VC_CAT_SYSTEST		61
++#define VC_CAT_COMPAT		63
++
++/*  query version */
++
++#define VCMD_get_version	VC_CMD(VERSION, 0, 0)
++#define VCMD_get_vci		VC_CMD(VERSION, 1, 0)
++
++
++#ifdef	__KERNEL__
++
++#include <linux/errno.h>
++
++#endif	/* __KERNEL__ */
++
++#endif	/* _VX_SWITCH_H */
++
+--- a/include/linux/vserver/tag.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/tag.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,143 @@
++#ifndef _DX_TAG_H
++#define _DX_TAG_H
++
++#include <linux/types.h>
++
++
++#define DX_TAG(in)	(IS_TAGGED(in))
++
++
++#ifdef CONFIG_TAG_NFSD
++#define DX_TAG_NFSD	1
++#else
++#define DX_TAG_NFSD	0
++#endif
++
++
++#ifdef CONFIG_TAGGING_NONE
++
++#define MAX_UID		0xFFFFFFFF
++#define MAX_GID		0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag)	(0)
++
++#define TAGINO_UID(cond, uid, tag)	(uid)
++#define TAGINO_GID(cond, gid, tag)	(gid)
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_GID16
++
++#define MAX_UID		0xFFFFFFFF
++#define MAX_GID		0x0000FFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag)	\
++	((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
++
++#define TAGINO_UID(cond, uid, tag)	(uid)
++#define TAGINO_GID(cond, gid, tag)	\
++	((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_ID24
++
++#define MAX_UID		0x00FFFFFF
++#define MAX_GID		0x00FFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag)	\
++	((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
++
++#define TAGINO_UID(cond, uid, tag)	\
++	((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
++#define TAGINO_GID(cond, gid, tag)	\
++	((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_UID16
++
++#define MAX_UID		0x0000FFFF
++#define MAX_GID		0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag)	\
++	((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
++
++#define TAGINO_UID(cond, uid, tag)	\
++	((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
++#define TAGINO_GID(cond, gid, tag)	(gid)
++
++#endif
++
++
++#ifdef CONFIG_TAGGING_INTERN
++
++#define MAX_UID		0xFFFFFFFF
++#define MAX_GID		0xFFFFFFFF
++
++#define INOTAG_TAG(cond, uid, gid, tag)	\
++	((cond) ? (tag) : 0)
++
++#define TAGINO_UID(cond, uid, tag)	(uid)
++#define TAGINO_GID(cond, gid, tag)	(gid)
++
++#endif
++
++
++#ifndef CONFIG_TAGGING_NONE
++#define dx_current_fstag(sb)	\
++	((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
++#else
++#define dx_current_fstag(sb)	(0)
++#endif
++
++#ifndef CONFIG_TAGGING_INTERN
++#define TAGINO_TAG(cond, tag)	(0)
++#else
++#define TAGINO_TAG(cond, tag)	((cond) ? (tag) : 0)
++#endif
++
++#define INOTAG_UID(cond, uid, gid)	\
++	((cond) ? ((uid) & MAX_UID) : (uid))
++#define INOTAG_GID(cond, uid, gid)	\
++	((cond) ? ((gid) & MAX_GID) : (gid))
++
++
++static inline uid_t dx_map_uid(uid_t uid)
++{
++	if ((uid > MAX_UID) && (uid != -1))
++		uid = -2;
++	return (uid & MAX_UID);
++}
++
++static inline gid_t dx_map_gid(gid_t gid)
++{
++	if ((gid > MAX_GID) && (gid != -1))
++		gid = -2;
++	return (gid & MAX_GID);
++}
++
++struct peer_tag {
++	int32_t xid;
++	int32_t nid;
++};
++
++#define dx_notagcheck(sb) ((sb) && ((sb)->s_flags & MS_NOTAGCHECK))
++
++int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
++		 unsigned long *flags);
++
++#ifdef	CONFIG_PROPAGATE
++
++void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
++
++#define dx_propagate_tag(n, i)	__dx_propagate_tag(n, i)
++
++#else
++#define dx_propagate_tag(n, i)	do { } while (0)
++#endif
++
++#endif /* _DX_TAG_H */
+--- a/include/linux/vserver/tag_cmd.h	1970-01-01 01:00:00.000000000 +0100
++++ a/include/linux/vserver/tag_cmd.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,22 @@
++#ifndef _VX_TAG_CMD_H
++#define _VX_TAG_CMD_H
++
++
++/* vinfo commands */
++
++#define VCMD_task_tag		VC_CMD(VINFO, 3, 0)
++
++#ifdef	__KERNEL__
++extern int vc_task_tag(uint32_t);
++
++#endif	/* __KERNEL__ */
++
++/* context commands */
++
++#define VCMD_tag_migrate	VC_CMD(TAGMIG, 1, 0)
++
++#ifdef	__KERNEL__
++extern int vc_tag_migrate(uint32_t);
++
++#endif	/* __KERNEL__ */
++#endif	/* _VX_TAG_CMD_H */
+--- a/include/net/addrconf.h	2009-12-03 20:02:57.000000000 +0100
++++ a/include/net/addrconf.h	2011-06-10 13:03:02.000000000 +0200
+@@ -84,7 +84,8 @@ extern int			ipv6_dev_get_saddr(struct n
+ 					       struct net_device *dev,
+ 					       const struct in6_addr *daddr,
+ 					       unsigned int srcprefs,
+-					       struct in6_addr *saddr);
++					       struct in6_addr *saddr,
++					       struct nx_info *nxi);
+ extern int			ipv6_get_lladdr(struct net_device *dev,
+ 						struct in6_addr *addr,
+ 						unsigned char banned_flags);
+--- a/include/net/af_unix.h	2011-05-29 23:42:28.000000000 +0200
++++ a/include/net/af_unix.h	2011-06-10 13:03:02.000000000 +0200
+@@ -4,6 +4,7 @@
+ #include <linux/socket.h>
+ #include <linux/un.h>
+ #include <linux/mutex.h>
++#include <linux/vs_base.h>
+ #include <net/sock.h>
+ 
+ extern void unix_inflight(struct file *fp);
+--- a/include/net/inet_timewait_sock.h	2009-12-03 20:02:57.000000000 +0100
++++ a/include/net/inet_timewait_sock.h	2011-06-10 13:03:02.000000000 +0200
+@@ -117,6 +117,10 @@ struct inet_timewait_sock {
+ #define tw_hash			__tw_common.skc_hash
+ #define tw_prot			__tw_common.skc_prot
+ #define tw_net			__tw_common.skc_net
++#define tw_xid			__tw_common.skc_xid
++#define tw_vx_info		__tw_common.skc_vx_info
++#define tw_nid			__tw_common.skc_nid
++#define tw_nx_info		__tw_common.skc_nx_info
+ 	int			tw_timeout;
+ 	volatile unsigned char	tw_substate;
+ 	/* 3 bits hole, try to pack */
+--- a/include/net/route.h	2009-09-10 15:26:27.000000000 +0200
++++ a/include/net/route.h	2011-06-10 13:03:02.000000000 +0200
+@@ -135,6 +135,9 @@ static inline void ip_rt_put(struct rtab
+ 		dst_release(&rt->u.dst);
+ }
+ 
++#include <linux/vs_base.h>
++#include <linux/vs_inet.h>
++
+ #define IPTOS_RT_MASK	(IPTOS_TOS_MASK & ~3)
+ 
+ extern const __u8 ip_tos2prio[16];
+@@ -144,6 +147,9 @@ static inline char rt_tos2priority(u8 to
+ 	return ip_tos2prio[IPTOS_TOS(tos)>>1];
+ }
+ 
++extern int ip_v4_find_src(struct net *net, struct nx_info *,
++	struct rtable **, struct flowi *);
++
+ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
+ 				   __be32 src, u32 tos, int oif, u8 protocol,
+ 				   __be16 sport, __be16 dport, struct sock *sk,
+@@ -161,11 +167,24 @@ static inline int ip_route_connect(struc
+ 
+ 	int err;
+ 	struct net *net = sock_net(sk);
++	struct nx_info *nx_info = current_nx_info();
+ 
+ 	if (inet_sk(sk)->transparent)
+ 		fl.flags |= FLOWI_FLAG_ANYSRC;
+ 
+-	if (!dst || !src) {
++	if (sk)
++		nx_info = sk->sk_nx_info;
++
++	vxdprintk(VXD_CBIT(net, 4),
++		"ip_route_connect(%p) %p,%p;%lx",
++		sk, nx_info, sk->sk_socket,
++		(sk->sk_socket?sk->sk_socket->flags:0));
++
++	err = ip_v4_find_src(net, nx_info, rp, &fl);
++	if (err)
++		return err;
++
++	if (!fl.fl4_dst || !fl.fl4_src) {
+ 		err = __ip_route_output_key(net, rp, &fl);
+ 		if (err)
+ 			return err;
+--- a/include/net/sock.h	2009-12-03 20:02:57.000000000 +0100
++++ a/include/net/sock.h	2011-06-10 13:03:02.000000000 +0200
+@@ -139,6 +139,10 @@ struct sock_common {
+ #ifdef CONFIG_NET_NS
+ 	struct net	 	*skc_net;
+ #endif
++	xid_t			skc_xid;
++	struct vx_info		*skc_vx_info;
++	nid_t			skc_nid;
++	struct nx_info		*skc_nx_info;
+ };
+ 
+ /**
+@@ -225,6 +229,10 @@ struct sock {
+ #define sk_bind_node		__sk_common.skc_bind_node
+ #define sk_prot			__sk_common.skc_prot
+ #define sk_net			__sk_common.skc_net
++#define sk_xid			__sk_common.skc_xid
++#define sk_vx_info		__sk_common.skc_vx_info
++#define sk_nid			__sk_common.skc_nid
++#define sk_nx_info		__sk_common.skc_nx_info
+ 	kmemcheck_bitfield_begin(flags);
+ 	unsigned int		sk_shutdown  : 2,
+ 				sk_no_check  : 2,
+--- a/init/Kconfig	2011-05-29 23:42:28.000000000 +0200
++++ a/init/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -426,6 +426,19 @@ config LOG_BUF_SHIFT
+ config HAVE_UNSTABLE_SCHED_CLOCK
+ 	bool
+ 
++config CFS_HARD_LIMITS
++	bool "Hard Limits for CFS Group Scheduler"
++	depends on EXPERIMENTAL
++	depends on FAIR_GROUP_SCHED && CGROUP_SCHED
++	default n
++	help
++	  This option enables hard limiting of CPU time obtained by
++	  a fair task group. Use this if you want to throttle a group of tasks
++	  based on its CPU usage. For more details refer to
++	  Documentation/scheduler/sched-cfs-hard-limits.txt
++
++	  Say N if unsure.
++
+ menuconfig CGROUPS
+ 	boolean "Control Group support"
+ 	help
+--- a/init/main.c	2011-05-29 23:42:28.000000000 +0200
++++ a/init/main.c	2011-06-10 13:03:02.000000000 +0200
+@@ -70,6 +70,7 @@
+ #include <linux/sfi.h>
+ #include <linux/shmem_fs.h>
+ #include <trace/boot.h>
++#include <linux/vserver/percpu.h>
+ 
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+--- a/ipc/mqueue.c	2011-05-29 23:42:28.000000000 +0200
++++ a/ipc/mqueue.c	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,8 @@
+ #include <linux/pid.h>
+ #include <linux/ipc_namespace.h>
+ #include <linux/ima.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+ 
+ #include <net/sock.h>
+ #include "util.h"
+@@ -66,6 +68,7 @@ struct mqueue_inode_info {
+ 	struct sigevent notify;
+ 	struct pid* notify_owner;
+ 	struct user_struct *user;	/* user who created, for accounting */
++	struct vx_info *vxi;
+ 	struct sock *notify_sock;
+ 	struct sk_buff *notify_cookie;
+ 
+@@ -125,6 +128,7 @@ static struct inode *mqueue_get_inode(st
+ 		if (S_ISREG(mode)) {
+ 			struct mqueue_inode_info *info;
+ 			struct task_struct *p = current;
++			struct vx_info *vxi = p->vx_info;
+ 			unsigned long mq_bytes, mq_msg_tblsz;
+ 
+ 			inode->i_fop = &mqueue_file_operations;
+@@ -139,6 +143,7 @@ static struct inode *mqueue_get_inode(st
+ 			info->notify_owner = NULL;
+ 			info->qsize = 0;
+ 			info->user = NULL;	/* set when all is ok */
++			info->vxi = NULL;
+ 			memset(&info->attr, 0, sizeof(info->attr));
+ 			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
+ 			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
+@@ -153,22 +158,26 @@ static struct inode *mqueue_get_inode(st
+ 			spin_lock(&mq_lock);
+ 			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
+ 		 	    u->mq_bytes + mq_bytes >
+-			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
++			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur ||
++			    !vx_ipcmsg_avail(vxi, mq_bytes)) {
+ 				spin_unlock(&mq_lock);
+ 				goto out_inode;
+ 			}
+ 			u->mq_bytes += mq_bytes;
++			vx_ipcmsg_add(vxi, u, mq_bytes);
+ 			spin_unlock(&mq_lock);
+ 
+ 			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
+ 			if (!info->messages) {
+ 				spin_lock(&mq_lock);
+ 				u->mq_bytes -= mq_bytes;
++				vx_ipcmsg_sub(vxi, u, mq_bytes);
+ 				spin_unlock(&mq_lock);
+ 				goto out_inode;
+ 			}
+ 			/* all is ok */
+ 			info->user = get_uid(u);
++			info->vxi = get_vx_info(vxi);
+ 		} else if (S_ISDIR(mode)) {
+ 			inc_nlink(inode);
+ 			/* Some things misbehave if size == 0 on a directory */
+@@ -269,8 +278,11 @@ static void mqueue_delete_inode(struct i
+ 		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
+ 	user = info->user;
+ 	if (user) {
++		struct vx_info *vxi = info->vxi;
++
+ 		spin_lock(&mq_lock);
+ 		user->mq_bytes -= mq_bytes;
++		vx_ipcmsg_sub(vxi, user, mq_bytes);
+ 		/*
+ 		 * get_ns_from_inode() ensures that the
+ 		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
+@@ -280,6 +292,7 @@ static void mqueue_delete_inode(struct i
+ 		if (ipc_ns)
+ 			ipc_ns->mq_queues_count--;
+ 		spin_unlock(&mq_lock);
++		put_vx_info(vxi);
+ 		free_uid(user);
+ 	}
+ 	if (ipc_ns)
+--- a/ipc/msg.c	2011-05-29 23:42:28.000000000 +0200
++++ a/ipc/msg.c	2011-06-10 13:03:02.000000000 +0200
+@@ -38,6 +38,7 @@
+ #include <linux/rwsem.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ipc_namespace.h>
++#include <linux/vs_base.h>
+ 
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+@@ -191,6 +192,7 @@ static int newque(struct ipc_namespace *
+ 
+ 	msq->q_perm.mode = msgflg & S_IRWXUGO;
+ 	msq->q_perm.key = key;
++	msq->q_perm.xid = vx_current_xid();
+ 
+ 	msq->q_perm.security = NULL;
+ 	retval = security_msg_queue_alloc(msq);
+--- a/ipc/namespace.c	2009-09-10 15:26:27.000000000 +0200
++++ a/ipc/namespace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -11,6 +11,8 @@
+ #include <linux/slab.h>
+ #include <linux/fs.h>
+ #include <linux/mount.h>
++#include <linux/vs_base.h>
++#include <linux/vserver/global.h>
+ 
+ #include "util.h"
+ 
+--- a/ipc/sem.c	2011-05-29 23:42:28.000000000 +0200
++++ a/ipc/sem.c	2011-06-10 13:03:02.000000000 +0200
+@@ -83,6 +83,8 @@
+ #include <linux/rwsem.h>
+ #include <linux/nsproxy.h>
+ #include <linux/ipc_namespace.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+ 
+ #include <asm/uaccess.h>
+ #include "util.h"
+@@ -256,6 +258,7 @@ static int newary(struct ipc_namespace *
+ 
+ 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
+ 	sma->sem_perm.key = key;
++	sma->sem_perm.xid = vx_current_xid();
+ 
+ 	sma->sem_perm.security = NULL;
+ 	retval = security_sem_alloc(sma);
+@@ -271,6 +274,9 @@ static int newary(struct ipc_namespace *
+ 		return id;
+ 	}
+ 	ns->used_sems += nsems;
++	/* FIXME: obsoleted? */
++	vx_semary_inc(sma);
++	vx_nsems_add(sma, nsems);
+ 
+ 	sma->sem_base = (struct sem *) &sma[1];
+ 	INIT_LIST_HEAD(&sma->sem_pending);
+@@ -547,6 +553,9 @@ static void freeary(struct ipc_namespace
+ 	sem_unlock(sma);
+ 
+ 	ns->used_sems -= sma->sem_nsems;
++	/* FIXME: obsoleted? */
++	vx_nsems_sub(sma, sma->sem_nsems);
++	vx_semary_dec(sma);
+ 	security_sem_free(sma);
+ 	ipc_rcu_putref(sma);
+ }
+--- a/ipc/shm.c	2011-05-29 23:42:28.000000000 +0200
++++ a/ipc/shm.c	2011-06-10 13:03:02.000000000 +0200
+@@ -40,6 +40,8 @@
+ #include <linux/mount.h>
+ #include <linux/ipc_namespace.h>
+ #include <linux/ima.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -170,7 +172,12 @@ static void shm_open(struct vm_area_stru
+  */
+ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+ {
+-	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid);
++	int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++	vx_ipcshm_sub(vxi, shp, numpages);
++	ns->shm_tot -= numpages;
++
+ 	shm_rmid(ns, shp);
+ 	shm_unlock(shp);
+ 	if (!is_file_hugepages(shp->shm_file))
+@@ -180,6 +187,7 @@ static void shm_destroy(struct ipc_names
+ 						shp->mlock_user);
+ 	fput (shp->shm_file);
+ 	security_shm_free(shp);
++	put_vx_info(vxi);
+ 	ipc_rcu_putref(shp);
+ }
+ 
+@@ -350,11 +358,15 @@ static int newseg(struct ipc_namespace *
+ 	if (ns->shm_tot + numpages > ns->shm_ctlall)
+ 		return -ENOSPC;
+ 
++	if (!vx_ipcshm_avail(current_vx_info(), numpages))
++		return -ENOSPC;
++
+ 	shp = ipc_rcu_alloc(sizeof(*shp));
+ 	if (!shp)
+ 		return -ENOMEM;
+ 
+ 	shp->shm_perm.key = key;
++	shp->shm_perm.xid = vx_current_xid();
+ 	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
+ 	shp->mlock_user = NULL;
+ 
+@@ -408,6 +420,7 @@ static int newseg(struct ipc_namespace *
+ 	ns->shm_tot += numpages;
+ 	error = shp->shm_perm.id;
+ 	shm_unlock(shp);
++	vx_ipcshm_add(current_vx_info(), key, numpages);
+ 	return error;
+ 
+ no_id:
+--- a/kernel/Makefile	2009-12-03 20:02:57.000000000 +0100
++++ a/kernel/Makefile	2011-06-10 13:03:02.000000000 +0200
+@@ -23,6 +23,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
+ CFLAGS_REMOVE_sched_clock.o = -pg
+ endif
+ 
++obj-y += vserver/
+ obj-$(CONFIG_FREEZER) += freezer.o
+ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
+--- a/kernel/capability.c	2011-05-29 23:42:28.000000000 +0200
++++ a/kernel/capability.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vs_context.h>
+ #include <asm/uaccess.h>
+ 
+ /*
+@@ -121,6 +122,7 @@ static int cap_validate_magic(cap_user_h
+ 	return 0;
+ }
+ 
++
+ /*
+  * The only thing that can change the capabilities of the current
+  * process is the current process. As such, we can't be in this code
+@@ -288,6 +290,8 @@ error:
+ 	return ret;
+ }
+ 
++#include <linux/vserver/base.h>
++
+ /**
+  * capable - Determine if the current task has a superior capability in effect
+  * @cap: The capability to be tested for
+@@ -300,6 +304,9 @@ error:
+  */
+ int capable(int cap)
+ {
++	/* here for now so we don't require task locking */
++	if (vs_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
++		return 0;
+ 	if (unlikely(!cap_valid(cap))) {
+ 		printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
+ 		BUG();
+--- a/kernel/compat.c	2011-05-29 23:42:28.000000000 +0200
++++ a/kernel/compat.c	2011-06-10 13:03:02.000000000 +0200
+@@ -900,7 +900,7 @@ asmlinkage long compat_sys_time(compat_t
+ 	compat_time_t i;
+ 	struct timeval tv;
+ 
+-	do_gettimeofday(&tv);
++	vx_gettimeofday(&tv);
+ 	i = tv.tv_sec;
+ 
+ 	if (tloc) {
+@@ -925,7 +925,7 @@ asmlinkage long compat_sys_stime(compat_
+ 	if (err)
+ 		return err;
+ 
+-	do_settimeofday(&tv);
++	vx_settimeofday(&tv);
+ 	return 0;
+ }
+ 
+--- a/kernel/exit.c	2011-05-29 23:42:28.000000000 +0200
++++ a/kernel/exit.c	2011-06-10 13:03:02.000000000 +0200
+@@ -48,6 +48,10 @@
+ #include <linux/fs_struct.h>
+ #include <linux/init_task.h>
+ #include <linux/perf_event.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_pid.h>
+ #include <trace/events/sched.h>
+ 
+ #include <asm/uaccess.h>
+@@ -495,9 +499,11 @@ static void close_files(struct files_str
+ 					filp_close(file, files);
+ 					cond_resched();
+ 				}
++				vx_openfd_dec(i);
+ 			}
+ 			i++;
+ 			set >>= 1;
++			cond_resched();
+ 		}
+ 	}
+ }
+@@ -1027,11 +1033,16 @@ NORET_TYPE void do_exit(long code)
+ 
+ 	validate_creds_for_do_exit(tsk);
+ 
++	/* needs to stay after exit_notify() */
++	exit_vx_info(tsk, code);
++	exit_nx_info(tsk);
++
+ 	preempt_disable();
+ 	exit_rcu();
+ 	/* causes final put_task_struct in finish_task_switch(). */
+ 	tsk->state = TASK_DEAD;
+ 	schedule();
++	printk("bad task: %p [%lx]\n", current, current->state);
+ 	BUG();
+ 	/* Avoid "noreturn function does return".  */
+ 	for (;;)
+--- a/kernel/fork.c	2011-05-29 23:42:28.000000000 +0200
++++ a/kernel/fork.c	2011-06-10 13:03:02.000000000 +0200
+@@ -64,6 +64,10 @@
+ #include <linux/magic.h>
+ #include <linux/perf_event.h>
+ #include <linux/posix-timers.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -151,6 +155,8 @@ void free_task(struct task_struct *tsk)
+ 	account_kernel_stack(tsk->stack, -1);
+ 	free_thread_info(tsk->stack);
+ 	rt_mutex_debug_task_free(tsk);
++	clr_vx_info(&tsk->vx_info);
++	clr_nx_info(&tsk->nx_info);
+ 	ftrace_graph_exit_task(tsk);
+ 	free_task_struct(tsk);
+ }
+@@ -296,6 +302,8 @@ static int dup_mmap(struct mm_struct *mm
+ 	mm->free_area_cache = oldmm->mmap_base;
+ 	mm->cached_hole_size = ~0UL;
+ 	mm->map_count = 0;
++	__set_mm_counter(mm, file_rss, 0);
++	__set_mm_counter(mm, anon_rss, 0);
+ 	cpumask_clear(mm_cpumask(mm));
+ 	mm->mm_rb = RB_ROOT;
+ 	rb_link = &mm->mm_rb.rb_node;
+@@ -311,7 +319,7 @@ static int dup_mmap(struct mm_struct *mm
+ 
+ 		if (mpnt->vm_flags & VM_DONTCOPY) {
+ 			long pages = vma_pages(mpnt);
+-			mm->total_vm -= pages;
++			vx_vmpages_sub(mm, pages);
+ 			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
+ 								-pages);
+ 			continue;
+@@ -455,8 +463,8 @@ static struct mm_struct * mm_init(struct
+ 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
+ 	mm->core_state = NULL;
+ 	mm->nr_ptes = 0;
+-	set_mm_counter(mm, file_rss, 0);
+-	set_mm_counter(mm, anon_rss, 0);
++	__set_mm_counter(mm, file_rss, 0);
++	__set_mm_counter(mm, anon_rss, 0);
+ 	spin_lock_init(&mm->page_table_lock);
+ 	mm->free_area_cache = TASK_UNMAPPED_BASE;
+ 	mm->cached_hole_size = ~0UL;
+@@ -466,6 +474,7 @@ static struct mm_struct * mm_init(struct
+ 	if (likely(!mm_alloc_pgd(mm))) {
+ 		mm->def_flags = 0;
+ 		mmu_notifier_mm_init(mm);
++		set_vx_info(&mm->mm_vx_info, p->vx_info);
+ 		return mm;
+ 	}
+ 
+@@ -499,6 +508,7 @@ void __mmdrop(struct mm_struct *mm)
+ 	mm_free_pgd(mm);
+ 	destroy_context(mm);
+ 	mmu_notifier_mm_destroy(mm);
++	clr_vx_info(&mm->mm_vx_info);
+ 	free_mm(mm);
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+@@ -634,6 +644,7 @@ struct mm_struct *dup_mm(struct task_str
+ 		goto fail_nomem;
+ 
+ 	memcpy(mm, oldmm, sizeof(*mm));
++	mm->mm_vx_info = NULL;
+ 
+ 	/* Initializing for Swap token stuff */
+ 	mm->token_priority = 0;
+@@ -672,6 +683,7 @@ fail_nocontext:
+ 	 * If init_new_context() failed, we cannot use mmput() to free the mm
+ 	 * because it calls destroy_context()
+ 	 */
++	clr_vx_info(&mm->mm_vx_info);
+ 	mm_free_pgd(mm);
+ 	free_mm(mm);
+ 	return NULL;
+@@ -986,6 +998,8 @@ static struct task_struct *copy_process(
+ 	int retval;
+ 	struct task_struct *p;
+ 	int cgroup_callbacks_done = 0;
++	struct vx_info *vxi;
++	struct nx_info *nxi;
+ 
+ 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
+ 		return ERR_PTR(-EINVAL);
+@@ -1032,12 +1046,28 @@ static struct task_struct *copy_process(
+ 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
+ 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
+ #endif
++	init_vx_info(&p->vx_info, current_vx_info());
++	init_nx_info(&p->nx_info, current_nx_info());
++
++	/* check vserver memory */
++	if (p->mm && !(clone_flags & CLONE_VM)) {
++		if (vx_vmpages_avail(p->mm, p->mm->total_vm))
++			vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm);
++		else
++			goto bad_fork_free;
++	}
++	if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
++		if (!vx_rss_avail(p->mm, get_mm_counter(p->mm, file_rss)))
++			goto bad_fork_cleanup_vm;
++	}
+ 	retval = -EAGAIN;
++	if (!vx_nproc_avail(1))
++		goto bad_fork_cleanup_vm;
+ 	if (atomic_read(&p->real_cred->user->processes) >=
+ 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+ 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+ 		    p->real_cred->user != INIT_USER)
+-			goto bad_fork_free;
++			goto bad_fork_cleanup_vm;
+ 	}
+ 
+ 	retval = copy_creds(p, clone_flags);
+@@ -1289,6 +1319,18 @@ static struct task_struct *copy_process(
+ 
+ 	total_forks++;
+ 	spin_unlock(&current->sighand->siglock);
++
++	/* p is copy of current */
++	vxi = p->vx_info;
++	if (vxi) {
++		claim_vx_info(vxi, p);
++		atomic_inc(&vxi->cvirt.nr_threads);
++		atomic_inc(&vxi->cvirt.total_forks);
++		vx_nproc_inc(p);
++	}
++	nxi = p->nx_info;
++	if (nxi)
++		claim_nx_info(nxi, p);
+ 	write_unlock_irq(&tasklist_lock);
+ 	proc_fork_connector(p);
+ 	cgroup_post_fork(p);
+@@ -1330,6 +1372,9 @@ bad_fork_cleanup_cgroup:
+ bad_fork_cleanup_count:
+ 	atomic_dec(&p->cred->user->processes);
+ 	exit_creds(p);
++bad_fork_cleanup_vm:
++	if (p->mm && !(clone_flags & CLONE_VM))
++		vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm);
+ bad_fork_free:
+ 	free_task(p);
+ fork_out:
+--- a/kernel/kthread.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/kthread.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <linux/file.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
++#include <linux/vs_pid.h>
+ #include <trace/events/sched.h>
+ 
+ static DEFINE_SPINLOCK(kthread_create_lock);
+--- a/kernel/nsproxy.c	2009-09-10 15:26:28.000000000 +0200
++++ a/kernel/nsproxy.c	2011-06-10 13:03:02.000000000 +0200
+@@ -19,6 +19,8 @@
+ #include <linux/mnt_namespace.h>
+ #include <linux/utsname.h>
+ #include <linux/pid_namespace.h>
++#include <linux/vserver/global.h>
++#include <linux/vserver/debug.h>
+ #include <net/net_namespace.h>
+ #include <linux/ipc_namespace.h>
+ 
+@@ -31,8 +33,11 @@ static inline struct nsproxy *create_nsp
+ 	struct nsproxy *nsproxy;
+ 
+ 	nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
+-	if (nsproxy)
++	if (nsproxy) {
+ 		atomic_set(&nsproxy->count, 1);
++		atomic_inc(&vs_global_nsproxy);
++	}
++	vxdprintk(VXD_CBIT(space, 2), "create_nsproxy = %p[1]", nsproxy);
+ 	return nsproxy;
+ }
+ 
+@@ -41,41 +46,52 @@ static inline struct nsproxy *create_nsp
+  * Return the newly created nsproxy.  Do not attach this to the task,
+  * leave it to the caller to do proper locking and attach it to task.
+  */
+-static struct nsproxy *create_new_namespaces(unsigned long flags,
+-			struct task_struct *tsk, struct fs_struct *new_fs)
++static struct nsproxy *unshare_namespaces(unsigned long flags,
++			struct nsproxy *orig, struct fs_struct *new_fs)
+ {
+ 	struct nsproxy *new_nsp;
+ 	int err;
+ 
++	vxdprintk(VXD_CBIT(space, 4),
++		"unshare_namespaces(0x%08lx,%p,%p)",
++		flags, orig, new_fs);
++
+ 	new_nsp = create_nsproxy();
+ 	if (!new_nsp)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
++	new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_fs);
+ 	if (IS_ERR(new_nsp->mnt_ns)) {
+ 		err = PTR_ERR(new_nsp->mnt_ns);
+ 		goto out_ns;
+ 	}
+ 
+-	new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns);
++	new_nsp->uts_ns = copy_utsname(flags, orig->uts_ns);
+ 	if (IS_ERR(new_nsp->uts_ns)) {
+ 		err = PTR_ERR(new_nsp->uts_ns);
+ 		goto out_uts;
+ 	}
+ 
+-	new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns);
++	new_nsp->ipc_ns = copy_ipcs(flags, orig->ipc_ns);
+ 	if (IS_ERR(new_nsp->ipc_ns)) {
+ 		err = PTR_ERR(new_nsp->ipc_ns);
+ 		goto out_ipc;
+ 	}
+ 
+-	new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
++	new_nsp->pid_ns = copy_pid_ns(flags, orig->pid_ns);
+ 	if (IS_ERR(new_nsp->pid_ns)) {
+ 		err = PTR_ERR(new_nsp->pid_ns);
+ 		goto out_pid;
+ 	}
+ 
+-	new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
++	/* disabled now?
++	new_nsp->user_ns = copy_user_ns(flags, orig->user_ns);
++	if (IS_ERR(new_nsp->user_ns)) {
++		err = PTR_ERR(new_nsp->user_ns);
++		goto out_user;
++	} */
++
++	new_nsp->net_ns = copy_net_ns(flags, orig->net_ns);
+ 	if (IS_ERR(new_nsp->net_ns)) {
+ 		err = PTR_ERR(new_nsp->net_ns);
+ 		goto out_net;
+@@ -100,6 +116,38 @@ out_ns:
+ 	return ERR_PTR(err);
+ }
+ 
++static struct nsproxy *create_new_namespaces(int flags, struct task_struct *tsk,
++			struct fs_struct *new_fs)
++{
++	return unshare_namespaces(flags, tsk->nsproxy, new_fs);
++}
++
++/*
++ * copies the nsproxy, setting refcount to 1, and grabbing a
++ * reference to all contained namespaces.
++ */
++struct nsproxy *copy_nsproxy(struct nsproxy *orig)
++{
++	struct nsproxy *ns = create_nsproxy();
++
++	if (ns) {
++		memcpy(ns, orig, sizeof(struct nsproxy));
++		atomic_set(&ns->count, 1);
++
++		if (ns->mnt_ns)
++			get_mnt_ns(ns->mnt_ns);
++		if (ns->uts_ns)
++			get_uts_ns(ns->uts_ns);
++		if (ns->ipc_ns)
++			get_ipc_ns(ns->ipc_ns);
++		if (ns->pid_ns)
++			get_pid_ns(ns->pid_ns);
++		if (ns->net_ns)
++			get_net(ns->net_ns);
++	}
++	return ns;
++}
++
+ /*
+  * called from clone.  This now handles copy for nsproxy and all
+  * namespaces therein.
+@@ -107,9 +155,12 @@ out_ns:
+ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
+ {
+ 	struct nsproxy *old_ns = tsk->nsproxy;
+-	struct nsproxy *new_ns;
++	struct nsproxy *new_ns = NULL;
+ 	int err = 0;
+ 
++	vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])",
++		flags, tsk, old_ns);
++
+ 	if (!old_ns)
+ 		return 0;
+ 
+@@ -119,7 +170,7 @@ int copy_namespaces(unsigned long flags,
+ 				CLONE_NEWPID | CLONE_NEWNET)))
+ 		return 0;
+ 
+-	if (!capable(CAP_SYS_ADMIN)) {
++	if (!vx_can_unshare(CAP_SYS_ADMIN, flags)) {
+ 		err = -EPERM;
+ 		goto out;
+ 	}
+@@ -146,6 +197,9 @@ int copy_namespaces(unsigned long flags,
+ 
+ out:
+ 	put_nsproxy(old_ns);
++	vxdprintk(VXD_CBIT(space, 3),
++		"copy_namespaces(0x%08lx,%p[%p]) = %d [%p]",
++		flags, tsk, old_ns, err, new_ns);
+ 	return err;
+ }
+ 
+@@ -159,7 +213,9 @@ void free_nsproxy(struct nsproxy *ns)
+ 		put_ipc_ns(ns->ipc_ns);
+ 	if (ns->pid_ns)
+ 		put_pid_ns(ns->pid_ns);
+-	put_net(ns->net_ns);
++	if (ns->net_ns)
++		put_net(ns->net_ns);
++	atomic_dec(&vs_global_nsproxy);
+ 	kmem_cache_free(nsproxy_cachep, ns);
+ }
+ 
+@@ -172,11 +228,15 @@ int unshare_nsproxy_namespaces(unsigned 
+ {
+ 	int err = 0;
+ 
++	vxdprintk(VXD_CBIT(space, 4),
++		"unshare_nsproxy_namespaces(0x%08lx,[%p])",
++		unshare_flags, current->nsproxy);
++
+ 	if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
+ 			       CLONE_NEWNET)))
+ 		return 0;
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_can_unshare(CAP_SYS_ADMIN, unshare_flags))
+ 		return -EPERM;
+ 
+ 	*new_nsp = create_new_namespaces(unshare_flags, current,
+--- a/kernel/pid.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/pid.c	2011-06-10 13:03:02.000000000 +0200
+@@ -36,6 +36,7 @@
+ #include <linux/pid_namespace.h>
+ #include <linux/init_task.h>
+ #include <linux/syscalls.h>
++#include <linux/vs_pid.h>
+ 
+ #define pid_hashfn(nr, ns)	\
+ 	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
+@@ -308,7 +309,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
+ 
+ struct pid *find_vpid(int nr)
+ {
+-	return find_pid_ns(nr, current->nsproxy->pid_ns);
++	return find_pid_ns(vx_rmap_pid(nr), current->nsproxy->pid_ns);
+ }
+ EXPORT_SYMBOL_GPL(find_vpid);
+ 
+@@ -368,6 +369,9 @@ void transfer_pid(struct task_struct *ol
+ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
+ {
+ 	struct task_struct *result = NULL;
++
++	if (type == PIDTYPE_REALPID)
++		type = PIDTYPE_PID;
+ 	if (pid) {
+ 		struct hlist_node *first;
+ 		first = rcu_dereference(pid->tasks[type].first);
+@@ -383,7 +387,7 @@ EXPORT_SYMBOL(pid_task);
+  */
+ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
+ {
+-	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
++	return pid_task(find_pid_ns(vx_rmap_pid(nr), ns), PIDTYPE_PID);
+ }
+ 
+ struct task_struct *find_task_by_vpid(pid_t vnr)
+@@ -425,7 +429,7 @@ struct pid *find_get_pid(pid_t nr)
+ }
+ EXPORT_SYMBOL_GPL(find_get_pid);
+ 
+-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
++pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns)
+ {
+ 	struct upid *upid;
+ 	pid_t nr = 0;
+@@ -438,6 +442,11 @@ pid_t pid_nr_ns(struct pid *pid, struct 
+ 	return nr;
+ }
+ 
++pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
++{
++	return vx_map_pid(pid_unmapped_nr_ns(pid, ns));
++}
++
+ pid_t pid_vnr(struct pid *pid)
+ {
+ 	return pid_nr_ns(pid, current->nsproxy->pid_ns);
+--- a/kernel/pid_namespace.c	2009-12-03 20:02:58.000000000 +0100
++++ a/kernel/pid_namespace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -13,6 +13,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/err.h>
+ #include <linux/acct.h>
++#include <linux/vserver/global.h>
+ 
+ #define BITS_PER_PAGE		(PAGE_SIZE*8)
+ 
+@@ -86,6 +87,7 @@ static struct pid_namespace *create_pid_
+ 		goto out_free_map;
+ 
+ 	kref_init(&ns->kref);
++	atomic_inc(&vs_global_pid_ns);
+ 	ns->level = level;
+ 	ns->parent = get_pid_ns(parent_pid_ns);
+ 
+@@ -111,6 +113,7 @@ static void destroy_pid_namespace(struct
+ 
+ 	for (i = 0; i < PIDMAP_ENTRIES; i++)
+ 		kfree(ns->pidmap[i].page);
++	atomic_dec(&vs_global_pid_ns);
+ 	kmem_cache_free(pid_ns_cachep, ns);
+ }
+ 
+--- a/kernel/posix-timers.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/posix-timers.c	2011-06-10 13:03:02.000000000 +0200
+@@ -46,6 +46,7 @@
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+ #include <linux/module.h>
++#include <linux/vs_context.h>
+ 
+ /*
+  * Management arrays for POSIX timers.	 Timers are kept in slab memory
+@@ -363,6 +364,7 @@ int posix_timer_event(struct k_itimer *t
+ {
+ 	struct task_struct *task;
+ 	int shared, ret = -1;
++
+ 	/*
+ 	 * FIXME: if ->sigq is queued we can race with
+ 	 * dequeue_signal()->do_schedule_next_timer().
+@@ -379,10 +381,18 @@ int posix_timer_event(struct k_itimer *t
+ 	rcu_read_lock();
+ 	task = pid_task(timr->it_pid, PIDTYPE_PID);
+ 	if (task) {
++		struct vx_info_save vxis;
++		struct vx_info *vxi;
++
++		vxi = get_vx_info(task->vx_info);
++		enter_vx_info(vxi, &vxis);
+ 		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+ 		ret = send_sigqueue(timr->sigq, task, shared);
++		leave_vx_info(&vxis);
++		put_vx_info(vxi);
+ 	}
+ 	rcu_read_unlock();
++
+ 	/* If we failed to send the signal the timer stops. */
+ 	return ret > 0;
+ }
+--- a/kernel/printk.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/printk.c	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/bootmem.h>
+ #include <linux/syscalls.h>
+ #include <linux/kexec.h>
++#include <linux/vs_cvirt.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -276,18 +277,13 @@ int do_syslog(int type, char __user *buf
+ 	unsigned i, j, limit, count;
+ 	int do_clear = 0;
+ 	char c;
+-	int error = 0;
++	int error;
+ 
+ 	error = security_syslog(type);
+ 	if (error)
+ 		return error;
+ 
+-	switch (type) {
+-	case 0:		/* Close log */
+-		break;
+-	case 1:		/* Open log */
+-		break;
+-	case 2:		/* Read from log */
++	if ((type >= 2) && (type <= 4)) {
+ 		error = -EINVAL;
+ 		if (!buf || len < 0)
+ 			goto out;
+@@ -298,6 +294,16 @@ int do_syslog(int type, char __user *buf
+ 			error = -EFAULT;
+ 			goto out;
+ 		}
++	}
++	if (!vx_check(0, VS_ADMIN|VS_WATCH))
++		return vx_do_syslog(type, buf, len);
++
++	switch (type) {
++	case 0:		/* Close log */
++		break;
++	case 1:		/* Open log */
++		break;
++	case 2:		/* Read from log */
+ 		error = wait_event_interruptible(log_wait,
+ 							(log_start - log_end));
+ 		if (error)
+@@ -322,16 +328,6 @@ int do_syslog(int type, char __user *buf
+ 		do_clear = 1;
+ 		/* FALL THRU */
+ 	case 3:		/* Read last kernel messages */
+-		error = -EINVAL;
+-		if (!buf || len < 0)
+-			goto out;
+-		error = 0;
+-		if (!len)
+-			goto out;
+-		if (!access_ok(VERIFY_WRITE, buf, len)) {
+-			error = -EFAULT;
+-			goto out;
+-		}
+ 		count = len;
+ 		if (count > log_buf_len)
+ 			count = log_buf_len;
+--- a/kernel/ptrace.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/ptrace.c	2011-06-10 14:03:28.000000000 +0200
+@@ -22,6 +22,7 @@
+ #include <linux/pid_namespace.h>
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
++#include <linux/vs_context.h>
+ 
+ 
+ /*
+@@ -151,6 +152,11 @@ int __ptrace_may_access(struct task_stru
+ 		dumpable = get_dumpable(task->mm);
+ 	if (!dumpable && !capable(CAP_SYS_PTRACE))
+ 		return -EPERM;
++	if (!vx_check(task->xid, VS_ADMIN_P|VS_IDENT))
++		return -EPERM;
++	if (!vx_check(task->xid, VS_IDENT) &&
++		!task_vx_flags(task, VXF_STATE_ADMIN, 0))
++		return -EACCES;
+ 
+ 	return security_ptrace_access_check(task, mode);
+ }
+--- a/kernel/sched.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sched.c	2011-06-10 13:15:21.000000000 +0200
+@@ -71,6 +71,8 @@
+ #include <linux/debugfs.h>
+ #include <linux/ctype.h>
+ #include <linux/ftrace.h>
++#include <linux/vs_sched.h>
++#include <linux/vs_cvirt.h>
+ 
+ #include <asm/tlb.h>
+ #include <asm/irq_regs.h>
+@@ -237,6 +239,15 @@ static DEFINE_MUTEX(sched_domains_mutex)
+ 
+ #include <linux/cgroup.h>
+ 
++#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
++struct cfs_bandwidth {
++	spinlock_t		cfs_runtime_lock;
++	ktime_t			cfs_period;
++	u64			cfs_runtime;
++	struct hrtimer		cfs_period_timer;
++};
++#endif
++
+ struct cfs_rq;
+ 
+ static LIST_HEAD(task_groups);
+@@ -251,6 +262,9 @@ struct task_group {
+ 	/* runqueue "owned" by this group on each cpu */
+ 	struct cfs_rq **cfs_rq;
+ 	unsigned long shares;
++#ifdef CONFIG_CFS_HARD_LIMITS
++	struct cfs_bandwidth cfs_bandwidth;
++#endif
+ #endif
+ 
+ #ifdef CONFIG_RT_GROUP_SCHED
+@@ -404,6 +418,19 @@ struct cfs_rq {
+ 	unsigned long rq_weight;
+ #endif
+ #endif
++#ifdef CONFIG_CFS_HARD_LIMITS
++	/* set when the group is throttled  on this cpu */
++	int cfs_throttled;
++
++	/* runtime currently consumed by the group on this rq */
++	u64 cfs_time;
++
++	/* runtime available to the group on this rq */
++	u64 cfs_runtime;
++
++	/* Protects the cfs runtime related fields of this cfs_rq */
++	spinlock_t cfs_runtime_lock;
++#endif
+ };
+ 
+ /* Real-Time classes' related field in a runqueue: */
+@@ -1586,6 +1613,7 @@ static void update_group_shares_cpu(stru
+ 	}
+ }
+ 
++static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+ /*
+  * Re-compute the task group their per cpu shares over the given domain.
+  * This needs to be done in a bottom-up fashion because the rq weight of a
+@@ -1614,8 +1642,10 @@ static int tg_shares_up(struct task_grou
+ 		 * If there are currently no tasks on the cpu pretend there
+ 		 * is one of average load so that when a new task gets to
+ 		 * run here it will not get delayed by group starvation.
++		 * Also if the group is throttled on this cpu, pretend that
++		 * it has no tasks.
+ 		 */
+-		if (!weight)
++		if (!weight || cfs_rq_throttled(tg->cfs_rq[i]))
+ 			weight = NICE_0_LOAD;
+ 
+ 		sum_weight += weight;
+@@ -1792,6 +1822,175 @@ static void cfs_rq_set_shares(struct cfs
+ static void calc_load_account_active(struct rq *this_rq);
+ static void update_sysctl(void);
+ 
++
++#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED)
++
++#ifdef CONFIG_SMP
++static inline const struct cpumask *sched_bw_period_mask(void)
++{
++	return cpu_rq(smp_processor_id())->rd->span;
++}
++#else /* !CONFIG_SMP */
++static inline const struct cpumask *sched_bw_period_mask(void)
++{
++	return cpu_online_mask;
++}
++#endif /* CONFIG_SMP */
++
++#else
++static inline const struct cpumask *sched_bw_period_mask(void)
++{
++	return cpu_online_mask;
++}
++
++#endif
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#ifdef CONFIG_CFS_HARD_LIMITS
++
++/*
++ * Runtime allowed for a cfs group before it is hard limited.
++ * default: Infinite which means no hard limiting.
++ */
++u64 sched_cfs_runtime = RUNTIME_INF;
++
++/*
++ * period over which we hard limit the cfs group's bandwidth.
++ * default: 0.5s
++ */
++u64 sched_cfs_period = 500000;
++
++static inline u64 global_cfs_period(void)
++{
++	return sched_cfs_period * NSEC_PER_USEC;
++}
++
++static inline u64 global_cfs_runtime(void)
++{
++	return RUNTIME_INF;
++}
++
++void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b);
++
++static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
++{
++	spin_lock(&cfs_rq->cfs_runtime_lock);
++}
++
++static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
++{
++	spin_unlock(&cfs_rq->cfs_runtime_lock);
++}
++
++/*
++ * Refresh the runtimes of the throttled groups.
++ * But nothing much to do now, will populate this in later patches.
++ */
++static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
++{
++	struct cfs_bandwidth *cfs_b =
++		container_of(timer, struct cfs_bandwidth, cfs_period_timer);
++
++	do_sched_cfs_period_timer(cfs_b);
++	hrtimer_add_expires_ns(timer, ktime_to_ns(cfs_b->cfs_period));
++	return HRTIMER_RESTART;
++}
++
++/*
++ * TODO: Check if this kind of timer setup is sufficient for cfs or
++ * should we do what rt is doing.
++ */
++static void start_cfs_bandwidth(struct task_group *tg)
++{
++	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
++
++	/*
++	 * Timer isn't setup for groups with infinite runtime
++	 */
++	if (cfs_b->cfs_runtime == RUNTIME_INF)
++		return;
++
++	if (hrtimer_active(&cfs_b->cfs_period_timer))
++		return;
++
++	hrtimer_start_range_ns(&cfs_b->cfs_period_timer, cfs_b->cfs_period,
++			0, HRTIMER_MODE_REL);
++}
++
++static void init_cfs_bandwidth(struct task_group *tg)
++{
++	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
++
++	cfs_b->cfs_period = ns_to_ktime(global_cfs_period());
++	cfs_b->cfs_runtime = global_cfs_runtime();
++
++	spin_lock_init(&cfs_b->cfs_runtime_lock);
++
++	hrtimer_init(&cfs_b->cfs_period_timer,
++			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	cfs_b->cfs_period_timer.function = &sched_cfs_period_timer;
++}
++
++static inline void destroy_cfs_bandwidth(struct task_group *tg)
++{
++	hrtimer_cancel(&tg->cfs_bandwidth.cfs_period_timer);
++}
++
++static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg)
++{
++	cfs_rq->cfs_time = 0;
++	cfs_rq->cfs_throttled = 0;
++	cfs_rq->cfs_runtime = tg->cfs_bandwidth.cfs_runtime;
++	spin_lock_init(&cfs_rq->cfs_runtime_lock);
++}
++
++#else /* !CONFIG_CFS_HARD_LIMITS */
++
++static void init_cfs_bandwidth(struct task_group *tg)
++{
++	return;
++}
++
++static inline void destroy_cfs_bandwidth(struct task_group *tg)
++{
++	return;
++}
++
++static void init_cfs_hard_limits(struct cfs_rq *cfs_rq, struct task_group *tg)
++{
++	return;
++}
++
++static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
++{
++	return;
++}
++
++static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
++{
++	return;
++}
++
++#endif /* CONFIG_CFS_HARD_LIMITS */
++#else /* !CONFIG_FAIR_GROUP_SCHED */
++
++static inline void cfs_rq_runtime_lock(struct cfs_rq *cfs_rq)
++{
++	return;
++}
++
++static inline void cfs_rq_runtime_unlock(struct cfs_rq *cfs_rq)
++{
++	return;
++}
++
++static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
++{
++	return 0;
++}
++
++#endif /* CONFIG_FAIR_GROUP_SCHED */
++
+ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ {
+ 	set_task_rq(p, cpu);
+@@ -3129,9 +3328,17 @@ EXPORT_SYMBOL(avenrun);
+  */
+ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+ {
+-	loads[0] = (avenrun[0] + offset) << shift;
+-	loads[1] = (avenrun[1] + offset) << shift;
+-	loads[2] = (avenrun[2] + offset) << shift;
++	if (vx_flags(VXF_VIRT_LOAD, 0)) {
++		struct vx_info *vxi = current_vx_info();
++
++		loads[0] = (vxi->cvirt.load[0] + offset) << shift;
++		loads[1] = (vxi->cvirt.load[1] + offset) << shift;
++		loads[2] = (vxi->cvirt.load[2] + offset) << shift;
++	} else {
++		loads[0] = (avenrun[0] + offset) << shift;
++		loads[1] = (avenrun[1] + offset) << shift;
++		loads[2] = (avenrun[2] + offset) << shift;
++	}
+ }
+ 
+ static unsigned long
+@@ -5245,16 +5452,19 @@ void account_user_time(struct task_struc
+ 		       cputime_t cputime_scaled)
+ {
+ 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
+ 	cputime64_t tmp;
++	int nice = (TASK_NICE(p) > 0);
+ 
+ 	/* Add user time to process. */
+ 	p->utime = cputime_add(p->utime, cputime);
+ 	p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
++	vx_account_user(vxi, cputime, nice);
+ 	account_group_user_time(p, cputime);
+ 
+ 	/* Add user time to cpustat. */
+ 	tmp = cputime_to_cputime64(cputime);
+-	if (TASK_NICE(p) > 0)
++	if (nice)
+ 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+ 	else
+ 		cpustat->user = cputime64_add(cpustat->user, tmp);
+@@ -5300,6 +5510,7 @@ void account_system_time(struct task_str
+ 			 cputime_t cputime, cputime_t cputime_scaled)
+ {
+ 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
++	struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
+ 	cputime64_t tmp;
+ 
+ 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
+@@ -5310,6 +5521,7 @@ void account_system_time(struct task_str
+ 	/* Add system time to process. */
+ 	p->stime = cputime_add(p->stime, cputime);
+ 	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
++	vx_account_system(vxi, cputime, 0 /* do we have idle time? */);
+ 	account_group_system_time(p, cputime);
+ 
+ 	/* Add system time to cpustat. */
+@@ -6393,7 +6605,7 @@ SYSCALL_DEFINE1(nice, int, increment)
+ 		nice = 19;
+ 
+ 	if (increment < 0 && !can_nice(current, nice))
+-		return -EPERM;
++		return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
+ 
+ 	retval = security_task_setnice(current, nice);
+ 	if (retval)
+@@ -9470,6 +9682,32 @@ static int update_sched_domains(struct n
+ }
+ #endif
+ 
++#ifdef CONFIG_SMP
++static void disable_runtime(struct rq *rq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&rq->lock, flags);
++#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
++	disable_runtime_cfs(rq);
++#endif
++	disable_runtime_rt(rq);
++	spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++static void enable_runtime(struct rq *rq)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&rq->lock, flags);
++#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_CFS_HARD_LIMITS)
++	enable_runtime_cfs(rq);
++#endif
++	enable_runtime_rt(rq);
++	spin_unlock_irqrestore(&rq->lock, flags);
++}
++#endif
++
+ static int update_runtime(struct notifier_block *nfb,
+ 				unsigned long action, void *hcpu)
+ {
+@@ -9602,6 +9840,7 @@ static void init_tg_cfs_entry(struct tas
+ 	struct rq *rq = cpu_rq(cpu);
+ 	tg->cfs_rq[cpu] = cfs_rq;
+ 	init_cfs_rq(cfs_rq, rq);
++	init_cfs_hard_limits(cfs_rq, tg);
+ 	cfs_rq->tg = tg;
+ 	if (add)
+ 		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+@@ -9710,6 +9949,10 @@ void __init sched_init(void)
+ 			global_rt_period(), global_rt_runtime());
+ #endif /* CONFIG_RT_GROUP_SCHED */
+ 
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	init_cfs_bandwidth(&init_task_group);
++#endif
++
+ #ifdef CONFIG_CGROUP_SCHED
+ 	list_add(&init_task_group.list, &task_groups);
+ 	INIT_LIST_HEAD(&init_task_group.children);
+@@ -9731,6 +9974,7 @@ void __init sched_init(void)
+ 		init_cfs_rq(&rq->cfs, rq);
+ 		init_rt_rq(&rq->rt, rq);
+ #ifdef CONFIG_FAIR_GROUP_SCHED
++		init_cfs_hard_limits(&rq->cfs, &init_task_group);
+ 		init_task_group.shares = init_task_group_load;
+ 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+ #ifdef CONFIG_CGROUP_SCHED
+@@ -9992,6 +10236,7 @@ static void free_fair_sched_group(struct
+ {
+ 	int i;
+ 
++	destroy_cfs_bandwidth(tg);
+ 	for_each_possible_cpu(i) {
+ 		if (tg->cfs_rq)
+ 			kfree(tg->cfs_rq[i]);
+@@ -10018,6 +10263,7 @@ int alloc_fair_sched_group(struct task_g
+ 	if (!tg->se)
+ 		goto err;
+ 
++	init_cfs_bandwidth(tg);
+ 	tg->shares = NICE_0_LOAD;
+ 
+ 	for_each_possible_cpu(i) {
+@@ -10734,6 +10980,100 @@ static u64 cpu_shares_read_u64(struct cg
+ 
+ 	return (u64) tg->shares;
+ }
++
++#ifdef CONFIG_CFS_HARD_LIMITS
++
++static int tg_set_cfs_bandwidth(struct task_group *tg,
++		u64 cfs_period, u64 cfs_runtime)
++{
++	int i;
++
++	spin_lock_irq(&tg->cfs_bandwidth.cfs_runtime_lock);
++	tg->cfs_bandwidth.cfs_period = ns_to_ktime(cfs_period);
++	tg->cfs_bandwidth.cfs_runtime = cfs_runtime;
++
++	for_each_possible_cpu(i) {
++		struct cfs_rq *cfs_rq = tg->cfs_rq[i];
++
++		cfs_rq_runtime_lock(cfs_rq);
++		cfs_rq->cfs_runtime = cfs_runtime;
++		cfs_rq_runtime_unlock(cfs_rq);
++	}
++
++	start_cfs_bandwidth(tg);
++	spin_unlock_irq(&tg->cfs_bandwidth.cfs_runtime_lock);
++	return 0;
++}
++
++int tg_set_cfs_runtime(struct task_group *tg, long cfs_runtime_us)
++{
++	u64 cfs_runtime, cfs_period;
++
++	cfs_period = ktime_to_ns(tg->cfs_bandwidth.cfs_period);
++	cfs_runtime = (u64)cfs_runtime_us * NSEC_PER_USEC;
++	if (cfs_runtime_us < 0)
++		cfs_runtime = RUNTIME_INF;
++
++	return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime);
++}
++
++long tg_get_cfs_runtime(struct task_group *tg)
++{
++	u64 cfs_runtime_us;
++
++	if (tg->cfs_bandwidth.cfs_runtime == RUNTIME_INF)
++		return -1;
++
++	cfs_runtime_us = tg->cfs_bandwidth.cfs_runtime;
++	do_div(cfs_runtime_us, NSEC_PER_USEC);
++	return cfs_runtime_us;
++}
++
++int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
++{
++	u64 cfs_runtime, cfs_period;
++
++	cfs_period = (u64)cfs_period_us * NSEC_PER_USEC;
++	cfs_runtime = tg->cfs_bandwidth.cfs_runtime;
++
++	if (cfs_period == 0)
++		return -EINVAL;
++
++	return tg_set_cfs_bandwidth(tg, cfs_period, cfs_runtime);
++}
++
++long tg_get_cfs_period(struct task_group *tg)
++{
++	u64 cfs_period_us;
++
++	cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.cfs_period);
++	do_div(cfs_period_us, NSEC_PER_USEC);
++	return cfs_period_us;
++}
++
++static s64 cpu_cfs_runtime_read_s64(struct cgroup *cgrp, struct cftype *cft)
++{
++	return tg_get_cfs_runtime(cgroup_tg(cgrp));
++}
++
++static int cpu_cfs_runtime_write_s64(struct cgroup *cgrp, struct cftype *cftype,
++				s64 cfs_runtime_us)
++{
++	return tg_set_cfs_runtime(cgroup_tg(cgrp), cfs_runtime_us);
++}
++
++static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
++{
++	return tg_get_cfs_period(cgroup_tg(cgrp));
++}
++
++static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
++				u64 cfs_period_us)
++{
++	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
++}
++
++#endif /* CONFIG_CFS_HARD_LIMITS */
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
+ 
+ #ifdef CONFIG_RT_GROUP_SCHED
+@@ -10767,6 +11107,18 @@ static struct cftype cpu_files[] = {
+ 		.read_u64 = cpu_shares_read_u64,
+ 		.write_u64 = cpu_shares_write_u64,
+ 	},
++#ifdef CONFIG_CFS_HARD_LIMITS
++	{
++		.name = "cfs_runtime_us",
++		.read_s64 = cpu_cfs_runtime_read_s64,
++		.write_s64 = cpu_cfs_runtime_write_s64,
++	},
++	{
++		.name = "cfs_period_us",
++		.read_u64 = cpu_cfs_period_read_u64,
++		.write_u64 = cpu_cfs_period_write_u64,
++	},
++#endif /* CONFIG_CFS_HARD_LIMITS */
+ #endif
+ #ifdef CONFIG_RT_GROUP_SCHED
+ 	{
+--- a/kernel/sched_debug.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sched_debug.c	2011-06-10 13:03:02.000000000 +0200
+@@ -80,6 +80,11 @@ static void print_cfs_group_stats(struct
+ 	PN(se->wait_max);
+ 	PN(se->wait_sum);
+ 	P(se->wait_count);
++#ifdef CONFIG_CFS_HARD_LIMITS
++	PN(se->throttle_max);
++	PN(se->throttle_sum);
++	P(se->throttle_count);
++#endif
+ #endif
+ 	P(se->load.weight);
+ #undef PN
+@@ -209,6 +214,16 @@ void print_cfs_rq(struct seq_file *m, in
+ #ifdef CONFIG_SMP
+ 	SEQ_printf(m, "  .%-30s: %lu\n", "shares", cfs_rq->shares);
+ #endif
++#ifdef CONFIG_CFS_HARD_LIMITS
++	spin_lock_irqsave(&rq->lock, flags);
++	SEQ_printf(m, "  .%-30s: %d\n", "cfs_throttled",
++			cfs_rq->cfs_throttled);
++	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "cfs_time",
++			SPLIT_NS(cfs_rq->cfs_time));
++	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "cfs_runtime",
++			SPLIT_NS(cfs_rq->cfs_runtime));
++	spin_unlock_irqrestore(&rq->lock, flags);
++#endif /* CONFIG_CFS_HARD_LIMITS */
+ 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
+ #endif
+ }
+@@ -309,7 +324,7 @@ static int sched_debug_show(struct seq_f
+ 	u64 now = ktime_to_ns(ktime_get());
+ 	int cpu;
+ 
+-	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
++	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
+ 		init_utsname()->release,
+ 		(int)strcspn(init_utsname()->version, " "),
+ 		init_utsname()->version);
+--- a/kernel/sched_fair.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sched_fair.c	2011-06-10 13:08:20.000000000 +0200
+@@ -192,7 +192,308 @@ find_matching_se(struct sched_entity **s
+ 	}
+ }
+ 
+-#else	/* !CONFIG_FAIR_GROUP_SCHED */
++#ifdef CONFIG_CFS_HARD_LIMITS
++
++static inline void update_stats_throttle_start(struct cfs_rq *cfs_rq,
++			struct sched_entity *se)
++{
++	schedstat_set(se->throttle_start, rq_of(cfs_rq)->clock);
++}
++
++static inline void update_stats_throttle_end(struct cfs_rq *cfs_rq,
++			struct sched_entity *se)
++{
++	schedstat_set(se->throttle_max, max(se->throttle_max,
++			rq_of(cfs_rq)->clock - se->throttle_start));
++	schedstat_set(se->throttle_count, se->throttle_count + 1);
++	schedstat_set(se->throttle_sum, se->throttle_sum +
++			rq_of(cfs_rq)->clock - se->throttle_start);
++	schedstat_set(se->throttle_start, 0);
++}
++
++static inline
++struct cfs_rq *sched_cfs_period_cfs_rq(struct cfs_bandwidth *cfs_b, int cpu)
++{
++	return container_of(cfs_b, struct task_group,
++			cfs_bandwidth)->cfs_rq[cpu];
++}
++
++static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
++{
++	return cfs_rq->cfs_throttled;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Ensure this RQ takes back all the runtime it lend to its neighbours.
++ */
++static void disable_runtime_cfs(struct rq *rq)
++{
++	struct root_domain *rd = rq->rd;
++	struct cfs_rq *cfs_rq;
++
++	if (unlikely(!scheduler_running))
++		return;
++
++	for_each_leaf_cfs_rq(rq, cfs_rq) {
++		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
++		s64 want;
++		int i;
++
++		spin_lock(&cfs_b->cfs_runtime_lock);
++		spin_lock(&cfs_rq->cfs_runtime_lock);
++
++		/*
++		 * Either we're all are infinity and nobody needs to borrow,
++		 * or we're already disabled and this have nothing to do, or
++		 * we have exactly the right amount of runtime to take out.
++		 */
++		 if (cfs_rq->cfs_runtime == RUNTIME_INF ||
++				cfs_rq->cfs_runtime == cfs_b->cfs_runtime)
++			goto balanced;
++		spin_unlock(&cfs_rq->cfs_runtime_lock);
++
++		/*
++		 * Calculate the difference between what we started out with
++		 * and what we current have, that's the amount of runtime
++		 * we lend and now have to reclaim.
++		 */
++		 want = cfs_b->cfs_runtime - cfs_rq->cfs_runtime;
++
++		/*
++		 * Greedy reclaim, take back as much as possible.
++		 */
++		for_each_cpu(i, rd->span) {
++			struct cfs_rq *iter = sched_cfs_period_cfs_rq(cfs_b, i);
++			s64 diff;
++
++			/*
++			 * Can't reclaim from ourselves or disabled runqueues.
++			 */
++			if (iter == cfs_rq || iter->cfs_runtime == RUNTIME_INF)
++				continue;
++
++			spin_lock(&iter->cfs_runtime_lock);
++			if (want > 0) {
++				diff = min_t(s64, iter->cfs_runtime, want);
++				iter->cfs_runtime -= diff;
++				want -= diff;
++			} else {
++				iter->cfs_runtime -= want;
++				want -= want;
++			}
++
++			spin_unlock(&iter->cfs_runtime_lock);
++			if (!want)
++				break;
++		}
++
++		spin_lock(&cfs_rq->cfs_runtime_lock);
++		/*
++		 * We cannot be left wanting - that would mean some
++		 * runtime leaked out of the system.
++		 */
++		BUG_ON(want);
++balanced:
++		/*
++		 * Disable all the borrow logic by pretending we have infinite
++		 * runtime - in which case borrowing doesn't make sense.
++		 */
++		 cfs_rq->cfs_runtime = RUNTIME_INF;
++		 spin_unlock(&cfs_rq->cfs_runtime_lock);
++		 spin_unlock(&cfs_b->cfs_runtime_lock);
++	}
++}
++
++static void enable_runtime_cfs(struct rq *rq)
++{
++	struct cfs_rq *cfs_rq;
++
++	if (unlikely(!scheduler_running))
++		return;
++
++	/*
++	 * Reset each runqueue's bandwidth settings
++	 */
++	for_each_leaf_cfs_rq(rq, cfs_rq) {
++		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
++
++		spin_lock(&cfs_b->cfs_runtime_lock);
++		spin_lock(&cfs_rq->cfs_runtime_lock);
++		cfs_rq->cfs_runtime = cfs_b->cfs_runtime;
++		cfs_rq->cfs_time = 0;
++		cfs_rq->cfs_throttled = 0;
++		spin_unlock(&cfs_rq->cfs_runtime_lock);
++		spin_unlock(&cfs_b->cfs_runtime_lock);
++	}
++}
++
++/*
++ * Ran out of runtime, check if we can borrow some from others
++ * instead of getting throttled right away.
++ */
++static void do_cfs_balance_runtime(struct cfs_rq *cfs_rq)
++{
++	struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
++	const struct cpumask *span = sched_bw_period_mask();
++	int i, weight;
++	u64 cfs_period;
++
++	weight = cpumask_weight(span);
++	spin_lock(&cfs_b->cfs_runtime_lock);
++	cfs_period = ktime_to_ns(cfs_b->cfs_period);
++
++	for_each_cpu(i, span) {
++		struct cfs_rq *borrow_cfs_rq =
++				sched_cfs_period_cfs_rq(cfs_b, i);
++		s64 diff;
++
++		if (borrow_cfs_rq == cfs_rq)
++			continue;
++
++		cfs_rq_runtime_lock(borrow_cfs_rq);
++		if (borrow_cfs_rq->cfs_runtime == RUNTIME_INF) {
++			cfs_rq_runtime_unlock(borrow_cfs_rq);
++			continue;
++		}
++
++		diff = borrow_cfs_rq->cfs_runtime - borrow_cfs_rq->cfs_time;
++		if (diff > 0) {
++			diff = div_u64((u64)diff, weight);
++			if (cfs_rq->cfs_runtime + diff > cfs_period)
++				diff = cfs_period - cfs_rq->cfs_runtime;
++			borrow_cfs_rq->cfs_runtime -= diff;
++			cfs_rq->cfs_runtime += diff;
++			if (cfs_rq->cfs_runtime == cfs_period) {
++				cfs_rq_runtime_unlock(borrow_cfs_rq);
++				break;
++			}
++		}
++		cfs_rq_runtime_unlock(borrow_cfs_rq);
++	}
++	spin_unlock(&cfs_b->cfs_runtime_lock);
++}
++
++/*
++ * Called with rq->runtime_lock held.
++ */
++static void cfs_balance_runtime(struct cfs_rq *cfs_rq)
++{
++	cfs_rq_runtime_unlock(cfs_rq);
++	do_cfs_balance_runtime(cfs_rq);
++	cfs_rq_runtime_lock(cfs_rq);
++}
++
++#else /* !CONFIG_SMP */
++
++static void cfs_balance_runtime(struct cfs_rq *cfs_rq)
++{
++	return;
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * Check if group entity exceeded its runtime. If so, mark the cfs_rq as
++ * throttled mark the current task for reschedling.
++ */
++static void sched_cfs_runtime_exceeded(struct sched_entity *se,
++	struct task_struct *tsk_curr, unsigned long delta_exec)
++{
++	struct cfs_rq *cfs_rq;
++
++	cfs_rq = group_cfs_rq(se);
++
++	if (cfs_rq->cfs_runtime == RUNTIME_INF)
++		return;
++
++	cfs_rq->cfs_time += delta_exec;
++
++	if (cfs_rq_throttled(cfs_rq))
++		return;
++
++	if (cfs_rq->cfs_time > cfs_rq->cfs_runtime)
++		cfs_balance_runtime(cfs_rq);
++
++	if (cfs_rq->cfs_time > cfs_rq->cfs_runtime) {
++		cfs_rq->cfs_throttled = 1;
++		update_stats_throttle_start(cfs_rq, se);
++		resched_task(tsk_curr);
++	}
++}
++
++static inline void update_curr_group(struct sched_entity *curr,
++		unsigned long delta_exec, struct task_struct *tsk_curr)
++{
++	sched_cfs_runtime_exceeded(curr, tsk_curr, delta_exec);
++}
++
++static void enqueue_entity_locked(struct cfs_rq *cfs_rq,
++		struct sched_entity *se, int flags);
++
++static void enqueue_throttled_entity(struct rq *rq, struct sched_entity *se)
++{
++	for_each_sched_entity(se) {
++		struct cfs_rq *gcfs_rq = group_cfs_rq(se);
++
++		if (se->on_rq || cfs_rq_throttled(gcfs_rq) ||
++				!gcfs_rq->nr_running)
++			break;
++		enqueue_entity_locked(cfs_rq_of(se), se, 0);
++	}
++}
++
++/*
++ * Refresh runtimes of all cfs_rqs in this group, i,e.,
++ * refresh runtimes of the representative cfs_rq of this
++ * tg on all cpus. Enqueue any throttled entity back.
++ */
++void do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b)
++{
++	int i;
++	const struct cpumask *span = sched_bw_period_mask();
++	unsigned long flags;
++
++	for_each_cpu(i, span) {
++		struct rq *rq = cpu_rq(i);
++		struct cfs_rq *cfs_rq = sched_cfs_period_cfs_rq(cfs_b, i);
++		struct sched_entity *se = cfs_rq->tg->se[i];
++
++		spin_lock_irqsave(&rq->lock, flags);
++		cfs_rq_runtime_lock(cfs_rq);
++		cfs_rq->cfs_time = 0;
++		if (cfs_rq_throttled(cfs_rq)) {
++			update_rq_clock(rq);
++			update_stats_throttle_end(cfs_rq, se);
++			cfs_rq->cfs_throttled = 0;
++			enqueue_throttled_entity(rq, se);
++		}
++		cfs_rq_runtime_unlock(cfs_rq);
++		spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
++
++#else
++
++static inline void update_curr_group(struct sched_entity *curr,
++		unsigned long delta_exec, struct task_struct *tsk_curr)
++{
++	return;
++}
++
++static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
++{
++	return 0;
++}
++
++#endif /* CONFIG_CFS_HARD_LIMITS */
++
++#else	/* CONFIG_FAIR_GROUP_SCHED */
++
++static inline void update_curr_group(struct sched_entity *curr,
++		unsigned long delta_exec, struct task_struct *tsk_curr)
++{
++	return;
++}
+ 
+ static inline struct task_struct *task_of(struct sched_entity *se)
+ {
+@@ -254,7 +555,6 @@ find_matching_se(struct sched_entity **s
+ 
+ #endif	/* CONFIG_FAIR_GROUP_SCHED */
+ 
+-
+ /**************************************************************
+  * Scheduling class tree data structure manipulation methods:
+  */
+@@ -493,14 +793,25 @@ __update_curr(struct cfs_rq *cfs_rq, str
+ 	update_min_vruntime(cfs_rq);
+ }
+ 
+-static void update_curr(struct cfs_rq *cfs_rq)
++static void update_curr_task(struct sched_entity *curr,
++		unsigned long delta_exec)
++{
++	struct task_struct *curtask = task_of(curr);
++
++	trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
++	cpuacct_charge(curtask, delta_exec);
++	account_group_exec_runtime(curtask, delta_exec);
++}
++
++static int update_curr_common(struct cfs_rq *cfs_rq, unsigned long *delta)
+ {
+ 	struct sched_entity *curr = cfs_rq->curr;
+-	u64 now = rq_of(cfs_rq)->clock_task;
++	struct rq *rq = rq_of(cfs_rq);
++	u64 now = rq->clock_task;
+ 	unsigned long delta_exec;
+ 
+ 	if (unlikely(!curr))
+-		return;
++		return 1;
+ 
+ 	/*
+ 	 * Get the amount of time the current task was running
+@@ -509,17 +820,29 @@ static void update_curr(struct cfs_rq *c
+ 	 */
+ 	delta_exec = (unsigned long)(now - curr->exec_start);
+ 	if (!delta_exec)
+-		return;
++		return 1;
+ 
+ 	__update_curr(cfs_rq, curr, delta_exec);
+ 	curr->exec_start = now;
++	*delta = delta_exec;
++	return 0;
++}
+ 
+-	if (entity_is_task(curr)) {
+-		struct task_struct *curtask = task_of(curr);
++static void update_curr(struct cfs_rq *cfs_rq)
++{
++	struct sched_entity *curr = cfs_rq->curr;
++	struct rq *rq = rq_of(cfs_rq);
++	unsigned long delta_exec;
+ 
+-		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
+-		cpuacct_charge(curtask, delta_exec);
+-		account_group_exec_runtime(curtask, delta_exec);
++	if (update_curr_common(cfs_rq, &delta_exec))
++		return ;
++
++	if (entity_is_task(curr))
++		update_curr_task(curr, delta_exec);
++	else {
++		cfs_rq_runtime_lock(group_cfs_rq(curr));
++		update_curr_group(curr, delta_exec, rq->curr);
++		cfs_rq_runtime_unlock(group_cfs_rq(curr));
+ 	}
+ }
+ 
+@@ -748,6 +1071,25 @@ place_entity(struct cfs_rq *cfs_rq, stru
+ #define ENQUEUE_MIGRATE 2
+ 
+ static void
++enqueue_entity_common(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
++{
++	account_entity_enqueue(cfs_rq, se);
++
++	if (flags & ENQUEUE_WAKEUP) {
++		place_entity(cfs_rq, se, 0);
++		enqueue_sleeper(cfs_rq, se);
++	}
++
++	update_stats_enqueue(cfs_rq, se);
++	check_spread(cfs_rq, se);
++	if (se != cfs_rq->curr)
++		__enqueue_entity(cfs_rq, se);
++
++	if (entity_is_task(se))
++		vx_activate_task(task_of(se));
++}
++
++static void
+ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ {
+ 	/*
+@@ -761,17 +1103,17 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
+ 	 * Update run-time statistics of the 'current'.
+ 	 */
+ 	update_curr(cfs_rq);
+-	account_entity_enqueue(cfs_rq, se);
+-
+-	if (flags & ENQUEUE_WAKEUP) {
+-		place_entity(cfs_rq, se, 0);
+-		enqueue_sleeper(cfs_rq, se);
+-	}
++	enqueue_entity_common(cfs_rq, se, flags);
++}
+ 
+-	update_stats_enqueue(cfs_rq, se);
+-	check_spread(cfs_rq, se);
+-	if (se != cfs_rq->curr)
+-		__enqueue_entity(cfs_rq, se);
++static void enqueue_entity_locked(struct cfs_rq *cfs_rq,
++		struct sched_entity *se, int flags)
++{
++	/*
++	 * Update run-time statistics of the 'current'.
++	 */
++	// update_curr_locked(cfs_rq);
++	enqueue_entity_common(cfs_rq, se, flags);
+ }
+ 
+ static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -815,6 +1157,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
+ 
+ 	if (se != cfs_rq->curr)
+ 		__dequeue_entity(cfs_rq, se);
++	if (entity_is_task(se))
++		vx_deactivate_task(task_of(se));
+ 	account_entity_dequeue(cfs_rq, se);
+ 	update_min_vruntime(cfs_rq);
+ 
+@@ -919,6 +1263,32 @@ static struct sched_entity *pick_next_en
+ 	return se;
+ }
+ 
++/*
++ * Called from put_prev_entity()
++ * If a group entity (@se) is found to be throttled, it will not be put back
++ * on @cfs_rq, which is equivalent to dequeing it.
++ */
++static int dequeue_throttled_entity(struct cfs_rq *cfs_rq,
++		struct sched_entity *se)
++{
++	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
++
++	if (entity_is_task(se))
++		return 0;
++
++	cfs_rq_runtime_lock(gcfs_rq);
++	if (!cfs_rq_throttled(gcfs_rq) && gcfs_rq->nr_running) {
++		cfs_rq_runtime_unlock(gcfs_rq);
++		return 0;
++	}
++
++	__clear_buddies(cfs_rq, se);
++	account_entity_dequeue(cfs_rq, se);
++	cfs_rq->curr = NULL;
++	cfs_rq_runtime_unlock(gcfs_rq);
++	return 1;
++}
++
+ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+ {
+ 	/*
+@@ -930,6 +1300,8 @@ static void put_prev_entity(struct cfs_r
+ 
+ 	check_spread(cfs_rq, prev);
+ 	if (prev->on_rq) {
++		if (dequeue_throttled_entity(cfs_rq, prev))
++			return;
+ 		update_stats_wait_start(cfs_rq, prev);
+ 		/* Put 'current' back into the tree. */
+ 		__enqueue_entity(cfs_rq, prev);
+@@ -1026,10 +1398,28 @@ static inline void hrtick_update(struct 
+ }
+ #endif
+ 
++static int enqueue_group_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
++		 int flags)
++{
++	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
++	int ret = 0;
++
++	cfs_rq_runtime_lock(gcfs_rq);
++	if (cfs_rq_throttled(gcfs_rq)) {
++		ret = 1;
++		goto out;
++	}
++	enqueue_entity_locked(cfs_rq, se, flags);
++out:
++	cfs_rq_runtime_unlock(gcfs_rq);
++	return ret;
++}
++
+ /*
+  * The enqueue_task method is called before nr_running is
+  * increased. Here we update the fair scheduling stats and
+  * then put the task into the rbtree:
++ * Don't enqueue a throttled entity further into the hierarchy.
+  */
+ static void
+ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+@@ -1046,11 +1436,15 @@ enqueue_task_fair(struct rq *rq, struct 
+ 	for_each_sched_entity(se) {
+ 		if (se->on_rq)
+ 			break;
++
+ 		cfs_rq = cfs_rq_of(se);
+-		enqueue_entity(cfs_rq, se, flags);
++		if (entity_is_task(se))
++			enqueue_entity(cfs_rq, se, flags);
++		else
++			if (enqueue_group_entity(cfs_rq, se, flags))
++				break;
+ 		flags = ENQUEUE_WAKEUP;
+ 	}
+-
+ 	hrtick_update(rq);
+ }
+ 
+@@ -1070,6 +1464,17 @@ static void dequeue_task_fair(struct rq 
+ 		/* Don't dequeue parent if it has other entities besides us */
+ 		if (cfs_rq->load.weight)
+ 			break;
++
++		/*
++		 * If this cfs_rq is throttled, then it is already
++		 * dequeued.
++		 */
++		cfs_rq_runtime_lock(cfs_rq);
++		if (cfs_rq_throttled(cfs_rq)) {
++			cfs_rq_runtime_unlock(cfs_rq);
++			break;
++		}
++		cfs_rq_runtime_unlock(cfs_rq);
+ 		sleep = 1;
+ 	}
+ 
+@@ -1896,9 +2301,10 @@ load_balance_fair(struct rq *this_rq, in
+ 		u64 rem_load, moved_load;
+ 
+ 		/*
+-		 * empty group
++		 * empty group or throttled group
+ 		 */
+-		if (!busiest_cfs_rq->task_weight)
++		if (!busiest_cfs_rq->task_weight ||
++				cfs_rq_throttled(busiest_cfs_rq))
+ 			continue;
+ 
+ 		rem_load = (u64)rem_load_move * busiest_weight;
+@@ -1947,6 +2353,12 @@ move_one_task_fair(struct rq *this_rq, i
+ 
+ 	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
+ 		/*
++		 * Don't move task from a throttled cfs_rq
++		 */
++		if (cfs_rq_throttled(busy_cfs_rq))
++			continue;
++
++		/*
+ 		 * pass busy_cfs_rq argument into
+ 		 * load_balance_[start|next]_fair iterators
+ 		 */
+--- a/kernel/sched_rt.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sched_rt.c	2011-06-10 13:03:02.000000000 +0200
+@@ -235,18 +235,6 @@ static int rt_se_boosted(struct sched_rt
+ 	return p->prio != p->normal_prio;
+ }
+ 
+-#ifdef CONFIG_SMP
+-static inline const struct cpumask *sched_rt_period_mask(void)
+-{
+-	return cpu_rq(smp_processor_id())->rd->span;
+-}
+-#else
+-static inline const struct cpumask *sched_rt_period_mask(void)
+-{
+-	return cpu_online_mask;
+-}
+-#endif
+-
+ static inline
+ struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+ {
+@@ -296,11 +284,6 @@ static inline int rt_rq_throttled(struct
+ 	return rt_rq->rt_throttled;
+ }
+ 
+-static inline const struct cpumask *sched_rt_period_mask(void)
+-{
+-	return cpu_online_mask;
+-}
+-
+ static inline
+ struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+ {
+@@ -373,7 +356,7 @@ next:
+ /*
+  * Ensure this RQ takes back all the runtime it lend to its neighbours.
+  */
+-static void __disable_runtime(struct rq *rq)
++static void disable_runtime_rt(struct rq *rq)
+ {
+ 	struct root_domain *rd = rq->rd;
+ 	struct rt_rq *rt_rq;
+@@ -450,16 +433,7 @@ balanced:
+ 	}
+ }
+ 
+-static void disable_runtime(struct rq *rq)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&rq->lock, flags);
+-	__disable_runtime(rq);
+-	spin_unlock_irqrestore(&rq->lock, flags);
+-}
+-
+-static void __enable_runtime(struct rq *rq)
++static void enable_runtime_rt(struct rq *rq)
+ {
+ 	struct rt_rq *rt_rq;
+ 
+@@ -482,15 +456,6 @@ static void __enable_runtime(struct rq *
+ 	}
+ }
+ 
+-static void enable_runtime(struct rq *rq)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&rq->lock, flags);
+-	__enable_runtime(rq);
+-	spin_unlock_irqrestore(&rq->lock, flags);
+-}
+-
+ static int balance_runtime(struct rt_rq *rt_rq)
+ {
+ 	int more = 0;
+@@ -518,7 +483,7 @@ static int do_sched_rt_period_timer(stru
+ 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
+ 		return 1;
+ 
+-	span = sched_rt_period_mask();
++	span = sched_bw_period_mask();
+ 	for_each_cpu(i, span) {
+ 		int enqueue = 0;
+ 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+@@ -1571,7 +1536,7 @@ static void rq_online_rt(struct rq *rq)
+ 	if (rq->rt.overloaded)
+ 		rt_set_overload(rq);
+ 
+-	__enable_runtime(rq);
++	enable_runtime_rt(rq);
+ 
+ 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
+ }
+@@ -1582,7 +1547,7 @@ static void rq_offline_rt(struct rq *rq)
+ 	if (rq->rt.overloaded)
+ 		rt_clear_overload(rq);
+ 
+-	__disable_runtime(rq);
++	disable_runtime_rt(rq);
+ 
+ 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
+ }
+--- a/kernel/signal.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/signal.c	2011-06-10 13:03:02.000000000 +0200
+@@ -27,6 +27,8 @@
+ #include <linux/freezer.h>
+ #include <linux/pid_namespace.h>
+ #include <linux/nsproxy.h>
++#include <linux/vs_context.h>
++#include <linux/vs_pid.h>
+ #include <trace/events/sched.h>
+ 
+ #include <asm/param.h>
+@@ -598,6 +600,14 @@ static int check_kill_permission(int sig
+ 	if (!valid_signal(sig))
+ 		return -EINVAL;
+ 
++	if ((info != SEND_SIG_NOINFO) &&
++		(is_si_special(info) || !SI_FROMUSER(info)))
++		goto skip;
++
++	vxdprintk(VXD_CBIT(misc, 7),
++		"check_kill_permission(%d,%p,%p[#%u,%u])",
++		sig, info, t, vx_task_xid(t), t->pid);
++
+ 	if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+ 		return 0;
+ 
+@@ -627,6 +637,20 @@ static int check_kill_permission(int sig
+ 		}
+ 	}
+ 
++	error = -EPERM;
++	if (t->pid == 1 && current->xid)
++		return error;
++
++	error = -ESRCH;
++	/* FIXME: we shouldn't return ESRCH ever, to avoid
++		  loops, maybe ENOENT or EACCES? */
++	if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) {
++		vxdprintk(current->xid || VXD_CBIT(misc, 7),
++			"signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
++			sig, info, t, vx_task_xid(t), t->pid, current->xid);
++		return error;
++	}
++skip:
+ 	return security_task_kill(t, info, sig, 0);
+ }
+ 
+@@ -1115,7 +1139,7 @@ int kill_pid_info(int sig, struct siginf
+ 	rcu_read_lock();
+ retry:
+ 	p = pid_task(pid, PIDTYPE_PID);
+-	if (p) {
++	if (p && vx_check(vx_task_xid(p), VS_IDENT)) {
+ 		error = group_send_sig_info(sig, info, p);
+ 		if (unlikely(error == -ESRCH))
+ 			/*
+@@ -1154,7 +1178,7 @@ int kill_pid_info_as_uid(int sig, struct
+ 
+ 	read_lock(&tasklist_lock);
+ 	p = pid_task(pid, PIDTYPE_PID);
+-	if (!p) {
++	if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) {
+ 		ret = -ESRCH;
+ 		goto out_unlock;
+ 	}
+@@ -1208,8 +1232,10 @@ static int kill_something_info(int sig, 
+ 		struct task_struct * p;
+ 
+ 		for_each_process(p) {
+-			if (task_pid_vnr(p) > 1 &&
+-					!same_thread_group(p, current)) {
++			if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) &&
++				task_pid_vnr(p) > 1 &&
++				!same_thread_group(p, current) &&
++				!vx_current_initpid(p->pid)) {
+ 				int err = group_send_sig_info(sig, info, p);
+ 				++count;
+ 				if (err != -EPERM)
+@@ -1874,6 +1900,11 @@ relock:
+ 				!sig_kernel_only(signr))
+ 			continue;
+ 
++		/* virtual init is protected against user signals */
++		if ((info->si_code == SI_USER) &&
++			vx_current_initpid(current->pid))
++			continue;
++
+ 		if (sig_kernel_stop(signr)) {
+ 			/*
+ 			 * The default action is to stop all threads in
+--- a/kernel/softirq.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/softirq.c	2011-06-10 13:03:02.000000000 +0200
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/vs_context.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+--- a/kernel/sys.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sys.c	2011-06-10 13:03:02.000000000 +0200
+@@ -41,6 +41,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/kprobes.h>
+ #include <linux/user_namespace.h>
++#include <linux/vs_pid.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -130,7 +131,10 @@ static int set_one_prio(struct task_stru
+ 		goto out;
+ 	}
+ 	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
+-		error = -EACCES;
++		if (vx_flags(VXF_IGNEG_NICE, 0))
++			error = 0;
++		else
++			error = -EACCES;
+ 		goto out;
+ 	}
+ 	no_nice = security_task_setnice(p, niceval);
+@@ -179,6 +183,8 @@ SYSCALL_DEFINE3(setpriority, int, which,
+ 			else
+ 				pgrp = task_pgrp(current);
+ 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++					continue;
+ 				error = set_one_prio(p, niceval, error);
+ 			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ 			break;
+@@ -240,6 +246,8 @@ SYSCALL_DEFINE2(getpriority, int, which,
+ 			else
+ 				pgrp = task_pgrp(current);
+ 			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
++				if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
++					continue;
+ 				niceval = 20 - task_nice(p);
+ 				if (niceval > retval)
+ 					retval = niceval;
+@@ -349,6 +357,9 @@ void kernel_power_off(void)
+ 	machine_power_off();
+ }
+ EXPORT_SYMBOL_GPL(kernel_power_off);
++
++long vs_reboot(unsigned int, void __user *);
++
+ /*
+  * Reboot system call: for obvious reasons only root may call it,
+  * and even root needs to set up some magic numbers in the registers
+@@ -381,6 +392,9 @@ SYSCALL_DEFINE4(reboot, int, magic1, int
+ 	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ 		cmd = LINUX_REBOOT_CMD_HALT;
+ 
++	if (!vx_check(0, VS_ADMIN|VS_WATCH))
++		return vs_reboot(cmd, arg);
++
+ 	lock_kernel();
+ 	switch (cmd) {
+ 	case LINUX_REBOOT_CMD_RESTART:
+@@ -1129,7 +1143,7 @@ SYSCALL_DEFINE2(sethostname, char __user
+ 	int errno;
+ 	char tmp[__NEW_UTS_LEN];
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
+ 		return -EPERM;
+ 	if (len < 0 || len > __NEW_UTS_LEN)
+ 		return -EINVAL;
+@@ -1178,7 +1192,7 @@ SYSCALL_DEFINE2(setdomainname, char __us
+ 	int errno;
+ 	char tmp[__NEW_UTS_LEN];
+ 
+-	if (!capable(CAP_SYS_ADMIN))
++	if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
+ 		return -EPERM;
+ 	if (len < 0 || len > __NEW_UTS_LEN)
+ 		return -EINVAL;
+@@ -1247,7 +1261,7 @@ SYSCALL_DEFINE2(setrlimit, unsigned int,
+ 		return -EINVAL;
+ 	old_rlim = current->signal->rlim + resource;
+ 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
+-	    !capable(CAP_SYS_RESOURCE))
++	    !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
+ 		return -EPERM;
+ 	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+ 		return -EPERM;
+--- a/kernel/sysctl.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sysctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -124,6 +124,7 @@ static int ngroups_max = NGROUPS_MAX;
+ extern char modprobe_path[];
+ extern int modules_disabled;
+ #endif
++extern char vshelper_path[];
+ #ifdef CONFIG_CHR_DEV_SG
+ extern int sg_big_buff;
+ #endif
+@@ -593,6 +594,15 @@ static struct ctl_table kern_table[] = {
+ 		.strategy	= &sysctl_string,
+ 	},
+ #endif
++	{
++		.ctl_name	= KERN_VSHELPER,
++		.procname	= "vshelper",
++		.data		= &vshelper_path,
++		.maxlen		= 256,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++		.strategy	= &sysctl_string,
++	},
+ #ifdef CONFIG_CHR_DEV_SG
+ 	{
+ 		.ctl_name	= KERN_SG_BIG_BUFF,
+--- a/kernel/sysctl_check.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/sysctl_check.c	2011-06-10 13:03:02.000000000 +0200
+@@ -39,6 +39,7 @@ static const struct trans_ctl_table tran
+ 
+ 	{ KERN_PANIC,			"panic" },
+ 	{ KERN_REALROOTDEV,		"real-root-dev" },
++	{ KERN_VSHELPER,		"vshelper", },
+ 
+ 	{ KERN_SPARC_REBOOT,		"reboot-cmd" },
+ 	{ KERN_CTLALTDEL,		"ctrl-alt-del" },
+@@ -1218,6 +1219,22 @@ static const struct trans_ctl_table tran
+ 	{}
+ };
+ 
++static struct trans_ctl_table trans_vserver_table[] = {
++	{ 1,	"debug_switch" },
++	{ 2,	"debug_xid" },
++	{ 3,	"debug_nid" },
++	{ 4,	"debug_tag" },
++	{ 5,	"debug_net" },
++	{ 6,	"debug_limit" },
++	{ 7,	"debug_cres" },
++	{ 8,	"debug_dlim" },
++	{ 9,	"debug_quota" },
++	{ 10,	"debug_cvirt" },
++	{ 11,	"debug_space" },
++	{ 12,	"debug_misc" },
++	{}
++};
++
+ static const struct trans_ctl_table trans_root_table[] = {
+ 	{ CTL_KERN,	"kernel",	trans_kern_table },
+ 	{ CTL_VM,	"vm",		trans_vm_table },
+@@ -1234,6 +1251,7 @@ static const struct trans_ctl_table tran
+ 	{ CTL_SUNRPC,	"sunrpc",	trans_sunrpc_table },
+ 	{ CTL_PM,	"pm",		trans_pm_table },
+ 	{ CTL_FRV,	"frv",		trans_frv_table },
++	{ CTL_VSERVER,	"vserver",	trans_vserver_table },
+ 	{}
+ };
+ 
+--- a/kernel/time.c	2009-12-03 20:02:58.000000000 +0100
++++ a/kernel/time.c	2011-06-10 13:03:02.000000000 +0200
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(sys_tz);
+ SYSCALL_DEFINE1(time, time_t __user *, tloc)
+ {
+ 	time_t i = get_seconds();
++/*	FIXME: do_gettimeofday(&tv) -> vx_gettimeofday(&tv) */
+ 
+ 	if (tloc) {
+ 		if (put_user(i,tloc))
+@@ -93,7 +94,7 @@ SYSCALL_DEFINE1(stime, time_t __user *, 
+ 	if (err)
+ 		return err;
+ 
+-	do_settimeofday(&tv);
++	vx_settimeofday(&tv);
+ 	return 0;
+ }
+ 
+@@ -104,7 +105,7 @@ SYSCALL_DEFINE2(gettimeofday, struct tim
+ {
+ 	if (likely(tv != NULL)) {
+ 		struct timeval ktv;
+-		do_gettimeofday(&ktv);
++		vx_gettimeofday(&ktv);
+ 		if (copy_to_user(tv, &ktv, sizeof(ktv)))
+ 			return -EFAULT;
+ 	}
+@@ -179,7 +180,7 @@ int do_sys_settimeofday(struct timespec 
+ 		/* SMP safe, again the code in arch/foo/time.c should
+ 		 * globally block out interrupts when it runs.
+ 		 */
+-		return do_settimeofday(tv);
++		return vx_settimeofday(tv);
+ 	}
+ 	return 0;
+ }
+@@ -311,7 +312,7 @@ void getnstimeofday(struct timespec *tv)
+ {
+ 	struct timeval x;
+ 
+-	do_gettimeofday(&x);
++	vx_gettimeofday(&x);
+ 	tv->tv_sec = x.tv_sec;
+ 	tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
+ }
+--- a/kernel/timer.c	2011-05-29 23:42:29.000000000 +0200
++++ a/kernel/timer.c	2011-06-10 13:03:02.000000000 +0200
+@@ -39,6 +39,10 @@
+ #include <linux/kallsyms.h>
+ #include <linux/perf_event.h>
+ #include <linux/sched.h>
++#include <linux/vs_base.h>
++#include <linux/vs_cvirt.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/sched.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -1261,12 +1265,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, sec
+ 
+ #endif
+ 
+-#ifndef __alpha__
+-
+-/*
+- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
+- * should be moved into arch/i386 instead?
+- */
+ 
+ /**
+  * sys_getpid - return the thread group id of the current process
+@@ -1295,10 +1293,23 @@ SYSCALL_DEFINE0(getppid)
+ 	rcu_read_lock();
+ 	pid = task_tgid_vnr(current->real_parent);
+ 	rcu_read_unlock();
++	return vx_map_pid(pid);
++}
+ 
+-	return pid;
++#ifdef __alpha__
++
++/*
++ * The Alpha uses getxpid, getxuid, and getxgid instead.
++ */
++
++asmlinkage long do_getxpid(long *ppid)
++{
++	*ppid = sys_getppid();
++	return sys_getpid();
+ }
+ 
++#else /* _alpha_ */
++
+ SYSCALL_DEFINE0(getuid)
+ {
+ 	/* Only we change this so SMP safe */
+--- a/kernel/user_namespace.c	2009-03-24 14:22:45.000000000 +0100
++++ a/kernel/user_namespace.c	2011-06-10 13:03:02.000000000 +0200
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/user_namespace.h>
+ #include <linux/cred.h>
++#include <linux/vserver/global.h>
+ 
+ /*
+  * Create a new user namespace, deriving the creator from the user in the
+@@ -30,6 +31,7 @@ int create_user_ns(struct cred *new)
+ 		return -ENOMEM;
+ 
+ 	kref_init(&ns->kref);
++	atomic_inc(&vs_global_user_ns);
+ 
+ 	for (n = 0; n < UIDHASH_SZ; ++n)
+ 		INIT_HLIST_HEAD(ns->uidhash_table + n);
+@@ -78,6 +80,8 @@ void free_user_ns(struct kref *kref)
+ 	struct user_namespace *ns =
+ 		container_of(kref, struct user_namespace, kref);
+ 
++	/* FIXME: maybe move into destroyer? */
++	atomic_dec(&vs_global_user_ns);
+ 	INIT_WORK(&ns->destroyer, free_user_ns_work);
+ 	schedule_work(&ns->destroyer);
+ }
+--- a/kernel/utsname.c	2009-09-10 15:26:28.000000000 +0200
++++ a/kernel/utsname.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,14 +14,17 @@
+ #include <linux/utsname.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/vserver/global.h>
+ 
+ static struct uts_namespace *create_uts_ns(void)
+ {
+ 	struct uts_namespace *uts_ns;
+ 
+ 	uts_ns = kmalloc(sizeof(struct uts_namespace), GFP_KERNEL);
+-	if (uts_ns)
++	if (uts_ns) {
+ 		kref_init(&uts_ns->kref);
++		atomic_inc(&vs_global_uts_ns);
++	}
+ 	return uts_ns;
+ }
+ 
+@@ -71,5 +74,6 @@ void free_uts_ns(struct kref *kref)
+ 	struct uts_namespace *ns;
+ 
+ 	ns = container_of(kref, struct uts_namespace, kref);
++	atomic_dec(&vs_global_uts_ns);
+ 	kfree(ns);
+ }
+--- a/kernel/vserver/Kconfig	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,251 @@
++#
++# Linux VServer configuration
++#
++
++menu "Linux VServer"
++
++config	VSERVER_AUTO_LBACK
++	bool    "Automatically Assign Loopback IP"
++	default y
++	help
++	  Automatically assign a guest specific loopback
++	  IP and add it to the kernel network stack on
++	  startup.
++
++config	VSERVER_AUTO_SINGLE
++	bool	"Automatic Single IP Special Casing"
++	depends on EXPERIMENTAL
++	default y
++	help
++	  This allows network contexts with a single IP to
++	  automatically remap 0.0.0.0 bindings to that IP,
++	  avoiding further network checks and improving
++	  performance.
++
++	  (note: such guests do not allow to change the ip
++	   on the fly and do not show loopback addresses)
++
++config	VSERVER_COWBL
++	bool	"Enable COW Immutable Link Breaking"
++	default y
++	help
++	  This enables the COW (Copy-On-Write) link break code.
++	  It allows you to treat unified files like normal files
++	  when writing to them (which will implicitely break the
++	  link and create a copy of the unified file)
++
++config	VSERVER_VTIME
++	bool	"Enable Virtualized Guest Time"
++	depends on EXPERIMENTAL
++	default n
++	help
++	  This enables per guest time offsets to allow for
++	  adjusting the system clock individually per guest.
++	  this adds some overhead to the time functions and
++	  therefore should not be enabled without good reason.
++
++config	VSERVER_DEVICE
++	bool	"Enable Guest Device Mapping"
++	depends on EXPERIMENTAL
++	default n
++	help
++	  This enables generic device remapping.
++
++config	VSERVER_PROC_SECURE
++	bool	"Enable Proc Security"
++	depends on PROC_FS
++	default y
++	help
++	  This configures ProcFS security to initially hide
++	  non-process entries for all contexts except the main and
++	  spectator context (i.e. for all guests), which is a secure
++	  default.
++
++	  (note: on 1.2x the entries were visible by default)
++
++config	VSERVER_HARDCPU
++	bool	"Enable Hard CPU Limits"
++	default y
++	help
++	  Activate the Hard CPU Limits
++
++	  This will compile in code that allows the Token Bucket
++	  Scheduler to put processes on hold when a context's
++	  tokens are depleted (provided that its per-context
++	  sched_hard flag is set).
++
++	  Processes belonging to that context will not be able
++	  to consume CPU resources again until a per-context
++	  configured minimum of tokens has been reached.
++
++config	VSERVER_IDLETIME
++	bool	"Avoid idle CPUs by skipping Time"
++	depends on VSERVER_HARDCPU
++	default y
++	help
++	  This option allows the scheduler to artificially
++	  advance time (per cpu) when otherwise the idle
++	  task would be scheduled, thus keeping the cpu
++	  busy and sharing the available resources among
++	  certain contexts.
++
++config	VSERVER_IDLELIMIT
++	bool	"Limit the IDLE task"
++	depends on VSERVER_HARDCPU
++	default n
++	help
++	  Limit the idle slices, so the the next context
++	  will be scheduled as soon as possible.
++
++	  This might improve interactivity and latency, but
++	  will also marginally increase scheduling overhead.
++
++choice
++	prompt	"Persistent Inode Tagging"
++	default	TAGGING_ID24
++	help
++	  This adds persistent context information to filesystems
++	  mounted with the tagxid option. Tagging is a requirement
++	  for per-context disk limits and per-context quota.
++
++
++config	TAGGING_NONE
++	bool	"Disabled"
++	help
++	  do not store per-context information in inodes.
++
++config	TAGGING_UID16
++	bool	"UID16/GID32"
++	help
++	  reduces UID to 16 bit, but leaves GID at 32 bit.
++
++config	TAGGING_GID16
++	bool	"UID32/GID16"
++	help
++	  reduces GID to 16 bit, but leaves UID at 32 bit.
++
++config	TAGGING_ID24
++	bool	"UID24/GID24"
++	help
++	  uses the upper 8bit from UID and GID for XID tagging
++	  which leaves 24bit for UID/GID each, which should be
++	  more than sufficient for normal use.
++
++config	TAGGING_INTERN
++	bool	"UID32/GID32"
++	help
++	  this uses otherwise reserved inode fields in the on
++	  disk representation, which limits the use to a few
++	  filesystems (currently ext2 and ext3)
++
++endchoice
++
++config	TAG_NFSD
++	bool	"Tag NFSD User Auth and Files"
++	default n
++	help
++	  Enable this if you do want the in-kernel NFS
++	  Server to use the tagging specified above.
++	  (will require patched clients too)
++
++config	VSERVER_PRIVACY
++	bool	"Honor Privacy Aspects of Guests"
++	default n
++	help
++	  When enabled, most context checks will disallow
++	  access to structures assigned to a specific context,
++	  like ptys or loop devices.
++
++config	VSERVER_CONTEXTS
++	int	"Maximum number of Contexts (1-65533)"	if EMBEDDED
++	range 1 65533
++	default "768"	if 64BIT
++	default "256"
++	help
++	  This setting will optimize certain data structures
++	  and memory allocations according to the expected
++	  maximum.
++
++	  note: this is not a strict upper limit.
++
++config	VSERVER_WARN
++	bool	"VServer Warnings"
++	default y
++	help
++	  This enables various runtime warnings, which will
++	  notify about potential manipulation attempts or
++	  resource shortage. It is generally considered to
++	  be a good idea to have that enabled.
++
++config	VSERVER_DEBUG
++	bool	"VServer Debugging Code"
++	default n
++	help
++	  Set this to yes if you want to be able to activate
++	  debugging output at runtime. It adds a very small
++	  overhead to all vserver related functions and
++	  increases the kernel size by about 20k.
++
++config	VSERVER_HISTORY
++	bool	"VServer History Tracing"
++	depends on VSERVER_DEBUG
++	default n
++	help
++	  Set this to yes if you want to record the history of
++	  linux-vserver activities, so they can be replayed in
++	  the event of a kernel panic or oops.
++
++config	VSERVER_HISTORY_SIZE
++	int	"Per-CPU History Size (32-65536)"
++	depends on VSERVER_HISTORY
++	range 32 65536
++	default 64
++	help
++	  This allows you to specify the number of entries in
++	  the per-CPU history buffer.
++
++config	VSERVER_MONITOR
++	bool	"VServer Scheduling Monitor"
++	depends on VSERVER_DISABLED
++	default n
++	help
++	  Set this to yes if you want to record the scheduling
++	  decisions, so that they can be relayed to userspace
++	  for detailed analysis.
++
++config	VSERVER_MONITOR_SIZE
++	int	"Per-CPU Monitor Queue Size (32-65536)"
++	depends on VSERVER_MONITOR
++	range 32 65536
++	default 1024
++	help
++	  This allows you to specify the number of entries in
++	  the per-CPU scheduling monitor buffer.
++
++config	VSERVER_MONITOR_SYNC
++	int	"Per-CPU Monitor Sync Interval (0-65536)"
++	depends on VSERVER_MONITOR
++	range 0 65536
++	default 256
++	help
++	  This allows you to specify the interval in ticks
++	  when a time sync entry is inserted.
++
++endmenu
++
++
++config	VSERVER
++	bool
++	default y
++	select NAMESPACES
++	select UTS_NS
++	select IPC_NS
++	select USER_NS
++	select SYSVIPC
++
++config	VSERVER_SECURITY
++	bool
++	depends on SECURITY
++	default y
++	select SECURITY_CAPABILITIES
++
+--- a/kernel/vserver/Makefile	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/Makefile	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,18 @@
++#
++# Makefile for the Linux vserver routines.
++#
++
++
++obj-y		+= vserver.o
++
++vserver-y	:= switch.o context.o space.o sched.o network.o inode.o \
++		   limit.o cvirt.o cacct.o signal.o helper.o init.o \
++		   dlimit.o tag.o
++
++vserver-$(CONFIG_INET) += inet.o
++vserver-$(CONFIG_PROC_FS) += proc.o
++vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o
++vserver-$(CONFIG_VSERVER_HISTORY) += history.o
++vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o
++vserver-$(CONFIG_VSERVER_DEVICE) += device.o
++
+--- a/kernel/vserver/cacct.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cacct.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,42 @@
++/*
++ *  linux/kernel/vserver/cacct.c
++ *
++ *  Virtual Server: Context Accounting
++ *
++ *  Copyright (C) 2006-2007 Herbert Pötzl
++ *
++ *  V0.01  added accounting stats
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/vs_context.h>
++#include <linux/vserver/cacct_cmd.h>
++#include <linux/vserver/cacct_int.h>
++
++#include <asm/errno.h>
++#include <asm/uaccess.h>
++
++
++int vc_sock_stat(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_sock_stat_v0 vc_data;
++	int j, field;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	field = vc_data.field;
++	if ((field < 0) || (field >= VXA_SOCK_SIZE))
++		return -EINVAL;
++
++	for (j = 0; j < 3; j++) {
++		vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j);
++		vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j);
++	}
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
+--- a/kernel/vserver/cacct_init.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cacct_init.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,25 @@
++
++
++static inline void vx_info_init_cacct(struct _vx_cacct *cacct)
++{
++	int i, j;
++
++
++	for (i = 0; i < VXA_SOCK_SIZE; i++) {
++		for (j = 0; j < 3; j++) {
++			atomic_long_set(&cacct->sock[i][j].count, 0);
++			atomic_long_set(&cacct->sock[i][j].total, 0);
++		}
++	}
++	for (i = 0; i < 8; i++)
++		atomic_set(&cacct->slab[i], 0);
++	for (i = 0; i < 5; i++)
++		for (j = 0; j < 4; j++)
++			atomic_set(&cacct->page[i][j], 0);
++}
++
++static inline void vx_info_exit_cacct(struct _vx_cacct *cacct)
++{
++	return;
++}
++
+--- a/kernel/vserver/cacct_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cacct_proc.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,53 @@
++#ifndef _VX_CACCT_PROC_H
++#define _VX_CACCT_PROC_H
++
++#include <linux/vserver/cacct_int.h>
++
++
++#define VX_SOCKA_TOP	\
++	"Type\t    recv #/bytes\t\t   send #/bytes\t\t    fail #/bytes\n"
++
++static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer)
++{
++	int i, j, length = 0;
++	static char *type[VXA_SOCK_SIZE] = {
++		"UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER"
++	};
++
++	length += sprintf(buffer + length, VX_SOCKA_TOP);
++	for (i = 0; i < VXA_SOCK_SIZE; i++) {
++		length += sprintf(buffer + length, "%s:", type[i]);
++		for (j = 0; j < 3; j++) {
++			length += sprintf(buffer + length,
++				"\t%10lu/%-10lu",
++				vx_sock_count(cacct, i, j),
++				vx_sock_total(cacct, i, j));
++		}
++		buffer[length++] = '\n';
++	}
++
++	length += sprintf(buffer + length, "\n");
++	length += sprintf(buffer + length,
++		"slab:\t %8u %8u %8u %8u\n",
++		atomic_read(&cacct->slab[1]),
++		atomic_read(&cacct->slab[4]),
++		atomic_read(&cacct->slab[0]),
++		atomic_read(&cacct->slab[2]));
++
++	length += sprintf(buffer + length, "\n");
++	for (i = 0; i < 5; i++) {
++		length += sprintf(buffer + length,
++			"page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i,
++			atomic_read(&cacct->page[i][0]),
++			atomic_read(&cacct->page[i][1]),
++			atomic_read(&cacct->page[i][2]),
++			atomic_read(&cacct->page[i][3]),
++			atomic_read(&cacct->page[i][4]),
++			atomic_read(&cacct->page[i][5]),
++			atomic_read(&cacct->page[i][6]),
++			atomic_read(&cacct->page[i][7]));
++	}
++	return length;
++}
++
++#endif	/* _VX_CACCT_PROC_H */
+--- a/kernel/vserver/context.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/context.c	2011-06-10 14:02:35.000000000 +0200
+@@ -0,0 +1,1058 @@
++/*
++ *  linux/kernel/vserver/context.c
++ *
++ *  Virtual Server: Context Support
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  context helper
++ *  V0.02  vx_ctx_kill syscall command
++ *  V0.03  replaced context_info calls
++ *  V0.04  redesign of struct (de)alloc
++ *  V0.05  rlimit basic implementation
++ *  V0.06  task_xid and info commands
++ *  V0.07  context flags and caps
++ *  V0.08  switch to RCU based hash
++ *  V0.09  revert to non RCU for now
++ *  V0.10  and back to working RCU hash
++ *  V0.11  and back to locking again
++ *  V0.12  referenced context store
++ *  V0.13  separate per cpu data
++ *  V0.14  changed vcmds to vxi arg
++ *  V0.15  added context stat
++ *  V0.16  have __create claim() the vxi
++ *  V0.17  removed older and legacy stuff
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/security.h>
++#include <linux/pid_namespace.h>
++
++#include <linux/vserver/context.h>
++#include <linux/vserver/network.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/limit.h>
++#include <linux/vserver/limit_int.h>
++#include <linux/vserver/space.h>
++#include <linux/init_task.h>
++#include <linux/fs_struct.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/context_cmd.h>
++
++#include "cvirt_init.h"
++#include "cacct_init.h"
++#include "limit_init.h"
++#include "sched_init.h"
++
++
++atomic_t vx_global_ctotal	= ATOMIC_INIT(0);
++atomic_t vx_global_cactive	= ATOMIC_INIT(0);
++
++
++/*	now inactive context structures */
++
++static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
++
++static spinlock_t vx_info_inactive_lock = SPIN_LOCK_UNLOCKED;
++
++
++/*	__alloc_vx_info()
++
++	* allocate an initialized vx_info struct
++	* doesn't make it visible (hash)			*/
++
++static struct vx_info *__alloc_vx_info(xid_t xid)
++{
++	struct vx_info *new = NULL;
++	int cpu, index;
++
++	vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
++
++	/* would this benefit from a slab cache? */
++	new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
++	if (!new)
++		return 0;
++
++	memset(new, 0, sizeof(struct vx_info));
++#ifdef CONFIG_SMP
++	new->ptr_pc = alloc_percpu(struct _vx_info_pc);
++	if (!new->ptr_pc)
++		goto error;
++#endif
++	new->vx_id = xid;
++	INIT_HLIST_NODE(&new->vx_hlist);
++	atomic_set(&new->vx_usecnt, 0);
++	atomic_set(&new->vx_tasks, 0);
++	new->vx_parent = NULL;
++	new->vx_state = 0;
++	init_waitqueue_head(&new->vx_wait);
++
++	/* prepare reaper */
++	get_task_struct(init_pid_ns.child_reaper);
++	new->vx_reaper = init_pid_ns.child_reaper;
++	new->vx_badness_bias = 0;
++
++	/* rest of init goes here */
++	vx_info_init_limit(&new->limit);
++	vx_info_init_sched(&new->sched);
++	vx_info_init_cvirt(&new->cvirt);
++	vx_info_init_cacct(&new->cacct);
++
++	/* per cpu data structures */
++	for_each_possible_cpu(cpu) {
++		vx_info_init_sched_pc(
++			&vx_per_cpu(new, sched_pc, cpu), cpu);
++		vx_info_init_cvirt_pc(
++			&vx_per_cpu(new, cvirt_pc, cpu), cpu);
++	}
++
++	new->vx_flags = VXF_INIT_SET;
++	cap_set_init_eff(new->vx_bcaps);
++	new->vx_ccaps = 0;
++	new->vx_umask = 0;
++
++	new->reboot_cmd = 0;
++	new->exit_code = 0;
++
++	// preconfig fs entries
++	for (index = 0; index < VX_SPACES; index++) {
++		write_lock(&init_fs.lock);
++		init_fs.users++;
++		write_unlock(&init_fs.lock);
++		new->vx_fs[index] = &init_fs;
++	}
++
++	vxdprintk(VXD_CBIT(xid, 0),
++		"alloc_vx_info(%d) = %p", xid, new);
++	vxh_alloc_vx_info(new);
++	atomic_inc(&vx_global_ctotal);
++	return new;
++#ifdef CONFIG_SMP
++error:
++	kfree(new);
++	return 0;
++#endif
++}
++
++/*	__dealloc_vx_info()
++
++	* final disposal of vx_info				*/
++
++static void __dealloc_vx_info(struct vx_info *vxi)
++{
++#ifdef	CONFIG_VSERVER_WARN
++	struct vx_info_save vxis;
++	int cpu;
++#endif
++	vxdprintk(VXD_CBIT(xid, 0),
++		"dealloc_vx_info(%p)", vxi);
++	vxh_dealloc_vx_info(vxi);
++
++#ifdef	CONFIG_VSERVER_WARN
++	enter_vx_info(vxi, &vxis);
++	vx_info_exit_limit(&vxi->limit);
++	vx_info_exit_sched(&vxi->sched);
++	vx_info_exit_cvirt(&vxi->cvirt);
++	vx_info_exit_cacct(&vxi->cacct);
++
++	for_each_possible_cpu(cpu) {
++		vx_info_exit_sched_pc(
++			&vx_per_cpu(vxi, sched_pc, cpu), cpu);
++		vx_info_exit_cvirt_pc(
++			&vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
++	}
++	leave_vx_info(&vxis);
++#endif
++
++	vxi->vx_id = -1;
++	vxi->vx_state |= VXS_RELEASED;
++
++#ifdef CONFIG_SMP
++	free_percpu(vxi->ptr_pc);
++#endif
++	kfree(vxi);
++	atomic_dec(&vx_global_ctotal);
++}
++
++static void __shutdown_vx_info(struct vx_info *vxi)
++{
++	struct nsproxy *nsproxy;
++	struct fs_struct *fs;
++	int index, kill;
++
++	might_sleep();
++
++	vxi->vx_state |= VXS_SHUTDOWN;
++	vs_state_change(vxi, VSC_SHUTDOWN);
++
++	for (index = 0; index < VX_SPACES; index++) {
++		nsproxy = xchg(&vxi->vx_nsproxy[index], NULL);
++		if (nsproxy)
++			put_nsproxy(nsproxy);
++
++		fs = xchg(&vxi->vx_fs[index], NULL);
++		write_lock(&fs->lock);
++		kill = !--fs->users;
++		write_unlock(&fs->lock);
++		if (kill)
++			free_fs_struct(fs);
++	}
++}
++
++/* exported stuff */
++
++void free_vx_info(struct vx_info *vxi)
++{
++	unsigned long flags;
++	unsigned index;
++
++	/* check for reference counts first */
++	BUG_ON(atomic_read(&vxi->vx_usecnt));
++	BUG_ON(atomic_read(&vxi->vx_tasks));
++
++	/* context must not be hashed */
++	BUG_ON(vx_info_state(vxi, VXS_HASHED));
++
++	/* context shutdown is mandatory */
++	BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
++
++	/* nsproxy and fs check */
++	for (index = 0; index < VX_SPACES; index++) {
++		BUG_ON(vxi->vx_nsproxy[index]);
++		BUG_ON(vxi->vx_fs[index]);
++	}
++
++	spin_lock_irqsave(&vx_info_inactive_lock, flags);
++	hlist_del(&vxi->vx_hlist);
++	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
++
++	__dealloc_vx_info(vxi);
++}
++
++
++/*	hash table for vx_info hash */
++
++#define VX_HASH_SIZE	13
++
++static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
++	{ [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
++
++static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;
++
++
++static inline unsigned int __hashval(xid_t xid)
++{
++	return (xid % VX_HASH_SIZE);
++}
++
++
++
++/*	__hash_vx_info()
++
++	* add the vxi to the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __hash_vx_info(struct vx_info *vxi)
++{
++	struct hlist_head *head;
++
++	vxd_assert_lock(&vx_info_hash_lock);
++	vxdprintk(VXD_CBIT(xid, 4),
++		"__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
++	vxh_hash_vx_info(vxi);
++
++	/* context must not be hashed */
++	BUG_ON(vx_info_state(vxi, VXS_HASHED));
++
++	vxi->vx_state |= VXS_HASHED;
++	head = &vx_info_hash[__hashval(vxi->vx_id)];
++	hlist_add_head(&vxi->vx_hlist, head);
++	atomic_inc(&vx_global_cactive);
++}
++
++/*	__unhash_vx_info()
++
++	* remove the vxi from the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __unhash_vx_info(struct vx_info *vxi)
++{
++	unsigned long flags;
++
++	vxd_assert_lock(&vx_info_hash_lock);
++	vxdprintk(VXD_CBIT(xid, 4),
++		"__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
++		atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
++	vxh_unhash_vx_info(vxi);
++
++	/* context must be hashed */
++	BUG_ON(!vx_info_state(vxi, VXS_HASHED));
++	/* but without tasks */
++	BUG_ON(atomic_read(&vxi->vx_tasks));
++
++	vxi->vx_state &= ~VXS_HASHED;
++	hlist_del_init(&vxi->vx_hlist);
++	spin_lock_irqsave(&vx_info_inactive_lock, flags);
++	hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
++	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
++	atomic_dec(&vx_global_cactive);
++}
++
++
++/*	__lookup_vx_info()
++
++	* requires the hash_lock to be held
++	* doesn't increment the vx_refcnt			*/
++
++static inline struct vx_info *__lookup_vx_info(xid_t xid)
++{
++	struct hlist_head *head = &vx_info_hash[__hashval(xid)];
++	struct hlist_node *pos;
++	struct vx_info *vxi;
++
++	vxd_assert_lock(&vx_info_hash_lock);
++	hlist_for_each(pos, head) {
++		vxi = hlist_entry(pos, struct vx_info, vx_hlist);
++
++		if (vxi->vx_id == xid)
++			goto found;
++	}
++	vxi = NULL;
++found:
++	vxdprintk(VXD_CBIT(xid, 0),
++		"__lookup_vx_info(#%u): %p[#%u]",
++		xid, vxi, vxi ? vxi->vx_id : 0);
++	vxh_lookup_vx_info(vxi, xid);
++	return vxi;
++}
++
++
++/*	__create_vx_info()
++
++	* create the requested context
++	* get(), claim() and hash it				*/
++
++static struct vx_info *__create_vx_info(int id)
++{
++	struct vx_info *new, *vxi = NULL;
++
++	vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
++
++	if (!(new = __alloc_vx_info(id)))
++		return ERR_PTR(-ENOMEM);
++
++	/* required to make dynamic xids unique */
++	spin_lock(&vx_info_hash_lock);
++
++	/* static context requested */
++	if ((vxi = __lookup_vx_info(id))) {
++		vxdprintk(VXD_CBIT(xid, 0),
++			"create_vx_info(%d) = %p (already there)", id, vxi);
++		if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
++			vxi = ERR_PTR(-EBUSY);
++		else
++			vxi = ERR_PTR(-EEXIST);
++		goto out_unlock;
++	}
++	/* new context */
++	vxdprintk(VXD_CBIT(xid, 0),
++		"create_vx_info(%d) = %p (new)", id, new);
++	claim_vx_info(new, NULL);
++	__hash_vx_info(get_vx_info(new));
++	vxi = new, new = NULL;
++
++out_unlock:
++	spin_unlock(&vx_info_hash_lock);
++	vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id);
++	if (new)
++		__dealloc_vx_info(new);
++	return vxi;
++}
++
++
++/*	exported stuff						*/
++
++
++void unhash_vx_info(struct vx_info *vxi)
++{
++	spin_lock(&vx_info_hash_lock);
++	__unhash_vx_info(vxi);
++	spin_unlock(&vx_info_hash_lock);
++	__shutdown_vx_info(vxi);
++	__wakeup_vx_info(vxi);
++}
++
++
++/*	lookup_vx_info()
++
++	* search for a vx_info and get() it
++	* negative id means current				*/
++
++struct vx_info *lookup_vx_info(int id)
++{
++	struct vx_info *vxi = NULL;
++
++	if (id < 0) {
++		vxi = get_vx_info(current_vx_info());
++	} else if (id > 1) {
++		spin_lock(&vx_info_hash_lock);
++		vxi = get_vx_info(__lookup_vx_info(id));
++		spin_unlock(&vx_info_hash_lock);
++	}
++	return vxi;
++}
++
++/*	xid_is_hashed()
++
++	* verify that xid is still hashed			*/
++
++int xid_is_hashed(xid_t xid)
++{
++	int hashed;
++
++	spin_lock(&vx_info_hash_lock);
++	hashed = (__lookup_vx_info(xid) != NULL);
++	spin_unlock(&vx_info_hash_lock);
++	return hashed;
++}
++
++#ifdef	CONFIG_PROC_FS
++
++/*	get_xid_list()
++
++	* get a subset of hashed xids for proc
++	* assumes size is at least one				*/
++
++int get_xid_list(int index, unsigned int *xids, int size)
++{
++	int hindex, nr_xids = 0;
++
++	/* only show current and children */
++	if (!vx_check(0, VS_ADMIN | VS_WATCH)) {
++		if (index > 0)
++			return 0;
++		xids[nr_xids] = vx_current_xid();
++		return 1;
++	}
++
++	for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
++		struct hlist_head *head = &vx_info_hash[hindex];
++		struct hlist_node *pos;
++
++		spin_lock(&vx_info_hash_lock);
++		hlist_for_each(pos, head) {
++			struct vx_info *vxi;
++
++			if (--index > 0)
++				continue;
++
++			vxi = hlist_entry(pos, struct vx_info, vx_hlist);
++			xids[nr_xids] = vxi->vx_id;
++			if (++nr_xids >= size) {
++				spin_unlock(&vx_info_hash_lock);
++				goto out;
++			}
++		}
++		/* keep the lock time short */
++		spin_unlock(&vx_info_hash_lock);
++	}
++out:
++	return nr_xids;
++}
++#endif
++
++#ifdef	CONFIG_VSERVER_DEBUG
++
++void	dump_vx_info_inactive(int level)
++{
++	struct hlist_node *entry, *next;
++
++	hlist_for_each_safe(entry, next, &vx_info_inactive) {
++		struct vx_info *vxi =
++			list_entry(entry, struct vx_info, vx_hlist);
++
++		dump_vx_info(vxi, level);
++	}
++}
++
++#endif
++
++#if 0
++int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
++{
++	struct user_struct *new_user, *old_user;
++
++	if (!p || !vxi)
++		BUG();
++
++	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
++		return -EACCES;
++
++	new_user = alloc_uid(vxi->vx_id, p->uid);
++	if (!new_user)
++		return -ENOMEM;
++
++	old_user = p->user;
++	if (new_user != old_user) {
++		atomic_inc(&new_user->processes);
++		atomic_dec(&old_user->processes);
++		p->user = new_user;
++	}
++	free_uid(old_user);
++	return 0;
++}
++#endif
++
++#if 0
++void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
++{
++	// p->cap_effective &= vxi->vx_cap_bset;
++	p->cap_effective =
++		cap_intersect(p->cap_effective, vxi->cap_bset);
++	// p->cap_inheritable &= vxi->vx_cap_bset;
++	p->cap_inheritable =
++		cap_intersect(p->cap_inheritable, vxi->cap_bset);
++	// p->cap_permitted &= vxi->vx_cap_bset;
++	p->cap_permitted =
++		cap_intersect(p->cap_permitted, vxi->cap_bset);
++}
++#endif
++
++
++#include <linux/file.h>
++#include <linux/fdtable.h>
++
++static int vx_openfd_task(struct task_struct *tsk)
++{
++	struct files_struct *files = tsk->files;
++	struct fdtable *fdt;
++	const unsigned long *bptr;
++	int count, total;
++
++	/* no rcu_read_lock() because of spin_lock() */
++	spin_lock(&files->file_lock);
++	fdt = files_fdtable(files);
++	bptr = fdt->open_fds->fds_bits;
++	count = fdt->max_fds / (sizeof(unsigned long) * 8);
++	for (total = 0; count > 0; count--) {
++		if (*bptr)
++			total += hweight_long(*bptr);
++		bptr++;
++	}
++	spin_unlock(&files->file_lock);
++	return total;
++}
++
++
++/*	for *space compatibility */
++
++asmlinkage long sys_unshare(unsigned long);
++
++/*
++ *	migrate task to new context
++ *	gets vxi, puts old_vxi on change
++ *	optionally unshares namespaces (hack)
++ */
++
++int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
++{
++	struct vx_info *old_vxi;
++	int ret = 0;
++
++	if (!p || !vxi)
++		BUG();
++
++	vxdprintk(VXD_CBIT(xid, 5),
++		"vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
++		vxi->vx_id, atomic_read(&vxi->vx_usecnt));
++
++	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
++		!vx_info_flags(vxi, VXF_STATE_SETUP, 0))
++		return -EACCES;
++
++	if (vx_info_state(vxi, VXS_SHUTDOWN))
++		return -EFAULT;
++
++	old_vxi = task_get_vx_info(p);
++	if (old_vxi == vxi)
++		goto out;
++
++//	if (!(ret = vx_migrate_user(p, vxi))) {
++	{
++		int openfd;
++
++		task_lock(p);
++		openfd = vx_openfd_task(p);
++
++		if (old_vxi) {
++			atomic_dec(&old_vxi->cvirt.nr_threads);
++			atomic_dec(&old_vxi->cvirt.nr_running);
++			__rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
++			/* FIXME: what about the struct files here? */
++			__rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
++			/* account for the executable */
++			__rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
++		}
++		atomic_inc(&vxi->cvirt.nr_threads);
++		atomic_inc(&vxi->cvirt.nr_running);
++		__rlim_inc(&vxi->limit, RLIMIT_NPROC);
++		/* FIXME: what about the struct files here? */
++		__rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
++		/* account for the executable */
++		__rlim_inc(&vxi->limit, VLIMIT_DENTRY);
++
++		if (old_vxi) {
++			release_vx_info(old_vxi, p);
++			clr_vx_info(&p->vx_info);
++		}
++		claim_vx_info(vxi, p);
++		set_vx_info(&p->vx_info, vxi);
++		p->xid = vxi->vx_id;
++
++		vxdprintk(VXD_CBIT(xid, 5),
++			"moved task %p into vxi:%p[#%d]",
++			p, vxi, vxi->vx_id);
++
++		// vx_mask_cap_bset(vxi, p);
++		task_unlock(p);
++
++		/* hack for *spaces to provide compatibility */
++		if (unshare) {
++			struct nsproxy *old_nsp, *new_nsp;
++
++			ret = unshare_nsproxy_namespaces(
++				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER,
++				&new_nsp, NULL);
++			if (ret)
++				goto out;
++
++			old_nsp = xchg(&p->nsproxy, new_nsp);
++			vx_set_space(vxi,
++				CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER, 0);
++			put_nsproxy(old_nsp);
++		}
++	}
++out:
++	put_vx_info(old_vxi);
++	return ret;
++}
++
++int vx_set_reaper(struct vx_info *vxi, struct task_struct *p)
++{
++	struct task_struct *old_reaper;
++
++	if (!vxi)
++		return -EINVAL;
++
++	vxdprintk(VXD_CBIT(xid, 6),
++		"vx_set_reaper(%p[#%d],%p[#%d,%d])",
++		vxi, vxi->vx_id, p, p->xid, p->pid);
++
++	old_reaper = vxi->vx_reaper;
++	if (old_reaper == p)
++		return 0;
++
++	/* set new child reaper */
++	get_task_struct(p);
++	vxi->vx_reaper = p;
++	put_task_struct(old_reaper);
++	return 0;
++}
++
++int vx_set_init(struct vx_info *vxi, struct task_struct *p)
++{
++	if (!vxi)
++		return -EINVAL;
++
++	vxdprintk(VXD_CBIT(xid, 6),
++		"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
++		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
++
++	vxi->vx_flags &= ~VXF_STATE_INIT;
++	// vxi->vx_initpid = p->tgid;
++	vxi->vx_initpid = p->pid;
++	return 0;
++}
++
++void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
++{
++	vxdprintk(VXD_CBIT(xid, 6),
++		"vx_exit_init(%p[#%d],%p[#%d,%d,%d])",
++		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
++
++	vxi->exit_code = code;
++	vxi->vx_initpid = 0;
++}
++
++
++void vx_set_persistent(struct vx_info *vxi)
++{
++	vxdprintk(VXD_CBIT(xid, 6),
++		"vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
++
++	get_vx_info(vxi);
++	claim_vx_info(vxi, NULL);
++}
++
++void vx_clear_persistent(struct vx_info *vxi)
++{
++	vxdprintk(VXD_CBIT(xid, 6),
++		"vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
++
++	release_vx_info(vxi, NULL);
++	put_vx_info(vxi);
++}
++
++void vx_update_persistent(struct vx_info *vxi)
++{
++	if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
++		vx_set_persistent(vxi);
++	else
++		vx_clear_persistent(vxi);
++}
++
++
++/*	task must be current or locked		*/
++
++void	exit_vx_info(struct task_struct *p, int code)
++{
++	struct vx_info *vxi = p->vx_info;
++
++	if (vxi) {
++		atomic_dec(&vxi->cvirt.nr_threads);
++		vx_nproc_dec(p);
++
++		vxi->exit_code = code;
++		release_vx_info(vxi, p);
++	}
++}
++
++void	exit_vx_info_early(struct task_struct *p, int code)
++{
++	struct vx_info *vxi = p->vx_info;
++
++	if (vxi) {
++		if (vxi->vx_initpid == p->pid)
++			vx_exit_init(vxi, p, code);
++		if (vxi->vx_reaper == p)
++			vx_set_reaper(vxi, init_pid_ns.child_reaper);
++	}
++}
++
++
++/* vserver syscall commands below here */
++
++/* taks xid and vx_info functions */
++
++#include <asm/uaccess.h>
++
++
++int vc_task_xid(uint32_t id)
++{
++	xid_t xid;
++
++	if (id) {
++		struct task_struct *tsk;
++
++		read_lock(&tasklist_lock);
++		tsk = find_task_by_real_pid(id);
++		xid = (tsk) ? tsk->xid : -ESRCH;
++		read_unlock(&tasklist_lock);
++	} else
++		xid = vx_current_xid();
++	return xid;
++}
++
++
++int vc_vx_info(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_vx_info_v0 vc_data;
++
++	vc_data.xid = vxi->vx_id;
++	vc_data.initpid = vxi->vx_initpid;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++int vc_ctx_stat(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_stat_v0 vc_data;
++
++	vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
++	vc_data.tasks = atomic_read(&vxi->vx_tasks);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++/* context functions */
++
++int vc_ctx_create(uint32_t xid, void __user *data)
++{
++	struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
++	struct vx_info *new_vxi;
++	int ret;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	if ((xid > MAX_S_CONTEXT) || (xid < 2))
++		return -EINVAL;
++
++	new_vxi = __create_vx_info(xid);
++	if (IS_ERR(new_vxi))
++		return PTR_ERR(new_vxi);
++
++	/* initial flags */
++	new_vxi->vx_flags = vc_data.flagword;
++
++	ret = -ENOEXEC;
++	if (vs_state_change(new_vxi, VSC_STARTUP))
++		goto out;
++
++	ret = vx_migrate_task(current, new_vxi, (!data));
++	if (ret)
++		goto out;
++
++	/* return context id on success */
++	ret = new_vxi->vx_id;
++
++	/* get a reference for persistent contexts */
++	if ((vc_data.flagword & VXF_PERSISTENT))
++		vx_set_persistent(new_vxi);
++out:
++	release_vx_info(new_vxi, NULL);
++	put_vx_info(new_vxi);
++	return ret;
++}
++
++
++int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
++	int ret;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = vx_migrate_task(current, vxi, 0);
++	if (ret)
++		return ret;
++	if (vc_data.flagword & VXM_SET_INIT)
++		ret = vx_set_init(vxi, current);
++	if (ret)
++		return ret;
++	if (vc_data.flagword & VXM_SET_REAPER)
++		ret = vx_set_reaper(vxi, current);
++	return ret;
++}
++
++
++int vc_get_cflags(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_flags_v0 vc_data;
++
++	vc_data.flagword = vxi->vx_flags;
++
++	/* special STATE flag handling */
++	vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_cflags(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_flags_v0 vc_data;
++	uint64_t mask, trigger;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	/* special STATE flag handling */
++	mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
++	trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
++
++	if (vxi == current_vx_info()) {
++		/* if (trigger & VXF_STATE_SETUP)
++			vx_mask_cap_bset(vxi, current); */
++		if (trigger & VXF_STATE_INIT) {
++			int ret;
++
++			ret = vx_set_init(vxi, current);
++			if (ret)
++				return ret;
++			ret = vx_set_reaper(vxi, current);
++			if (ret)
++				return ret;
++		}
++	}
++
++	vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
++		vc_data.flagword, mask);
++	if (trigger & VXF_PERSISTENT)
++		vx_update_persistent(vxi);
++
++	return 0;
++}
++
++
++static inline uint64_t caps_from_cap_t(kernel_cap_t c)
++{
++	uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32);
++
++	// printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v);
++	return v;
++}
++
++static inline kernel_cap_t cap_t_from_caps(uint64_t v)
++{
++	kernel_cap_t c = __cap_empty_set;
++
++	c.cap[0] = v & 0xFFFFFFFF;
++	c.cap[1] = (v >> 32) & 0xFFFFFFFF;
++
++	// printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]);
++	return c;
++}
++
++
++static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
++{
++	if (bcaps)
++		*bcaps = caps_from_cap_t(vxi->vx_bcaps);
++	if (ccaps)
++		*ccaps = vxi->vx_ccaps;
++
++	return 0;
++}
++
++int vc_get_ccaps(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_caps_v1 vc_data;
++	int ret;
++
++	ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
++	if (ret)
++		return ret;
++	vc_data.cmask = ~0ULL;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++static int do_set_caps(struct vx_info *vxi,
++	uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
++{
++	uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps);
++
++#if 0
++	printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n",
++		bcaps, bmask, ccaps, cmask);
++#endif
++	vxi->vx_bcaps = cap_t_from_caps(
++		vs_mask_flags(bcold, bcaps, bmask));
++	vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
++
++	return 0;
++}
++
++int vc_set_ccaps(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_caps_v1 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
++}
++
++int vc_get_bcaps(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_bcaps vc_data;
++	int ret;
++
++	ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
++	if (ret)
++		return ret;
++	vc_data.bmask = ~0ULL;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_bcaps(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_bcaps vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
++}
++
++
++int vc_get_umask(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_umask vc_data;
++
++	vc_data.umask = vxi->vx_umask;
++	vc_data.mask = ~0ULL;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_umask(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_umask vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	vxi->vx_umask = vs_mask_flags(vxi->vx_umask,
++		vc_data.umask, vc_data.mask);
++	return 0;
++}
++
++
++int vc_get_badness(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_badness_v0 vc_data;
++
++	vc_data.bias = vxi->vx_badness_bias;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_badness(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_badness_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	vxi->vx_badness_bias = vc_data.bias;
++	return 0;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(free_vx_info);
++
+--- a/kernel/vserver/cvirt.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cvirt.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,304 @@
++/*
++ *  linux/kernel/vserver/cvirt.c
++ *
++ *  Virtual Server: Context Virtualization
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  broken out from limit.c
++ *  V0.02  added utsname stuff
++ *  V0.03  changed vcmds to vxi arg
++ *
++ */
++
++#include <linux/types.h>
++#include <linux/utsname.h>
++#include <linux/vs_cvirt.h>
++#include <linux/vserver/switch.h>
++#include <linux/vserver/cvirt_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
++{
++	struct vx_info *vxi = current_vx_info();
++
++	set_normalized_timespec(uptime,
++		uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec,
++		uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec);
++	if (!idle)
++		return;
++	set_normalized_timespec(idle,
++		idle->tv_sec - vxi->cvirt.bias_idle.tv_sec,
++		idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec);
++	return;
++}
++
++uint64_t vx_idle_jiffies(void)
++{
++	return init_task.utime + init_task.stime;
++}
++
++
++
++static inline uint32_t __update_loadavg(uint32_t load,
++	int wsize, int delta, int n)
++{
++	unsigned long long calc, prev;
++
++	/* just set it to n */
++	if (unlikely(delta >= wsize))
++		return (n << FSHIFT);
++
++	calc = delta * n;
++	calc <<= FSHIFT;
++	prev = (wsize - delta);
++	prev *= load;
++	calc += prev;
++	do_div(calc, wsize);
++	return calc;
++}
++
++
++void vx_update_load(struct vx_info *vxi)
++{
++	uint32_t now, last, delta;
++	unsigned int nr_running, nr_uninterruptible;
++	unsigned int total;
++	unsigned long flags;
++
++	spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
++
++	now = jiffies;
++	last = vxi->cvirt.load_last;
++	delta = now - last;
++
++	if (delta < 5*HZ)
++		goto out;
++
++	nr_running = atomic_read(&vxi->cvirt.nr_running);
++	nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible);
++	total = nr_running + nr_uninterruptible;
++
++	vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0],
++		60*HZ, delta, total);
++	vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1],
++		5*60*HZ, delta, total);
++	vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2],
++		15*60*HZ, delta, total);
++
++	vxi->cvirt.load_last = now;
++out:
++	atomic_inc(&vxi->cvirt.load_updates);
++	spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
++}
++
++
++/*
++ * Commands to do_syslog:
++ *
++ *      0 -- Close the log.  Currently a NOP.
++ *      1 -- Open the log. Currently a NOP.
++ *      2 -- Read from the log.
++ *      3 -- Read all messages remaining in the ring buffer.
++ *      4 -- Read and clear all messages remaining in the ring buffer
++ *      5 -- Clear ring buffer.
++ *      6 -- Disable printk's to console
++ *      7 -- Enable printk's to console
++ *      8 -- Set level of messages printed to console
++ *      9 -- Return number of unread characters in the log buffer
++ *     10 -- Return size of the log buffer
++ */
++int vx_do_syslog(int type, char __user *buf, int len)
++{
++	int error = 0;
++	int do_clear = 0;
++	struct vx_info *vxi = current_vx_info();
++	struct _vx_syslog *log;
++
++	if (!vxi)
++		return -EINVAL;
++	log = &vxi->cvirt.syslog;
++
++	switch (type) {
++	case 0:		/* Close log */
++	case 1:		/* Open log */
++		break;
++	case 2:		/* Read from log */
++		error = wait_event_interruptible(log->log_wait,
++			(log->log_start - log->log_end));
++		if (error)
++			break;
++		spin_lock_irq(&log->logbuf_lock);
++		spin_unlock_irq(&log->logbuf_lock);
++		break;
++	case 4:		/* Read/clear last kernel messages */
++		do_clear = 1;
++		/* fall through */
++	case 3:		/* Read last kernel messages */
++		return 0;
++
++	case 5:		/* Clear ring buffer */
++		return 0;
++
++	case 6:		/* Disable logging to console */
++	case 7:		/* Enable logging to console */
++	case 8:		/* Set level of messages printed to console */
++		break;
++
++	case 9:		/* Number of chars in the log buffer */
++		return 0;
++	case 10:	/* Size of the log buffer */
++		return 0;
++	default:
++		error = -EINVAL;
++		break;
++	}
++	return error;
++}
++
++
++/* virtual host info names */
++
++static char *vx_vhi_name(struct vx_info *vxi, int id)
++{
++	struct nsproxy *nsproxy;
++	struct uts_namespace *uts;
++
++	if (id == VHIN_CONTEXT)
++		return vxi->vx_name;
++
++	nsproxy = vxi->vx_nsproxy[0];
++	if (!nsproxy)
++		return NULL;
++
++	uts = nsproxy->uts_ns;
++	if (!uts)
++		return NULL;
++
++	switch (id) {
++	case VHIN_SYSNAME:
++		return uts->name.sysname;
++	case VHIN_NODENAME:
++		return uts->name.nodename;
++	case VHIN_RELEASE:
++		return uts->name.release;
++	case VHIN_VERSION:
++		return uts->name.version;
++	case VHIN_MACHINE:
++		return uts->name.machine;
++	case VHIN_DOMAINNAME:
++		return uts->name.domainname;
++	default:
++		return NULL;
++	}
++	return NULL;
++}
++
++int vc_set_vhi_name(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_vhi_name_v0 vc_data;
++	char *name;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	name = vx_vhi_name(vxi, vc_data.field);
++	if (!name)
++		return -EINVAL;
++
++	memcpy(name, vc_data.name, 65);
++	return 0;
++}
++
++int vc_get_vhi_name(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_vhi_name_v0 vc_data;
++	char *name;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	name = vx_vhi_name(vxi, vc_data.field);
++	if (!name)
++		return -EINVAL;
++
++	memcpy(vc_data.name, name, 65);
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++int vc_virt_stat(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_virt_stat_v0 vc_data;
++	struct _vx_cvirt *cvirt = &vxi->cvirt;
++	struct timespec uptime;
++
++	do_posix_clock_monotonic_gettime(&uptime);
++	set_normalized_timespec(&uptime,
++		uptime.tv_sec - cvirt->bias_uptime.tv_sec,
++		uptime.tv_nsec - cvirt->bias_uptime.tv_nsec);
++
++	vc_data.offset = timeval_to_ns(&cvirt->bias_tv);
++	vc_data.uptime = timespec_to_ns(&uptime);
++	vc_data.nr_threads = atomic_read(&cvirt->nr_threads);
++	vc_data.nr_running = atomic_read(&cvirt->nr_running);
++	vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible);
++	vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold);
++	vc_data.nr_forks = atomic_read(&cvirt->total_forks);
++	vc_data.load[0] = cvirt->load[0];
++	vc_data.load[1] = cvirt->load[1];
++	vc_data.load[2] = cvirt->load[2];
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++#ifdef CONFIG_VSERVER_VTIME
++
++/* virtualized time base */
++
++void vx_gettimeofday(struct timeval *tv)
++{
++	struct vx_info *vxi;
++
++	do_gettimeofday(tv);
++	if (!vx_flags(VXF_VIRT_TIME, 0))
++		return;
++
++	vxi = current_vx_info();
++	tv->tv_sec += vxi->cvirt.bias_tv.tv_sec;
++	tv->tv_usec += vxi->cvirt.bias_tv.tv_usec;
++
++	if (tv->tv_usec >= USEC_PER_SEC) {
++		tv->tv_sec++;
++		tv->tv_usec -= USEC_PER_SEC;
++	} else if (tv->tv_usec < 0) {
++		tv->tv_sec--;
++		tv->tv_usec += USEC_PER_SEC;
++	}
++}
++
++int vx_settimeofday(struct timespec *ts)
++{
++	struct timeval tv;
++	struct vx_info *vxi;
++
++	if (!vx_flags(VXF_VIRT_TIME, 0))
++		return do_settimeofday(ts);
++
++	do_gettimeofday(&tv);
++	vxi = current_vx_info();
++	vxi->cvirt.bias_tv.tv_sec = ts->tv_sec - tv.tv_sec;
++	vxi->cvirt.bias_tv.tv_usec =
++		(ts->tv_nsec/NSEC_PER_USEC) - tv.tv_usec;
++	return 0;
++}
++
++#endif
++
+--- a/kernel/vserver/cvirt_init.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cvirt_init.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,69 @@
++
++
++extern uint64_t vx_idle_jiffies(void);
++
++static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
++{
++	uint64_t idle_jiffies = vx_idle_jiffies();
++	uint64_t nsuptime;
++
++	do_posix_clock_monotonic_gettime(&cvirt->bias_uptime);
++	nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec
++		* NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec;
++	cvirt->bias_clock = nsec_to_clock_t(nsuptime);
++	cvirt->bias_tv.tv_sec = 0;
++	cvirt->bias_tv.tv_usec = 0;
++
++	jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
++	atomic_set(&cvirt->nr_threads, 0);
++	atomic_set(&cvirt->nr_running, 0);
++	atomic_set(&cvirt->nr_uninterruptible, 0);
++	atomic_set(&cvirt->nr_onhold, 0);
++
++	spin_lock_init(&cvirt->load_lock);
++	cvirt->load_last = jiffies;
++	atomic_set(&cvirt->load_updates, 0);
++	cvirt->load[0] = 0;
++	cvirt->load[1] = 0;
++	cvirt->load[2] = 0;
++	atomic_set(&cvirt->total_forks, 0);
++
++	spin_lock_init(&cvirt->syslog.logbuf_lock);
++	init_waitqueue_head(&cvirt->syslog.log_wait);
++	cvirt->syslog.log_start = 0;
++	cvirt->syslog.log_end = 0;
++	cvirt->syslog.con_start = 0;
++	cvirt->syslog.logged_chars = 0;
++}
++
++static inline
++void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
++{
++	// cvirt_pc->cpustat = { 0 };
++}
++
++static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt)
++{
++	int value;
++
++	vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)),
++		"!!! cvirt: %p[nr_threads] = %d on exit.",
++		cvirt, value);
++	vxwprintk_xid((value = atomic_read(&cvirt->nr_running)),
++		"!!! cvirt: %p[nr_running] = %d on exit.",
++		cvirt, value);
++	vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)),
++		"!!! cvirt: %p[nr_uninterruptible] = %d on exit.",
++		cvirt, value);
++	vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)),
++		"!!! cvirt: %p[nr_onhold] = %d on exit.",
++		cvirt, value);
++	return;
++}
++
++static inline
++void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
++{
++	return;
++}
++
+--- a/kernel/vserver/cvirt_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/cvirt_proc.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,135 @@
++#ifndef _VX_CVIRT_PROC_H
++#define _VX_CVIRT_PROC_H
++
++#include <linux/nsproxy.h>
++#include <linux/mnt_namespace.h>
++#include <linux/ipc_namespace.h>
++#include <linux/utsname.h>
++#include <linux/ipc.h>
++
++
++static inline
++int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer)
++{
++	struct mnt_namespace *ns;
++	struct uts_namespace *uts;
++	struct ipc_namespace *ipc;
++	struct path path;
++	char *pstr, *root;
++	int length = 0;
++
++	if (!nsproxy)
++		goto out;
++
++	length += sprintf(buffer + length,
++		"NSProxy:\t%p [%p,%p,%p]\n",
++		nsproxy, nsproxy->mnt_ns,
++		nsproxy->uts_ns, nsproxy->ipc_ns);
++
++	ns = nsproxy->mnt_ns;
++	if (!ns)
++		goto skip_ns;
++
++	pstr = kmalloc(PATH_MAX, GFP_KERNEL);
++	if (!pstr)
++		goto skip_ns;
++
++	path.mnt = ns->root;
++	path.dentry = ns->root->mnt_root;
++	root = d_path(&path, pstr, PATH_MAX - 2);
++	length += sprintf(buffer + length,
++		"Namespace:\t%p [#%u]\n"
++		"RootPath:\t%s\n",
++		ns, atomic_read(&ns->count),
++		root);
++	kfree(pstr);
++skip_ns:
++
++	uts = nsproxy->uts_ns;
++	if (!uts)
++		goto skip_uts;
++
++	length += sprintf(buffer + length,
++		"SysName:\t%.*s\n"
++		"NodeName:\t%.*s\n"
++		"Release:\t%.*s\n"
++		"Version:\t%.*s\n"
++		"Machine:\t%.*s\n"
++		"DomainName:\t%.*s\n",
++		__NEW_UTS_LEN, uts->name.sysname,
++		__NEW_UTS_LEN, uts->name.nodename,
++		__NEW_UTS_LEN, uts->name.release,
++		__NEW_UTS_LEN, uts->name.version,
++		__NEW_UTS_LEN, uts->name.machine,
++		__NEW_UTS_LEN, uts->name.domainname);
++skip_uts:
++
++	ipc = nsproxy->ipc_ns;
++	if (!ipc)
++		goto skip_ipc;
++
++	length += sprintf(buffer + length,
++		"SEMS:\t\t%d %d %d %d  %d\n"
++		"MSG:\t\t%d %d %d\n"
++		"SHM:\t\t%lu %lu  %d %d\n",
++		ipc->sem_ctls[0], ipc->sem_ctls[1],
++		ipc->sem_ctls[2], ipc->sem_ctls[3],
++		ipc->used_sems,
++		ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni,
++		(unsigned long)ipc->shm_ctlmax,
++		(unsigned long)ipc->shm_ctlall,
++		ipc->shm_ctlmni, ipc->shm_tot);
++skip_ipc:
++out:
++	return length;
++}
++
++
++#include <linux/sched.h>
++
++#define LOAD_INT(x) ((x) >> FSHIFT)
++#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
++
++static inline
++int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
++{
++	int length = 0;
++	int a, b, c;
++
++	length += sprintf(buffer + length,
++		"BiasUptime:\t%lu.%02lu\n",
++		(unsigned long)cvirt->bias_uptime.tv_sec,
++		(cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100)));
++
++	a = cvirt->load[0] + (FIXED_1 / 200);
++	b = cvirt->load[1] + (FIXED_1 / 200);
++	c = cvirt->load[2] + (FIXED_1 / 200);
++	length += sprintf(buffer + length,
++		"nr_threads:\t%d\n"
++		"nr_running:\t%d\n"
++		"nr_unintr:\t%d\n"
++		"nr_onhold:\t%d\n"
++		"load_updates:\t%d\n"
++		"loadavg:\t%d.%02d %d.%02d %d.%02d\n"
++		"total_forks:\t%d\n",
++		atomic_read(&cvirt->nr_threads),
++		atomic_read(&cvirt->nr_running),
++		atomic_read(&cvirt->nr_uninterruptible),
++		atomic_read(&cvirt->nr_onhold),
++		atomic_read(&cvirt->load_updates),
++		LOAD_INT(a), LOAD_FRAC(a),
++		LOAD_INT(b), LOAD_FRAC(b),
++		LOAD_INT(c), LOAD_FRAC(c),
++		atomic_read(&cvirt->total_forks));
++	return length;
++}
++
++static inline
++int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc,
++	char *buffer, int cpu)
++{
++	int length = 0;
++	return length;
++}
++
++#endif	/* _VX_CVIRT_PROC_H */
+--- a/kernel/vserver/debug.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/debug.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,32 @@
++/*
++ *  kernel/vserver/debug.c
++ *
++ *  Copyright (C) 2005-2007 Herbert Pötzl
++ *
++ *  V0.01  vx_info dump support
++ *
++ */
++
++#include <linux/module.h>
++
++#include <linux/vserver/context.h>
++
++
++void	dump_vx_info(struct vx_info *vxi, int level)
++{
++	printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id,
++		atomic_read(&vxi->vx_usecnt),
++		atomic_read(&vxi->vx_tasks),
++		vxi->vx_state);
++	if (level > 0) {
++		__dump_vx_limit(&vxi->limit);
++		__dump_vx_sched(&vxi->sched);
++		__dump_vx_cvirt(&vxi->cvirt);
++		__dump_vx_cacct(&vxi->cacct);
++	}
++	printk("---\n");
++}
++
++
++EXPORT_SYMBOL_GPL(dump_vx_info);
++
+--- a/kernel/vserver/device.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/device.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,443 @@
++/*
++ *  linux/kernel/vserver/device.c
++ *
++ *  Linux-VServer: Device Support
++ *
++ *  Copyright (C) 2006  Herbert Pötzl
++ *  Copyright (C) 2007  Daniel Hokka Zakrisson
++ *
++ *  V0.01  device mapping basics
++ *  V0.02  added defaults
++ *
++ */
++
++#include <linux/slab.h>
++#include <linux/rcupdate.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/hash.h>
++
++#include <asm/errno.h>
++#include <asm/uaccess.h>
++#include <linux/vserver/base.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/context.h>
++#include <linux/vserver/device.h>
++#include <linux/vserver/device_cmd.h>
++
++
++#define DMAP_HASH_BITS	4
++
++
++struct vs_mapping {
++	union {
++		struct hlist_node hlist;
++		struct list_head list;
++	} u;
++#define dm_hlist	u.hlist
++#define dm_list		u.list
++	xid_t xid;
++	dev_t device;
++	struct vx_dmap_target target;
++};
++
++
++static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS];
++
++static spinlock_t dmap_main_hash_lock = SPIN_LOCK_UNLOCKED;
++
++static struct vx_dmap_target dmap_defaults[2] = {
++	{ .flags = DATTR_OPEN },
++	{ .flags = DATTR_OPEN },
++};
++
++
++struct kmem_cache *dmap_cachep __read_mostly;
++
++int __init dmap_cache_init(void)
++{
++	dmap_cachep = kmem_cache_create("dmap_cache",
++		sizeof(struct vs_mapping), 0,
++		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++	return 0;
++}
++
++__initcall(dmap_cache_init);
++
++
++static inline unsigned int __hashval(dev_t dev, int bits)
++{
++	return hash_long((unsigned long)dev, bits);
++}
++
++
++/*	__hash_mapping()
++ *	add the mapping to the hash table
++ */
++static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm)
++{
++	spinlock_t *hash_lock = &dmap_main_hash_lock;
++	struct hlist_head *head, *hash = dmap_main_hash;
++	int device = vdm->device;
++
++	spin_lock(hash_lock);
++	vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x",
++		vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target);
++
++	head = &hash[__hashval(device, DMAP_HASH_BITS)];
++	hlist_add_head(&vdm->dm_hlist, head);
++	spin_unlock(hash_lock);
++}
++
++
++static inline int __mode_to_default(umode_t mode)
++{
++	switch (mode) {
++	case S_IFBLK:
++		return 0;
++	case S_IFCHR:
++		return 1;
++	default:
++		BUG();
++	}
++}
++
++
++/*	__set_default()
++ *	set a default
++ */
++static inline void __set_default(struct vx_info *vxi, umode_t mode,
++	struct vx_dmap_target *vdmt)
++{
++	spinlock_t *hash_lock = &dmap_main_hash_lock;
++	spin_lock(hash_lock);
++
++	if (vxi)
++		vxi->dmap.targets[__mode_to_default(mode)] = *vdmt;
++	else
++		dmap_defaults[__mode_to_default(mode)] = *vdmt;
++
++
++	spin_unlock(hash_lock);
++
++	vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x",
++		  vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags);
++}
++
++
++/*	__remove_default()
++ *	remove a default
++ */
++static inline int __remove_default(struct vx_info *vxi, umode_t mode)
++{
++	spinlock_t *hash_lock = &dmap_main_hash_lock;
++	spin_lock(hash_lock);
++
++	if (vxi)
++		vxi->dmap.targets[__mode_to_default(mode)].flags = 0;
++	else	/* remove == reset */
++		dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode;
++
++	spin_unlock(hash_lock);
++	return 0;
++}
++
++
++/*	__find_mapping()
++ *	find a mapping in the hash table
++ *
++ *	caller must hold hash_lock
++ */
++static inline int __find_mapping(xid_t xid, dev_t device, umode_t mode,
++	struct vs_mapping **local, struct vs_mapping **global)
++{
++	struct hlist_head *hash = dmap_main_hash;
++	struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)];
++	struct hlist_node *pos;
++	struct vs_mapping *vdm;
++
++	*local = NULL;
++	if (global)
++		*global = NULL;
++
++	hlist_for_each(pos, head) {
++		vdm = hlist_entry(pos, struct vs_mapping, dm_hlist);
++
++		if ((vdm->device == device) &&
++			!((vdm->target.flags ^ mode) & S_IFMT)) {
++			if (vdm->xid == xid) {
++				*local = vdm;
++				return 1;
++			} else if (global && vdm->xid == 0)
++				*global = vdm;
++		}
++	}
++
++	if (global && *global)
++		return 0;
++	else
++		return -ENOENT;
++}
++
++
++/*	__lookup_mapping()
++ *	find a mapping and store the result in target and flags
++ */
++static inline int __lookup_mapping(struct vx_info *vxi,
++	dev_t device, dev_t *target, int *flags, umode_t mode)
++{
++	spinlock_t *hash_lock = &dmap_main_hash_lock;
++	struct vs_mapping *vdm, *global;
++	struct vx_dmap_target *vdmt;
++	int ret = 0;
++	xid_t xid = vxi->vx_id;
++	int index;
++
++	spin_lock(hash_lock);
++	if (__find_mapping(xid, device, mode, &vdm, &global) > 0) {
++		ret = 1;
++		vdmt = &vdm->target;
++		goto found;
++	}
++
++	index = __mode_to_default(mode);
++	if (vxi && vxi->dmap.targets[index].flags) {
++		ret = 2;
++		vdmt = &vxi->dmap.targets[index];
++	} else if (global) {
++		ret = 3;
++		vdmt = &global->target;
++		goto found;
++	} else {
++		ret = 4;
++		vdmt = &dmap_defaults[index];
++	}
++
++found:
++	if (target && (vdmt->flags & DATTR_REMAP))
++		*target = vdmt->target;
++	else if (target)
++		*target = device;
++	if (flags)
++		*flags = vdmt->flags;
++
++	spin_unlock(hash_lock);
++
++	return ret;
++}
++
++
++/*	__remove_mapping()
++ *	remove a mapping from the hash table
++ */
++static inline int __remove_mapping(struct vx_info *vxi, dev_t device,
++	umode_t mode)
++{
++	spinlock_t *hash_lock = &dmap_main_hash_lock;
++	struct vs_mapping *vdm = NULL;
++	int ret = 0;
++
++	spin_lock(hash_lock);
++
++	ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm,
++		NULL);
++	vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x",
++		vxi, vxi ? vxi->vx_id : 0, device, mode);
++	if (ret < 0)
++		goto out;
++	hlist_del(&vdm->dm_hlist);
++
++out:
++	spin_unlock(hash_lock);
++	if (vdm)
++		kmem_cache_free(dmap_cachep, vdm);
++	return ret;
++}
++
++
++
++int vs_map_device(struct vx_info *vxi,
++	dev_t device, dev_t *target, umode_t mode)
++{
++	int ret, flags = DATTR_MASK;
++
++	if (!vxi) {
++		if (target)
++			*target = device;
++		goto out;
++	}
++	ret = __lookup_mapping(vxi, device, target, &flags, mode);
++	vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d",
++		device, target ? *target : 0, flags, mode, ret);
++out:
++	return (flags & DATTR_MASK);
++}
++
++
++
++static int do_set_mapping(struct vx_info *vxi,
++	dev_t device, dev_t target, int flags, umode_t mode)
++{
++	if (device) {
++		struct vs_mapping *new;
++
++		new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL);
++		if (!new)
++			return -ENOMEM;
++
++		INIT_HLIST_NODE(&new->dm_hlist);
++		new->device = device;
++		new->target.target = target;
++		new->target.flags = flags | mode;
++		new->xid = (vxi ? vxi->vx_id : 0);
++
++		vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags);
++		__hash_mapping(vxi, new);
++	} else {
++		struct vx_dmap_target new = {
++			.target = target,
++			.flags = flags | mode,
++		};
++		__set_default(vxi, mode, &new);
++	}
++	return 0;
++}
++
++
++static int do_unset_mapping(struct vx_info *vxi,
++	dev_t device, dev_t target, int flags, umode_t mode)
++{
++	int ret = -EINVAL;
++
++	if (device) {
++		ret = __remove_mapping(vxi, device, mode);
++		if (ret < 0)
++			goto out;
++	} else {
++		ret = __remove_default(vxi, mode);
++		if (ret < 0)
++			goto out;
++	}
++
++out:
++	return ret;
++}
++
++
++static inline int __user_device(const char __user *name, dev_t *dev,
++	umode_t *mode)
++{
++	struct nameidata nd;
++	int ret;
++
++	if (!name) {
++		*dev = 0;
++		return 0;
++	}
++	ret = user_lpath(name, &nd.path);
++	if (ret)
++		return ret;
++	if (nd.path.dentry->d_inode) {
++		*dev = nd.path.dentry->d_inode->i_rdev;
++		*mode = nd.path.dentry->d_inode->i_mode;
++	}
++	path_put(&nd.path);
++	return 0;
++}
++
++static inline int __mapping_mode(dev_t device, dev_t target,
++	umode_t device_mode, umode_t target_mode, umode_t *mode)
++{
++	if (device)
++		*mode = device_mode & S_IFMT;
++	else if (target)
++		*mode = target_mode & S_IFMT;
++	else
++		return -EINVAL;
++
++	/* if both given, device and target mode have to match */
++	if (device && target &&
++		((device_mode ^ target_mode) & S_IFMT))
++		return -EINVAL;
++	return 0;
++}
++
++
++static inline int do_mapping(struct vx_info *vxi, const char __user *device_path,
++	const char __user *target_path, int flags, int set)
++{
++	dev_t device = ~0, target = ~0;
++	umode_t device_mode = 0, target_mode = 0, mode;
++	int ret;
++
++	ret = __user_device(device_path, &device, &device_mode);
++	if (ret)
++		return ret;
++	ret = __user_device(target_path, &target, &target_mode);
++	if (ret)
++		return ret;
++
++	ret = __mapping_mode(device, target,
++		device_mode, target_mode, &mode);
++	if (ret)
++		return ret;
++
++	if (set)
++		return do_set_mapping(vxi, device, target,
++			flags, mode);
++	else
++		return do_unset_mapping(vxi, device, target,
++			flags, mode);
++}
++
++
++int vc_set_mapping(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_set_mapping_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_mapping(vxi, vc_data.device, vc_data.target,
++		vc_data.flags, 1);
++}
++
++int vc_unset_mapping(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_set_mapping_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_mapping(vxi, vc_data.device, vc_data.target,
++		vc_data.flags, 0);
++}
++
++
++#ifdef	CONFIG_COMPAT
++
++int vc_set_mapping_x32(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_set_mapping_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
++		compat_ptr(vc_data.target_ptr), vc_data.flags, 1);
++}
++
++int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_set_mapping_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
++		compat_ptr(vc_data.target_ptr), vc_data.flags, 0);
++}
++
++#endif	/* CONFIG_COMPAT */
++
++
+--- a/kernel/vserver/dlimit.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/dlimit.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,529 @@
++/*
++ *  linux/kernel/vserver/dlimit.c
++ *
++ *  Virtual Server: Context Disk Limits
++ *
++ *  Copyright (C) 2004-2009  Herbert Pötzl
++ *
++ *  V0.01  initial version
++ *  V0.02  compat32 splitup
++ *  V0.03  extended interface
++ *
++ */
++
++#include <linux/statfs.h>
++#include <linux/sched.h>
++#include <linux/namei.h>
++#include <linux/vs_tag.h>
++#include <linux/vs_dlimit.h>
++#include <linux/vserver/dlimit_cmd.h>
++
++#include <asm/uaccess.h>
++
++/*	__alloc_dl_info()
++
++	* allocate an initialized dl_info struct
++	* doesn't make it visible (hash)			*/
++
++static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
++{
++	struct dl_info *new = NULL;
++
++	vxdprintk(VXD_CBIT(dlim, 5),
++		"alloc_dl_info(%p,%d)*", sb, tag);
++
++	/* would this benefit from a slab cache? */
++	new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
++	if (!new)
++		return 0;
++
++	memset(new, 0, sizeof(struct dl_info));
++	new->dl_tag = tag;
++	new->dl_sb = sb;
++	INIT_RCU_HEAD(&new->dl_rcu);
++	INIT_HLIST_NODE(&new->dl_hlist);
++	spin_lock_init(&new->dl_lock);
++	atomic_set(&new->dl_refcnt, 0);
++	atomic_set(&new->dl_usecnt, 0);
++
++	/* rest of init goes here */
++
++	vxdprintk(VXD_CBIT(dlim, 4),
++		"alloc_dl_info(%p,%d) = %p", sb, tag, new);
++	return new;
++}
++
++/*	__dealloc_dl_info()
++
++	* final disposal of dl_info				*/
++
++static void __dealloc_dl_info(struct dl_info *dli)
++{
++	vxdprintk(VXD_CBIT(dlim, 4),
++		"dealloc_dl_info(%p)", dli);
++
++	dli->dl_hlist.next = LIST_POISON1;
++	dli->dl_tag = -1;
++	dli->dl_sb = 0;
++
++	BUG_ON(atomic_read(&dli->dl_usecnt));
++	BUG_ON(atomic_read(&dli->dl_refcnt));
++
++	kfree(dli);
++}
++
++
++/*	hash table for dl_info hash */
++
++#define DL_HASH_SIZE	13
++
++struct hlist_head dl_info_hash[DL_HASH_SIZE];
++
++static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
++
++
++static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
++{
++	return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
++}
++
++
++
++/*	__hash_dl_info()
++
++	* add the dli to the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __hash_dl_info(struct dl_info *dli)
++{
++	struct hlist_head *head;
++
++	vxdprintk(VXD_CBIT(dlim, 6),
++		"__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
++	get_dl_info(dli);
++	head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
++	hlist_add_head_rcu(&dli->dl_hlist, head);
++}
++
++/*	__unhash_dl_info()
++
++	* remove the dli from the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __unhash_dl_info(struct dl_info *dli)
++{
++	vxdprintk(VXD_CBIT(dlim, 6),
++		"__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
++	hlist_del_rcu(&dli->dl_hlist);
++	put_dl_info(dli);
++}
++
++
++/*	__lookup_dl_info()
++
++	* requires the rcu_read_lock()
++	* doesn't increment the dl_refcnt			*/
++
++static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
++{
++	struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
++	struct hlist_node *pos;
++	struct dl_info *dli;
++
++	hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
++
++		if (dli->dl_tag == tag && dli->dl_sb == sb) {
++			return dli;
++		}
++	}
++	return NULL;
++}
++
++
++struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
++{
++	struct dl_info *dli;
++
++	rcu_read_lock();
++	dli = get_dl_info(__lookup_dl_info(sb, tag));
++	vxdprintk(VXD_CBIT(dlim, 7),
++		"locate_dl_info(%p,#%d) = %p", sb, tag, dli);
++	rcu_read_unlock();
++	return dli;
++}
++
++void rcu_free_dl_info(struct rcu_head *head)
++{
++	struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
++	int usecnt, refcnt;
++
++	BUG_ON(!dli || !head);
++
++	usecnt = atomic_read(&dli->dl_usecnt);
++	BUG_ON(usecnt < 0);
++
++	refcnt = atomic_read(&dli->dl_refcnt);
++	BUG_ON(refcnt < 0);
++
++	vxdprintk(VXD_CBIT(dlim, 3),
++		"rcu_free_dl_info(%p)", dli);
++	if (!usecnt)
++		__dealloc_dl_info(dli);
++	else
++		printk("!!! rcu didn't free\n");
++}
++
++
++
++
++static int do_addrem_dlimit(uint32_t id, const char __user *name,
++	uint32_t flags, int add)
++{
++	struct path path;
++	int ret;
++
++	ret = user_lpath(name, &path);
++	if (!ret) {
++		struct super_block *sb;
++		struct dl_info *dli;
++
++		ret = -EINVAL;
++		if (!path.dentry->d_inode)
++			goto out_release;
++		if (!(sb = path.dentry->d_inode->i_sb))
++			goto out_release;
++
++		if (add) {
++			dli = __alloc_dl_info(sb, id);
++			spin_lock(&dl_info_hash_lock);
++
++			ret = -EEXIST;
++			if (__lookup_dl_info(sb, id))
++				goto out_unlock;
++			__hash_dl_info(dli);
++			dli = NULL;
++		} else {
++			spin_lock(&dl_info_hash_lock);
++			dli = __lookup_dl_info(sb, id);
++
++			ret = -ESRCH;
++			if (!dli)
++				goto out_unlock;
++			__unhash_dl_info(dli);
++		}
++		ret = 0;
++	out_unlock:
++		spin_unlock(&dl_info_hash_lock);
++		if (add && dli)
++			__dealloc_dl_info(dli);
++	out_release:
++		path_put(&path);
++	}
++	return ret;
++}
++
++int vc_add_dlimit(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_base_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
++}
++
++int vc_rem_dlimit(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_base_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_add_dlimit_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_addrem_dlimit(id,
++		compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
++}
++
++int vc_rem_dlimit_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_addrem_dlimit(id,
++		compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
++}
++
++#endif	/* CONFIG_COMPAT */
++
++
++static inline
++int do_set_dlimit(uint32_t id, const char __user *name,
++	uint32_t space_used, uint32_t space_total,
++	uint32_t inodes_used, uint32_t inodes_total,
++	uint32_t reserved, uint32_t flags)
++{
++	struct path path;
++	int ret;
++
++	ret = user_lpath(name, &path);
++	if (!ret) {
++		struct super_block *sb;
++		struct dl_info *dli;
++
++		ret = -EINVAL;
++		if (!path.dentry->d_inode)
++			goto out_release;
++		if (!(sb = path.dentry->d_inode->i_sb))
++			goto out_release;
++
++		/* sanity checks */
++		if ((reserved != CDLIM_KEEP &&
++			reserved > 100) ||
++			(inodes_used != CDLIM_KEEP &&
++			inodes_used > inodes_total) ||
++			(space_used != CDLIM_KEEP &&
++			space_used > space_total))
++			goto out_release;
++
++		ret = -ESRCH;
++		dli = locate_dl_info(sb, id);
++		if (!dli)
++			goto out_release;
++
++		spin_lock(&dli->dl_lock);
++
++		if (inodes_used != CDLIM_KEEP)
++			dli->dl_inodes_used = inodes_used;
++		if (inodes_total != CDLIM_KEEP)
++			dli->dl_inodes_total = inodes_total;
++		if (space_used != CDLIM_KEEP)
++			dli->dl_space_used = dlimit_space_32to64(
++				space_used, flags, DLIMS_USED);
++
++		if (space_total == CDLIM_INFINITY)
++			dli->dl_space_total = DLIM_INFINITY;
++		else if (space_total != CDLIM_KEEP)
++			dli->dl_space_total = dlimit_space_32to64(
++				space_total, flags, DLIMS_TOTAL);
++
++		if (reserved != CDLIM_KEEP)
++			dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
++
++		spin_unlock(&dli->dl_lock);
++
++		put_dl_info(dli);
++		ret = 0;
++
++	out_release:
++		path_put(&path);
++	}
++	return ret;
++}
++
++int vc_set_dlimit(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_dlimit(id, vc_data.name,
++		vc_data.space_used, vc_data.space_total,
++		vc_data.inodes_used, vc_data.inodes_total,
++		vc_data.reserved, vc_data.flags);
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_set_dlimit_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
++		vc_data.space_used, vc_data.space_total,
++		vc_data.inodes_used, vc_data.inodes_total,
++		vc_data.reserved, vc_data.flags);
++}
++
++#endif	/* CONFIG_COMPAT */
++
++
++static inline
++int do_get_dlimit(uint32_t id, const char __user *name,
++	uint32_t *space_used, uint32_t *space_total,
++	uint32_t *inodes_used, uint32_t *inodes_total,
++	uint32_t *reserved, uint32_t *flags)
++{
++	struct path path;
++	int ret;
++
++	ret = user_lpath(name, &path);
++	if (!ret) {
++		struct super_block *sb;
++		struct dl_info *dli;
++
++		ret = -EINVAL;
++		if (!path.dentry->d_inode)
++			goto out_release;
++		if (!(sb = path.dentry->d_inode->i_sb))
++			goto out_release;
++
++		ret = -ESRCH;
++		dli = locate_dl_info(sb, id);
++		if (!dli)
++			goto out_release;
++
++		spin_lock(&dli->dl_lock);
++		*inodes_used = dli->dl_inodes_used;
++		*inodes_total = dli->dl_inodes_total;
++
++		*space_used = dlimit_space_64to32(
++			dli->dl_space_used, flags, DLIMS_USED);
++
++		if (dli->dl_space_total == DLIM_INFINITY)
++			*space_total = CDLIM_INFINITY;
++		else
++			*space_total = dlimit_space_64to32(
++				dli->dl_space_total, flags, DLIMS_TOTAL);
++
++		*reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
++		spin_unlock(&dli->dl_lock);
++
++		put_dl_info(dli);
++		ret = -EFAULT;
++
++		ret = 0;
++	out_release:
++		path_put(&path);
++	}
++	return ret;
++}
++
++
++int vc_get_dlimit(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_v0 vc_data;
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_get_dlimit(id, vc_data.name,
++		&vc_data.space_used, &vc_data.space_total,
++		&vc_data.inodes_used, &vc_data.inodes_total,
++		&vc_data.reserved, &vc_data.flags);
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_get_dlimit_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_ctx_dlimit_v0_x32 vc_data;
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
++		&vc_data.space_used, &vc_data.space_total,
++		&vc_data.inodes_used, &vc_data.inodes_total,
++		&vc_data.reserved, &vc_data.flags);
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++#endif	/* CONFIG_COMPAT */
++
++
++void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
++{
++	struct dl_info *dli;
++	__u64 blimit, bfree, bavail;
++	__u32 ifree;
++
++	dli = locate_dl_info(sb, dx_current_tag());
++	if (!dli)
++		return;
++
++	spin_lock(&dli->dl_lock);
++	if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
++		goto no_ilim;
++
++	/* reduce max inodes available to limit */
++	if (buf->f_files > dli->dl_inodes_total)
++		buf->f_files = dli->dl_inodes_total;
++
++	ifree = dli->dl_inodes_total - dli->dl_inodes_used;
++	/* reduce free inodes to min */
++	if (ifree < buf->f_ffree)
++		buf->f_ffree = ifree;
++
++no_ilim:
++	if (dli->dl_space_total == DLIM_INFINITY)
++		goto no_blim;
++
++	blimit = dli->dl_space_total >> sb->s_blocksize_bits;
++
++	if (dli->dl_space_total < dli->dl_space_used)
++		bfree = 0;
++	else
++		bfree = (dli->dl_space_total - dli->dl_space_used)
++			>> sb->s_blocksize_bits;
++
++	bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
++	if (bavail < dli->dl_space_used)
++		bavail = 0;
++	else
++		bavail = (bavail - dli->dl_space_used)
++			>> sb->s_blocksize_bits;
++
++	/* reduce max space available to limit */
++	if (buf->f_blocks > blimit)
++		buf->f_blocks = blimit;
++
++	/* reduce free space to min */
++	if (bfree < buf->f_bfree)
++		buf->f_bfree = bfree;
++
++	/* reduce avail space to min */
++	if (bavail < buf->f_bavail)
++		buf->f_bavail = bavail;
++
++no_blim:
++	spin_unlock(&dli->dl_lock);
++	put_dl_info(dli);
++
++	return;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(locate_dl_info);
++EXPORT_SYMBOL_GPL(rcu_free_dl_info);
++
+--- a/kernel/vserver/helper.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/helper.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,223 @@
++/*
++ *  linux/kernel/vserver/helper.c
++ *
++ *  Virtual Context Support
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  basic helper
++ *
++ */
++
++#include <linux/kmod.h>
++#include <linux/reboot.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vserver/signal.h>
++
++
++char vshelper_path[255] = "/sbin/vshelper";
++
++
++static int do_vshelper(char *name, char *argv[], char *envp[], int sync)
++{
++	int ret;
++
++	if ((ret = call_usermodehelper(name, argv, envp, sync))) {
++		printk(	KERN_WARNING
++			"%s: (%s %s) returned %s with %d\n",
++			name, argv[1], argv[2],
++			sync ? "sync" : "async", ret);
++	}
++	vxdprintk(VXD_CBIT(switch, 4),
++		"%s: (%s %s) returned %s with %d",
++		name, argv[1], argv[2], sync ? "sync" : "async", ret);
++	return ret;
++}
++
++/*
++ *      vshelper path is set via /proc/sys
++ *      invoked by vserver sys_reboot(), with
++ *      the following arguments
++ *
++ *      argv [0] = vshelper_path;
++ *      argv [1] = action: "restart", "halt", "poweroff", ...
++ *      argv [2] = context identifier
++ *
++ *      envp [*] = type-specific parameters
++ */
++
++long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg)
++{
++	char id_buf[8], cmd_buf[16];
++	char uid_buf[16], pid_buf[16];
++	int ret;
++
++	char *argv[] = {vshelper_path, NULL, id_buf, 0};
++	char *envp[] = {"HOME=/", "TERM=linux",
++			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
++			uid_buf, pid_buf, cmd_buf, 0};
++
++	if (vx_info_state(vxi, VXS_HELPER))
++		return -EAGAIN;
++	vxi->vx_state |= VXS_HELPER;
++
++	snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
++
++	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
++	snprintf(uid_buf, sizeof(uid_buf)-1, "VS_UID=%d", current_uid());
++	snprintf(pid_buf, sizeof(pid_buf)-1, "VS_PID=%d", current->pid);
++
++	switch (cmd) {
++	case LINUX_REBOOT_CMD_RESTART:
++		argv[1] = "restart";
++		break;
++
++	case LINUX_REBOOT_CMD_HALT:
++		argv[1] = "halt";
++		break;
++
++	case LINUX_REBOOT_CMD_POWER_OFF:
++		argv[1] = "poweroff";
++		break;
++
++	case LINUX_REBOOT_CMD_SW_SUSPEND:
++		argv[1] = "swsusp";
++		break;
++
++	case LINUX_REBOOT_CMD_OOM:
++		argv[1] = "oom";
++		break;
++
++	default:
++		vxi->vx_state &= ~VXS_HELPER;
++		return 0;
++	}
++
++	ret = do_vshelper(vshelper_path, argv, envp, 0);
++	vxi->vx_state &= ~VXS_HELPER;
++	__wakeup_vx_info(vxi);
++	return (ret) ? -EPERM : 0;
++}
++
++
++long vs_reboot(unsigned int cmd, void __user *arg)
++{
++	struct vx_info *vxi = current_vx_info();
++	long ret = 0;
++
++	vxdprintk(VXD_CBIT(misc, 5),
++		"vs_reboot(%p[#%d],%u)",
++		vxi, vxi ? vxi->vx_id : 0, cmd);
++
++	ret = vs_reboot_helper(vxi, cmd, arg);
++	if (ret)
++		return ret;
++
++	vxi->reboot_cmd = cmd;
++	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
++		switch (cmd) {
++		case LINUX_REBOOT_CMD_RESTART:
++		case LINUX_REBOOT_CMD_HALT:
++		case LINUX_REBOOT_CMD_POWER_OFF:
++			vx_info_kill(vxi, 0, SIGKILL);
++			vx_info_kill(vxi, 1, SIGKILL);
++		default:
++			break;
++		}
++	}
++	return 0;
++}
++
++long vs_oom_action(unsigned int cmd)
++{
++	struct vx_info *vxi = current_vx_info();
++	long ret = 0;
++
++	vxdprintk(VXD_CBIT(misc, 5),
++		"vs_oom_action(%p[#%d],%u)",
++		vxi, vxi ? vxi->vx_id : 0, cmd);
++
++	ret = vs_reboot_helper(vxi, cmd, NULL);
++	if (ret)
++		return ret;
++
++	vxi->reboot_cmd = cmd;
++	if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
++		vx_info_kill(vxi, 0, SIGKILL);
++		vx_info_kill(vxi, 1, SIGKILL);
++	}
++	return 0;
++}
++
++/*
++ *      argv [0] = vshelper_path;
++ *      argv [1] = action: "startup", "shutdown"
++ *      argv [2] = context identifier
++ *
++ *      envp [*] = type-specific parameters
++ */
++
++long vs_state_change(struct vx_info *vxi, unsigned int cmd)
++{
++	char id_buf[8], cmd_buf[16];
++	char *argv[] = {vshelper_path, NULL, id_buf, 0};
++	char *envp[] = {"HOME=/", "TERM=linux",
++			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
++
++	if (!vx_info_flags(vxi, VXF_SC_HELPER, 0))
++		return 0;
++
++	snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
++	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
++
++	switch (cmd) {
++	case VSC_STARTUP:
++		argv[1] = "startup";
++		break;
++	case VSC_SHUTDOWN:
++		argv[1] = "shutdown";
++		break;
++	default:
++		return 0;
++	}
++
++	return do_vshelper(vshelper_path, argv, envp, 1);
++}
++
++
++/*
++ *      argv [0] = vshelper_path;
++ *      argv [1] = action: "netup", "netdown"
++ *      argv [2] = context identifier
++ *
++ *      envp [*] = type-specific parameters
++ */
++
++long vs_net_change(struct nx_info *nxi, unsigned int cmd)
++{
++	char id_buf[8], cmd_buf[16];
++	char *argv[] = {vshelper_path, NULL, id_buf, 0};
++	char *envp[] = {"HOME=/", "TERM=linux",
++			"PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
++
++	if (!nx_info_flags(nxi, NXF_SC_HELPER, 0))
++		return 0;
++
++	snprintf(id_buf, sizeof(id_buf)-1, "%d", nxi->nx_id);
++	snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
++
++	switch (cmd) {
++	case VSC_NETUP:
++		argv[1] = "netup";
++		break;
++	case VSC_NETDOWN:
++		argv[1] = "netdown";
++		break;
++	default:
++		return 0;
++	}
++
++	return do_vshelper(vshelper_path, argv, envp, 1);
++}
++
+--- a/kernel/vserver/history.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/history.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,258 @@
++/*
++ *  kernel/vserver/history.c
++ *
++ *  Virtual Context History Backtrace
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  basic structure
++ *  V0.02  hash/unhash and trace
++ *  V0.03  preemption fixes
++ *
++ */
++
++#include <linux/module.h>
++#include <asm/uaccess.h>
++
++#include <linux/vserver/context.h>
++#include <linux/vserver/debug.h>
++#include <linux/vserver/debug_cmd.h>
++#include <linux/vserver/history.h>
++
++
++#ifdef	CONFIG_VSERVER_HISTORY
++#define VXH_SIZE	CONFIG_VSERVER_HISTORY_SIZE
++#else
++#define VXH_SIZE	64
++#endif
++
++struct _vx_history {
++	unsigned int counter;
++
++	struct _vx_hist_entry entry[VXH_SIZE + 1];
++};
++
++
++DEFINE_PER_CPU(struct _vx_history, vx_history_buffer);
++
++unsigned volatile int vxh_active = 1;
++
++static atomic_t sequence = ATOMIC_INIT(0);
++
++
++/*	vxh_advance()
++
++	* requires disabled preemption				*/
++
++struct _vx_hist_entry *vxh_advance(void *loc)
++{
++	unsigned int cpu = smp_processor_id();
++	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
++	struct _vx_hist_entry *entry;
++	unsigned int index;
++
++	index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE;
++	entry = &hist->entry[index];
++
++	entry->seq = atomic_inc_return(&sequence);
++	entry->loc = loc;
++	return entry;
++}
++
++EXPORT_SYMBOL_GPL(vxh_advance);
++
++
++#define VXH_LOC_FMTS	"(#%04x,*%d):%p"
++
++#define VXH_LOC_ARGS(e)	(e)->seq, cpu, (e)->loc
++
++
++#define VXH_VXI_FMTS	"%p[#%d,%d.%d]"
++
++#define VXH_VXI_ARGS(e)	(e)->vxi.ptr,				\
++			(e)->vxi.ptr ? (e)->vxi.xid : 0,	\
++			(e)->vxi.ptr ? (e)->vxi.usecnt : 0,	\
++			(e)->vxi.ptr ? (e)->vxi.tasks : 0
++
++void	vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu)
++{
++	switch (e->type) {
++	case VXH_THROW_OOPS:
++		printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e));
++		break;
++
++	case VXH_GET_VX_INFO:
++	case VXH_PUT_VX_INFO:
++		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_GET_VX_INFO) ? "get" : "put",
++			VXH_VXI_ARGS(e));
++		break;
++
++	case VXH_INIT_VX_INFO:
++	case VXH_SET_VX_INFO:
++	case VXH_CLR_VX_INFO:
++		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_INIT_VX_INFO) ? "init" :
++			((e->type == VXH_SET_VX_INFO) ? "set" : "clr"),
++			VXH_VXI_ARGS(e), e->sc.data);
++		break;
++
++	case VXH_CLAIM_VX_INFO:
++	case VXH_RELEASE_VX_INFO:
++		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release",
++			VXH_VXI_ARGS(e), e->sc.data);
++		break;
++
++	case VXH_ALLOC_VX_INFO:
++	case VXH_DEALLOC_VX_INFO:
++		printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc",
++			VXH_VXI_ARGS(e));
++		break;
++
++	case VXH_HASH_VX_INFO:
++	case VXH_UNHASH_VX_INFO:
++		printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash",
++			VXH_VXI_ARGS(e));
++		break;
++
++	case VXH_LOC_VX_INFO:
++	case VXH_LOOKUP_VX_INFO:
++	case VXH_CREATE_VX_INFO:
++		printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n",
++			VXH_LOC_ARGS(e),
++			(e->type == VXH_CREATE_VX_INFO) ? "create" :
++			((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"),
++			e->ll.arg, VXH_VXI_ARGS(e));
++		break;
++	}
++}
++
++static void __vxh_dump_history(void)
++{
++	unsigned int i, cpu;
++
++	printk("History:\tSEQ: %8x\tNR_CPUS: %d\n",
++		atomic_read(&sequence), NR_CPUS);
++
++	for (i = 0; i < VXH_SIZE; i++) {
++		for_each_online_cpu(cpu) {
++			struct _vx_history *hist =
++				&per_cpu(vx_history_buffer, cpu);
++			unsigned int index = (hist->counter - i) % VXH_SIZE;
++			struct _vx_hist_entry *entry = &hist->entry[index];
++
++			vxh_dump_entry(entry, cpu);
++		}
++	}
++}
++
++void	vxh_dump_history(void)
++{
++	vxh_active = 0;
++#ifdef CONFIG_SMP
++	local_irq_enable();
++	smp_send_stop();
++	local_irq_disable();
++#endif
++	__vxh_dump_history();
++}
++
++
++/* vserver syscall commands below here */
++
++
++int vc_dump_history(uint32_t id)
++{
++	vxh_active = 0;
++	__vxh_dump_history();
++	vxh_active = 1;
++
++	return 0;
++}
++
++
++int do_read_history(struct __user _vx_hist_entry *data,
++	int cpu, uint32_t *index, uint32_t *count)
++{
++	int pos, ret = 0;
++	struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
++	int end = hist->counter;
++	int start = end - VXH_SIZE + 2;
++	int idx = *index;
++
++	/* special case: get current pos */
++	if (!*count) {
++		*index = end;
++		return 0;
++	}
++
++	/* have we lost some data? */
++	if (idx < start)
++		idx = start;
++
++	for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
++		struct _vx_hist_entry *entry =
++			&hist->entry[idx % VXH_SIZE];
++
++		/* send entry to userspace */
++		ret = copy_to_user(&data[pos], entry, sizeof(*entry));
++		if (ret)
++			break;
++	}
++	/* save new index and count */
++	*index = idx;
++	*count = pos;
++	return ret ? ret : (*index < end);
++}
++
++int vc_read_history(uint32_t id, void __user *data)
++{
++	struct vcmd_read_history_v0 vc_data;
++	int ret;
++
++	if (id >= NR_CPUS)
++		return -EINVAL;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data,
++		id, &vc_data.index, &vc_data.count);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return ret;
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_read_history_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_read_history_v0_x32 vc_data;
++	int ret;
++
++	if (id >= NR_CPUS)
++		return -EINVAL;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_read_history((struct __user _vx_hist_entry *)
++		compat_ptr(vc_data.data_ptr),
++		id, &vc_data.index, &vc_data.count);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return ret;
++}
++
++#endif	/* CONFIG_COMPAT */
++
+--- a/kernel/vserver/inet.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/inet.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,225 @@
++
++#include <linux/in.h>
++#include <linux/inetdevice.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
++#include <linux/vserver/debug.h>
++#include <net/route.h>
++#include <net/addrconf.h>
++
++
++int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
++{
++	int ret = 0;
++
++	if (!nxi1 || !nxi2 || nxi1 == nxi2)
++		ret = 1;
++	else {
++		struct nx_addr_v4 *ptr;
++
++		for (ptr = &nxi1->v4; ptr; ptr = ptr->next) {
++			if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) {
++				ret = 1;
++				break;
++			}
++		}
++	}
++
++	vxdprintk(VXD_CBIT(net, 2),
++		"nx_v4_addr_conflict(%p,%p): %d",
++		nxi1, nxi2, ret);
++
++	return ret;
++}
++
++
++#ifdef	CONFIG_IPV6
++
++int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
++{
++	int ret = 0;
++
++	if (!nxi1 || !nxi2 || nxi1 == nxi2)
++		ret = 1;
++	else {
++		struct nx_addr_v6 *ptr;
++
++		for (ptr = &nxi1->v6; ptr; ptr = ptr->next) {
++			if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) {
++				ret = 1;
++				break;
++			}
++		}
++	}
++
++	vxdprintk(VXD_CBIT(net, 2),
++		"nx_v6_addr_conflict(%p,%p): %d",
++		nxi1, nxi2, ret);
++
++	return ret;
++}
++
++#endif
++
++int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++	struct in_device *in_dev;
++	struct in_ifaddr **ifap;
++	struct in_ifaddr *ifa;
++	int ret = 0;
++
++	if (!dev)
++		goto out;
++	in_dev = in_dev_get(dev);
++	if (!in_dev)
++		goto out;
++
++	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
++		ifap = &ifa->ifa_next) {
++		if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) {
++			ret = 1;
++			break;
++		}
++	}
++	in_dev_put(in_dev);
++out:
++	return ret;
++}
++
++
++#ifdef	CONFIG_IPV6
++
++int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++	struct inet6_dev *in_dev;
++	struct inet6_ifaddr **ifap;
++	struct inet6_ifaddr *ifa;
++	int ret = 0;
++
++	if (!dev)
++		goto out;
++	in_dev = in6_dev_get(dev);
++	if (!in_dev)
++		goto out;
++
++	for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL;
++		ifap = &ifa->if_next) {
++		if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) {
++			ret = 1;
++			break;
++		}
++	}
++	in6_dev_put(in_dev);
++out:
++	return ret;
++}
++
++#endif
++
++int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
++{
++	int ret = 1;
++
++	if (!nxi)
++		goto out;
++	if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi))
++		goto out;
++#ifdef	CONFIG_IPV6
++	ret = 2;
++	if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi))
++		goto out;
++#endif
++	ret = 0;
++out:
++	vxdprintk(VXD_CBIT(net, 3),
++		"dev_in_nx_info(%p,%p[#%d]) = %d",
++		dev, nxi, nxi ? nxi->nx_id : 0, ret);
++	return ret;
++}
++
++int ip_v4_find_src(struct net *net, struct nx_info *nxi,
++	struct rtable **rp, struct flowi *fl)
++{
++	if (!nxi)
++		return 0;
++
++	/* FIXME: handle lback only case */
++	if (!NX_IPV4(nxi))
++		return -EPERM;
++
++	vxdprintk(VXD_CBIT(net, 4),
++		"ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT,
++		nxi, nxi ? nxi->nx_id : 0,
++		NIPQUAD(fl->fl4_src), NIPQUAD(fl->fl4_dst));
++
++	/* single IP is unconditional */
++	if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) &&
++		(fl->fl4_src == INADDR_ANY))
++		fl->fl4_src = nxi->v4.ip[0].s_addr;
++
++	if (fl->fl4_src == INADDR_ANY) {
++		struct nx_addr_v4 *ptr;
++		__be32 found = 0;
++		int err;
++
++		err = __ip_route_output_key(net, rp, fl);
++		if (!err) {
++			found = (*rp)->rt_src;
++			ip_rt_put(*rp);
++			vxdprintk(VXD_CBIT(net, 4),
++				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
++				nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(found));
++			if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND))
++				goto found;
++		}
++
++		for (ptr = &nxi->v4; ptr; ptr = ptr->next) {
++			__be32 primary = ptr->ip[0].s_addr;
++			__be32 mask = ptr->mask.s_addr;
++			__be32 neta = primary & mask;
++
++			vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: "
++				NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT,
++				nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary),
++				NIPQUAD(mask), NIPQUAD(neta));
++			if ((found & mask) != neta)
++				continue;
++
++			fl->fl4_src = primary;
++			err = __ip_route_output_key(net, rp, fl);
++			vxdprintk(VXD_CBIT(net, 4),
++				"ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
++				nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(primary));
++			if (!err) {
++				found = (*rp)->rt_src;
++				ip_rt_put(*rp);
++				if (found == primary)
++					goto found;
++			}
++		}
++		/* still no source ip? */
++		found = ipv4_is_loopback(fl->fl4_dst)
++			? IPI_LOOPBACK : nxi->v4.ip[0].s_addr;
++	found:
++		/* assign src ip to flow */
++		fl->fl4_src = found;
++
++	} else {
++		if (!v4_addr_in_nx_info(nxi, fl->fl4_src, NXA_MASK_BIND))
++			return -EPERM;
++	}
++
++	if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) {
++		if (ipv4_is_loopback(fl->fl4_dst))
++			fl->fl4_dst = nxi->v4_lback.s_addr;
++		if (ipv4_is_loopback(fl->fl4_src))
++			fl->fl4_src = nxi->v4_lback.s_addr;
++	} else if (ipv4_is_loopback(fl->fl4_dst) &&
++		!nx_info_flags(nxi, NXF_LBACK_ALLOW, 0))
++		return -EPERM;
++
++	return 0;
++}
++
++EXPORT_SYMBOL_GPL(ip_v4_find_src);
++
+--- a/kernel/vserver/init.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/init.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,45 @@
++/*
++ *  linux/kernel/vserver/init.c
++ *
++ *  Virtual Server Init
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  basic structure
++ *
++ */
++
++#include <linux/init.h>
++
++int	vserver_register_sysctl(void);
++void	vserver_unregister_sysctl(void);
++
++
++static int __init init_vserver(void)
++{
++	int ret = 0;
++
++#ifdef	CONFIG_VSERVER_DEBUG
++	vserver_register_sysctl();
++#endif
++	return ret;
++}
++
++
++static void __exit exit_vserver(void)
++{
++
++#ifdef	CONFIG_VSERVER_DEBUG
++	vserver_unregister_sysctl();
++#endif
++	return;
++}
++
++/* FIXME: GFP_ZONETYPES gone
++long vx_slab[GFP_ZONETYPES]; */
++long vx_area;
++
++
++module_init(init_vserver);
++module_exit(exit_vserver);
++
+--- a/kernel/vserver/inode.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/inode.c	2011-06-10 18:58:46.000000000 +0200
+@@ -0,0 +1,433 @@
++/*
++ *  linux/kernel/vserver/inode.c
++ *
++ *  Virtual Server: File System Support
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  separated from vcontext V0.05
++ *  V0.02  moved to tag (instead of xid)
++ *
++ */
++
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/devpts_fs.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/mount.h>
++#include <linux/parser.h>
++#include <linux/namei.h>
++#include <linux/vserver/inode.h>
++#include <linux/vserver/inode_cmd.h>
++#include <linux/vs_base.h>
++#include <linux/vs_tag.h>
++
++#include <asm/uaccess.h>
++
++
++static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask)
++{
++	struct proc_dir_entry *entry;
++
++	if (!in || !in->i_sb)
++		return -ESRCH;
++
++	*flags = IATTR_TAG
++		| (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0)
++		| (IS_IXUNLINK(in) ? IATTR_IXUNLINK : 0)
++		| (IS_BARRIER(in) ? IATTR_BARRIER : 0)
++		| (IS_COW(in) ? IATTR_COW : 0);
++	*mask = IATTR_IXUNLINK | IATTR_IMMUTABLE | IATTR_COW;
++
++	if (S_ISDIR(in->i_mode))
++		*mask |= IATTR_BARRIER;
++
++	if (IS_TAGGED(in)) {
++		*tag = in->i_tag;
++		*mask |= IATTR_TAG;
++	}
++
++	switch (in->i_sb->s_magic) {
++	case PROC_SUPER_MAGIC:
++		entry = PROC_I(in)->pde;
++
++		/* check for specific inodes? */
++		if (entry)
++			*mask |= IATTR_FLAGS;
++		if (entry)
++			*flags |= (entry->vx_flags & IATTR_FLAGS);
++		else
++			*flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS);
++		break;
++
++	case DEVPTS_SUPER_MAGIC:
++		*tag = in->i_tag;
++		*mask |= IATTR_TAG;
++		break;
++
++	default:
++		break;
++	}
++	return 0;
++}
++
++int vc_get_iattr(void __user *data)
++{
++	struct path path;
++	struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 };
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = user_lpath(vc_data.name, &path);
++	if (!ret) {
++		ret = __vc_get_iattr(path.dentry->d_inode,
++			&vc_data.tag, &vc_data.flags, &vc_data.mask);
++		path_put(&path);
++	}
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_get_iattr_x32(void __user *data)
++{
++	struct path path;
++	struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 };
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
++	if (!ret) {
++		ret = __vc_get_iattr(path.dentry->d_inode,
++			&vc_data.tag, &vc_data.flags, &vc_data.mask);
++		path_put(&path);
++	}
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
++#endif	/* CONFIG_COMPAT */
++
++
++int vc_fget_iattr(uint32_t fd, void __user *data)
++{
++	struct file *filp;
++	struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 };
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	filp = fget(fd);
++	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
++		return -EBADF;
++
++	ret = __vc_get_iattr(filp->f_dentry->d_inode,
++		&vc_data.tag, &vc_data.flags, &vc_data.mask);
++
++	fput(filp);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
++
++static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask)
++{
++	struct inode *in = de->d_inode;
++	int error = 0, is_proc = 0, has_tag = 0;
++	struct iattr attr = { 0 };
++
++	if (!in || !in->i_sb)
++		return -ESRCH;
++
++	is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC);
++	if ((*mask & IATTR_FLAGS) && !is_proc)
++		return -EINVAL;
++
++	has_tag = IS_TAGGED(in) ||
++		(in->i_sb->s_magic == DEVPTS_SUPER_MAGIC);
++	if ((*mask & IATTR_TAG) && !has_tag)
++		return -EINVAL;
++
++	mutex_lock(&in->i_mutex);
++	if (*mask & IATTR_TAG) {
++		attr.ia_tag = *tag;
++		attr.ia_valid |= ATTR_TAG;
++	}
++
++	if (*mask & IATTR_FLAGS) {
++		struct proc_dir_entry *entry = PROC_I(in)->pde;
++		unsigned int iflags = PROC_I(in)->vx_flags;
++
++		iflags = (iflags & ~(*mask & IATTR_FLAGS))
++			| (*flags & IATTR_FLAGS);
++		PROC_I(in)->vx_flags = iflags;
++		if (entry)
++			entry->vx_flags = iflags;
++	}
++
++	if (*mask & (IATTR_IMMUTABLE | IATTR_IXUNLINK |
++		IATTR_BARRIER | IATTR_COW)) {
++		int iflags = in->i_flags;
++		int vflags = in->i_vflags;
++
++		if (*mask & IATTR_IMMUTABLE) {
++			if (*flags & IATTR_IMMUTABLE)
++				iflags |= S_IMMUTABLE;
++			else
++				iflags &= ~S_IMMUTABLE;
++		}
++		if (*mask & IATTR_IXUNLINK) {
++			if (*flags & IATTR_IXUNLINK)
++				iflags |= S_IXUNLINK;
++			else
++				iflags &= ~S_IXUNLINK;
++		}
++		if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) {
++			if (*flags & IATTR_BARRIER)
++				vflags |= V_BARRIER;
++			else
++				vflags &= ~V_BARRIER;
++		}
++		if (S_ISREG(in->i_mode) && (*mask & IATTR_COW)) {
++			if (*flags & IATTR_COW)
++				vflags |= V_COW;
++			else
++				vflags &= ~V_COW;
++		}
++		if (in->i_op && in->i_op->sync_flags) {
++			error = in->i_op->sync_flags(in, iflags, vflags);
++			if (error)
++				goto out;
++		}
++	}
++
++	if (attr.ia_valid) {
++		if (in->i_op && in->i_op->setattr)
++			error = in->i_op->setattr(de, &attr);
++		else {
++			error = inode_change_ok(in, &attr);
++			if (!error)
++				error = inode_setattr(in, &attr);
++		}
++	}
++
++out:
++	mutex_unlock(&in->i_mutex);
++	return error;
++}
++
++int vc_set_iattr(void __user *data)
++{
++	struct path path;
++	struct vcmd_ctx_iattr_v1 vc_data;
++	int ret;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = user_lpath(vc_data.name, &path);
++	if (!ret) {
++		ret = __vc_set_iattr(path.dentry,
++			&vc_data.tag, &vc_data.flags, &vc_data.mask);
++		path_put(&path);
++	}
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_set_iattr_x32(void __user *data)
++{
++	struct path path;
++	struct vcmd_ctx_iattr_v1_x32 vc_data;
++	int ret;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = user_lpath(compat_ptr(vc_data.name_ptr), &path);
++	if (!ret) {
++		ret = __vc_set_iattr(path.dentry,
++			&vc_data.tag, &vc_data.flags, &vc_data.mask);
++		path_put(&path);
++	}
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
++#endif	/* CONFIG_COMPAT */
++
++int vc_fset_iattr(uint32_t fd, void __user *data)
++{
++	struct file *filp;
++	struct vcmd_ctx_fiattr_v0 vc_data;
++	int ret;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	filp = fget(fd);
++	if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
++		return -EBADF;
++
++	ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag,
++		&vc_data.flags, &vc_data.mask);
++
++	fput(filp);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return ret;
++}
++
++
++enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err };
++
++static match_table_t tokens = {
++	{Opt_notagcheck, "notagcheck"},
++#ifdef	CONFIG_PROPAGATE
++	{Opt_notag, "notag"},
++	{Opt_tag, "tag"},
++	{Opt_tagid, "tagid=%u"},
++#endif
++	{Opt_err, NULL}
++};
++
++
++static void __dx_parse_remove(char *string, char *opt)
++{
++	char *p = strstr(string, opt);
++	char *q = p;
++
++	if (p) {
++		while (*q != '\0' && *q != ',')
++			q++;
++		while (*q)
++			*p++ = *q++;
++		while (*p)
++			*p++ = '\0';
++	}
++}
++
++int dx_parse_tag(char *string, tag_t *tag, int remove, int *mnt_flags,
++		 unsigned long *flags)
++{
++	int set = 0;
++	substring_t args[MAX_OPT_ARGS];
++	int token, option = 0;
++	char *s, *p, *opts;
++
++	if (!string)
++		return 0;
++	s = kstrdup(string, GFP_KERNEL | GFP_ATOMIC);
++	if (!s)
++		return 0;
++
++	opts = s;
++	while ((p = strsep(&opts, ",")) != NULL) {
++		token = match_token(p, tokens, args);
++
++		vxdprintk(VXD_CBIT(tag, 7),
++			"dx_parse_tag(»%s«): %d:#%d",
++			p, token, option);
++
++		switch (token) {
++#ifdef CONFIG_PROPAGATE
++		case Opt_tag:
++			if (tag)
++				*tag = 0;
++			if (remove)
++				__dx_parse_remove(s, "tag");
++			*mnt_flags |= MNT_TAGID;
++			set |= MNT_TAGID;
++			break;
++		case Opt_notag:
++			if (remove)
++				__dx_parse_remove(s, "notag");
++			*mnt_flags |= MNT_NOTAG;
++			set |= MNT_NOTAG;
++			break;
++		case Opt_tagid:
++			if (tag && !match_int(args, &option))
++				*tag = option;
++			if (remove)
++				__dx_parse_remove(s, "tagid");
++			*mnt_flags |= MNT_TAGID;
++			set |= MNT_TAGID;
++			break;
++#endif
++		case Opt_notagcheck:
++			if (remove)
++				__dx_parse_remove(s, "notagcheck");
++			*flags |= MS_NOTAGCHECK;
++			set |= MS_NOTAGCHECK;
++			break;
++		}
++	}
++	if (set)
++		strcpy(string, s);
++	kfree(s);
++	return set;
++}
++
++#ifdef	CONFIG_PROPAGATE
++
++void __dx_propagate_tag(struct nameidata *nd, struct inode *inode)
++{
++	tag_t new_tag = 0;
++	struct vfsmount *mnt;
++	int propagate;
++
++	if (!nd)
++		return;
++	mnt = nd->path.mnt;
++	if (!mnt)
++		return;
++
++	propagate = (mnt->mnt_flags & MNT_TAGID);
++	if (propagate)
++		new_tag = mnt->mnt_tag;
++
++	vxdprintk(VXD_CBIT(tag, 7),
++		"dx_propagate_tag(%p[#%lu.%d]): %d,%d",
++		inode, inode->i_ino, inode->i_tag,
++		new_tag, (propagate) ? 1 : 0);
++
++	if (propagate)
++		inode->i_tag = new_tag;
++}
++
++#include <linux/module.h>
++
++EXPORT_SYMBOL_GPL(__dx_propagate_tag);
++
++#endif	/* CONFIG_PROPAGATE */
++
+--- a/kernel/vserver/limit.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/limit.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,392 @@
++/*
++ *  linux/kernel/vserver/limit.c
++ *
++ *  Virtual Server: Context Limits
++ *
++ *  Copyright (C) 2004-2010  Herbert Pötzl
++ *
++ *  V0.01  broken out from vcontext V0.05
++ *  V0.02  changed vcmds to vxi arg
++ *  V0.03  added memory cgroup support
++ *
++ */
++
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/memcontrol.h>
++#include <linux/res_counter.h>
++#include <linux/vs_limit.h>
++#include <linux/vserver/limit.h>
++#include <linux/vserver/limit_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++const char *vlimit_name[NUM_LIMITS] = {
++	[RLIMIT_CPU]		= "CPU",
++	[RLIMIT_RSS]		= "RSS",
++	[RLIMIT_NPROC]		= "NPROC",
++	[RLIMIT_NOFILE]		= "NOFILE",
++	[RLIMIT_MEMLOCK]	= "VML",
++	[RLIMIT_AS]		= "VM",
++	[RLIMIT_LOCKS]		= "LOCKS",
++	[RLIMIT_SIGPENDING]	= "SIGP",
++	[RLIMIT_MSGQUEUE]	= "MSGQ",
++
++	[VLIMIT_NSOCK]		= "NSOCK",
++	[VLIMIT_OPENFD]		= "OPENFD",
++	[VLIMIT_ANON]		= "ANON",
++	[VLIMIT_SHMEM]		= "SHMEM",
++	[VLIMIT_DENTRY]		= "DENTRY",
++};
++
++EXPORT_SYMBOL_GPL(vlimit_name);
++
++#define MASK_ENTRY(x)	(1 << (x))
++
++const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = {
++		/* minimum */
++	0
++	,	/* softlimit */
++	MASK_ENTRY( RLIMIT_RSS		) |
++	MASK_ENTRY( VLIMIT_ANON		) |
++	0
++	,       /* maximum */
++	MASK_ENTRY( RLIMIT_RSS		) |
++	MASK_ENTRY( RLIMIT_NPROC	) |
++	MASK_ENTRY( RLIMIT_NOFILE	) |
++	MASK_ENTRY( RLIMIT_MEMLOCK	) |
++	MASK_ENTRY( RLIMIT_AS		) |
++	MASK_ENTRY( RLIMIT_LOCKS	) |
++	MASK_ENTRY( RLIMIT_MSGQUEUE	) |
++
++	MASK_ENTRY( VLIMIT_NSOCK	) |
++	MASK_ENTRY( VLIMIT_OPENFD	) |
++	MASK_ENTRY( VLIMIT_ANON		) |
++	MASK_ENTRY( VLIMIT_SHMEM	) |
++	MASK_ENTRY( VLIMIT_DENTRY	) |
++	0
++};
++		/* accounting only */
++uint32_t account_mask =
++	MASK_ENTRY( VLIMIT_SEMARY	) |
++	MASK_ENTRY( VLIMIT_NSEMS	) |
++	MASK_ENTRY( VLIMIT_MAPPED	) |
++	0;
++
++
++static int is_valid_vlimit(int id)
++{
++	uint32_t mask = vlimit_mask.minimum |
++		vlimit_mask.softlimit | vlimit_mask.maximum;
++	return mask & (1 << id);
++}
++
++static int is_accounted_vlimit(int id)
++{
++	if (is_valid_vlimit(id))
++		return 1;
++	return account_mask & (1 << id);
++}
++
++
++static inline uint64_t vc_get_soft(struct vx_info *vxi, int id)
++{
++	rlim_t limit = __rlim_soft(&vxi->limit, id);
++	return VX_VLIM(limit);
++}
++
++static inline uint64_t vc_get_hard(struct vx_info *vxi, int id)
++{
++	rlim_t limit = __rlim_hard(&vxi->limit, id);
++	return VX_VLIM(limit);
++}
++
++static int do_get_rlimit(struct vx_info *vxi, uint32_t id,
++	uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum)
++{
++	if (!is_valid_vlimit(id))
++		return -EINVAL;
++
++	if (minimum)
++		*minimum = CRLIM_UNSET;
++	if (softlimit)
++		*softlimit = vc_get_soft(vxi, id);
++	if (maximum)
++		*maximum = vc_get_hard(vxi, id);
++	return 0;
++}
++
++int vc_get_rlimit(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_rlimit_v0 vc_data;
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_get_rlimit(vxi, vc_data.id,
++		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++static int do_set_rlimit(struct vx_info *vxi, uint32_t id,
++	uint64_t minimum, uint64_t softlimit, uint64_t maximum)
++{
++	if (!is_valid_vlimit(id))
++		return -EINVAL;
++
++	if (maximum != CRLIM_KEEP)
++		__rlim_hard(&vxi->limit, id) = VX_RLIM(maximum);
++	if (softlimit != CRLIM_KEEP)
++		__rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit);
++
++	/* clamp soft limit */
++	if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id))
++		__rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id);
++
++	return 0;
++}
++
++int vc_set_rlimit(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_rlimit_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_rlimit(vxi, vc_data.id,
++		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
++}
++
++#ifdef	CONFIG_IA32_EMULATION
++
++int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_rlimit_v0_x32 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_rlimit(vxi, vc_data.id,
++		vc_data.minimum, vc_data.softlimit, vc_data.maximum);
++}
++
++int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_rlimit_v0_x32 vc_data;
++	int ret;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_get_rlimit(vxi, vc_data.id,
++		&vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
++	if (ret)
++		return ret;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++#endif	/* CONFIG_IA32_EMULATION */
++
++
++int vc_get_rlimit_mask(uint32_t id, void __user *data)
++{
++	if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask)))
++		return -EFAULT;
++	return 0;
++}
++
++
++static inline void vx_reset_hits(struct _vx_limit *limit)
++{
++	int lim;
++
++	for (lim = 0; lim < NUM_LIMITS; lim++) {
++		atomic_set(&__rlim_lhit(limit, lim), 0);
++	}
++}
++
++int vc_reset_hits(struct vx_info *vxi, void __user *data)
++{
++	vx_reset_hits(&vxi->limit);
++	return 0;
++}
++
++static inline void vx_reset_minmax(struct _vx_limit *limit)
++{
++	rlim_t value;
++	int lim;
++
++	for (lim = 0; lim < NUM_LIMITS; lim++) {
++		value = __rlim_get(limit, lim);
++		__rlim_rmax(limit, lim) = value;
++		__rlim_rmin(limit, lim) = value;
++	}
++}
++
++int vc_reset_minmax(struct vx_info *vxi, void __user *data)
++{
++	vx_reset_minmax(&vxi->limit);
++	return 0;
++}
++
++
++int vc_rlimit_stat(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_rlimit_stat_v0 vc_data;
++	struct _vx_limit *limit = &vxi->limit;
++	int id;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	id = vc_data.id;
++	if (!is_accounted_vlimit(id))
++		return -EINVAL;
++
++	vx_limit_fixup(limit, id);
++	vc_data.hits = atomic_read(&__rlim_lhit(limit, id));
++	vc_data.value = __rlim_get(limit, id);
++	vc_data.minimum = __rlim_rmin(limit, id);
++	vc_data.maximum = __rlim_rmax(limit, id);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++void vx_vsi_meminfo(struct sysinfo *val)
++{
++#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
++	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
++	u64 res_limit, res_usage;
++
++	if (!mcg)
++		return;
++
++	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
++	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
++
++	if (res_limit != RESOURCE_MAX)
++		val->totalram = (res_limit >> PAGE_SHIFT);
++	val->freeram = val->totalram - (res_usage >> PAGE_SHIFT);
++	val->bufferram = 0;
++#else	/* !CONFIG_CGROUP_MEM_RES_CTLR */
++	struct vx_info *vxi = current_vx_info();
++	unsigned long totalram, freeram;
++	rlim_t v;
++
++	/* we blindly accept the max */
++	v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
++	totalram = (v != RLIM_INFINITY) ? v : val->totalram;
++
++	/* total minus used equals free */
++	v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
++	freeram = (v < totalram) ? totalram - v : 0;
++
++	val->totalram = totalram;
++	val->freeram = freeram;
++#endif	/* CONFIG_CGROUP_MEM_RES_CTLR */
++	val->totalhigh = 0;
++	val->freehigh = 0;
++	return;
++}
++
++void vx_vsi_swapinfo(struct sysinfo *val)
++{
++#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
++#ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
++	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
++	u64 res_limit, res_usage, memsw_limit, memsw_usage;
++	s64 swap_limit, swap_usage;
++
++	if (!mcg)
++		return;
++
++	res_limit = mem_cgroup_res_read_u64(mcg, RES_LIMIT);
++	res_usage = mem_cgroup_res_read_u64(mcg, RES_USAGE);
++	memsw_limit = mem_cgroup_memsw_read_u64(mcg, RES_LIMIT);
++	memsw_usage = mem_cgroup_memsw_read_u64(mcg, RES_USAGE);
++
++	if (res_limit == RESOURCE_MAX)
++		return;
++
++	swap_limit = memsw_limit - res_limit;
++	if (memsw_limit != RESOURCE_MAX)
++		val->totalswap = swap_limit >> PAGE_SHIFT;
++
++	swap_usage = memsw_usage - res_usage;
++	val->freeswap = (swap_usage < swap_limit) ?
++		val->totalswap - (swap_usage >> PAGE_SHIFT) : 0;
++#else	/* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
++	val->totalswap = 0;
++	val->freeswap = 0;
++#endif	/* !CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
++#else	/* !CONFIG_CGROUP_MEM_RES_CTLR */
++	struct vx_info *vxi = current_vx_info();
++	unsigned long totalswap, freeswap;
++	rlim_t v, w;
++
++	v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
++	if (v == RLIM_INFINITY) {
++		val->freeswap = val->totalswap;
++		return;
++	}
++
++	/* we blindly accept the max */
++	w = __rlim_hard(&vxi->limit, RLIMIT_RSS);
++	totalswap = (w != RLIM_INFINITY) ? (w - v) : val->totalswap;
++
++	/* currently 'used' swap */
++	w = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
++	w -= (w > v) ? v : w;
++
++	/* total minus used equals free */
++	freeswap = (w < totalswap) ? totalswap - w : 0;
++
++	val->totalswap = totalswap;
++	val->freeswap = freeswap;
++#endif	/* CONFIG_CGROUP_MEM_RES_CTLR */
++	return;
++}
++
++long vx_vsi_cached(struct sysinfo *val)
++{
++#ifdef	CONFIG_CGROUP_MEM_RES_CTLR
++	struct mem_cgroup *mcg = mem_cgroup_from_task(current);
++
++	return mem_cgroup_stat_read_cache(mcg);
++#else
++	return 0;
++#endif
++}
++
++
++unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm)
++{
++	struct vx_info *vxi = mm->mm_vx_info;
++	unsigned long points;
++	rlim_t v, w;
++
++	if (!vxi)
++		return 0;
++
++	points = vxi->vx_badness_bias;
++
++	v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
++	w = __rlim_soft(&vxi->limit, RLIMIT_RSS);
++	points += (v > w) ? (v - w) : 0;
++
++	return points;
++}
++
+--- a/kernel/vserver/limit_init.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/limit_init.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,31 @@
++
++
++static inline void vx_info_init_limit(struct _vx_limit *limit)
++{
++	int lim;
++
++	for (lim = 0; lim < NUM_LIMITS; lim++) {
++		__rlim_soft(limit, lim) = RLIM_INFINITY;
++		__rlim_hard(limit, lim) = RLIM_INFINITY;
++		__rlim_set(limit, lim, 0);
++		atomic_set(&__rlim_lhit(limit, lim), 0);
++		__rlim_rmin(limit, lim) = 0;
++		__rlim_rmax(limit, lim) = 0;
++	}
++}
++
++static inline void vx_info_exit_limit(struct _vx_limit *limit)
++{
++	rlim_t value;
++	int lim;
++
++	for (lim = 0; lim < NUM_LIMITS; lim++) {
++		if ((1 << lim) & VLIM_NOCHECK)
++			continue;
++		value = __rlim_get(limit, lim);
++		vxwprintk_xid(value,
++			"!!! limit: %p[%s,%d] = %ld on exit.",
++			limit, vlimit_name[lim], lim, (long)value);
++	}
++}
++
+--- a/kernel/vserver/limit_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/limit_proc.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,57 @@
++#ifndef _VX_LIMIT_PROC_H
++#define _VX_LIMIT_PROC_H
++
++#include <linux/vserver/limit_int.h>
++
++
++#define VX_LIMIT_FMT	":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
++#define VX_LIMIT_TOP	\
++	"Limit\t current\t     min/max\t\t    soft/hard\t\thits\n"
++
++#define VX_LIMIT_ARG(r)				\
++	(unsigned long)__rlim_get(limit, r),	\
++	(unsigned long)__rlim_rmin(limit, r),	\
++	(unsigned long)__rlim_rmax(limit, r),	\
++	VX_VLIM(__rlim_soft(limit, r)),		\
++	VX_VLIM(__rlim_hard(limit, r)),		\
++	atomic_read(&__rlim_lhit(limit, r))
++
++static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
++{
++	vx_limit_fixup(limit, -1);
++	return sprintf(buffer, VX_LIMIT_TOP
++		"PROC"	VX_LIMIT_FMT
++		"VM"	VX_LIMIT_FMT
++		"VML"	VX_LIMIT_FMT
++		"RSS"	VX_LIMIT_FMT
++		"ANON"	VX_LIMIT_FMT
++		"RMAP"	VX_LIMIT_FMT
++		"FILES" VX_LIMIT_FMT
++		"OFD"	VX_LIMIT_FMT
++		"LOCKS" VX_LIMIT_FMT
++		"SOCK"	VX_LIMIT_FMT
++		"MSGQ"	VX_LIMIT_FMT
++		"SHM"	VX_LIMIT_FMT
++		"SEMA"	VX_LIMIT_FMT
++		"SEMS"	VX_LIMIT_FMT
++		"DENT"	VX_LIMIT_FMT,
++		VX_LIMIT_ARG(RLIMIT_NPROC),
++		VX_LIMIT_ARG(RLIMIT_AS),
++		VX_LIMIT_ARG(RLIMIT_MEMLOCK),
++		VX_LIMIT_ARG(RLIMIT_RSS),
++		VX_LIMIT_ARG(VLIMIT_ANON),
++		VX_LIMIT_ARG(VLIMIT_MAPPED),
++		VX_LIMIT_ARG(RLIMIT_NOFILE),
++		VX_LIMIT_ARG(VLIMIT_OPENFD),
++		VX_LIMIT_ARG(RLIMIT_LOCKS),
++		VX_LIMIT_ARG(VLIMIT_NSOCK),
++		VX_LIMIT_ARG(RLIMIT_MSGQUEUE),
++		VX_LIMIT_ARG(VLIMIT_SHMEM),
++		VX_LIMIT_ARG(VLIMIT_SEMARY),
++		VX_LIMIT_ARG(VLIMIT_NSEMS),
++		VX_LIMIT_ARG(VLIMIT_DENTRY));
++}
++
++#endif	/* _VX_LIMIT_PROC_H */
++
++
+--- a/kernel/vserver/monitor.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/monitor.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,138 @@
++/*
++ *  kernel/vserver/monitor.c
++ *
++ *  Virtual Context Scheduler Monitor
++ *
++ *  Copyright (C) 2006-2007 Herbert Pötzl
++ *
++ *  V0.01  basic design
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/jiffies.h>
++#include <asm/uaccess.h>
++#include <asm/atomic.h>
++
++#include <linux/vserver/monitor.h>
++#include <linux/vserver/debug_cmd.h>
++
++
++#ifdef	CONFIG_VSERVER_MONITOR
++#define VXM_SIZE	CONFIG_VSERVER_MONITOR_SIZE
++#else
++#define VXM_SIZE	64
++#endif
++
++struct _vx_monitor {
++	unsigned int counter;
++
++	struct _vx_mon_entry entry[VXM_SIZE+1];
++};
++
++
++DEFINE_PER_CPU(struct _vx_monitor, vx_monitor_buffer);
++
++unsigned volatile int vxm_active = 1;
++
++static atomic_t sequence = ATOMIC_INIT(0);
++
++
++/*	vxm_advance()
++
++	* requires disabled preemption				*/
++
++struct _vx_mon_entry *vxm_advance(int cpu)
++{
++	struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
++	struct _vx_mon_entry *entry;
++	unsigned int index;
++
++	index = vxm_active ? (mon->counter++ % VXM_SIZE) : VXM_SIZE;
++	entry = &mon->entry[index];
++
++	entry->ev.seq = atomic_inc_return(&sequence);
++	entry->ev.jif = jiffies;
++	return entry;
++}
++
++EXPORT_SYMBOL_GPL(vxm_advance);
++
++
++int do_read_monitor(struct __user _vx_mon_entry *data,
++	int cpu, uint32_t *index, uint32_t *count)
++{
++	int pos, ret = 0;
++	struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
++	int end = mon->counter;
++	int start = end - VXM_SIZE + 2;
++	int idx = *index;
++
++	/* special case: get current pos */
++	if (!*count) {
++		*index = end;
++		return 0;
++	}
++
++	/* have we lost some data? */
++	if (idx < start)
++		idx = start;
++
++	for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
++		struct _vx_mon_entry *entry =
++			&mon->entry[idx % VXM_SIZE];
++
++		/* send entry to userspace */
++		ret = copy_to_user(&data[pos], entry, sizeof(*entry));
++		if (ret)
++			break;
++	}
++	/* save new index and count */
++	*index = idx;
++	*count = pos;
++	return ret ? ret : (*index < end);
++}
++
++int vc_read_monitor(uint32_t id, void __user *data)
++{
++	struct vcmd_read_monitor_v0 vc_data;
++	int ret;
++
++	if (id >= NR_CPUS)
++		return -EINVAL;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_read_monitor((struct __user _vx_mon_entry *)vc_data.data,
++		id, &vc_data.index, &vc_data.count);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return ret;
++}
++
++#ifdef	CONFIG_COMPAT
++
++int vc_read_monitor_x32(uint32_t id, void __user *data)
++{
++	struct vcmd_read_monitor_v0_x32 vc_data;
++	int ret;
++
++	if (id >= NR_CPUS)
++		return -EINVAL;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	ret = do_read_monitor((struct __user _vx_mon_entry *)
++		compat_ptr(vc_data.data_ptr),
++		id, &vc_data.index, &vc_data.count);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return ret;
++}
++
++#endif	/* CONFIG_COMPAT */
++
+--- a/kernel/vserver/network.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/network.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,864 @@
++/*
++ *  linux/kernel/vserver/network.c
++ *
++ *  Virtual Server: Network Support
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  broken out from vcontext V0.05
++ *  V0.02  cleaned up implementation
++ *  V0.03  added equiv nx commands
++ *  V0.04  switch to RCU based hash
++ *  V0.05  and back to locking again
++ *  V0.06  changed vcmds to nxi arg
++ *  V0.07  have __create claim() the nxi
++ *
++ */
++
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/rcupdate.h>
++
++#include <linux/vs_network.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/network_cmd.h>
++
++
++atomic_t nx_global_ctotal	= ATOMIC_INIT(0);
++atomic_t nx_global_cactive	= ATOMIC_INIT(0);
++
++static struct kmem_cache *nx_addr_v4_cachep = NULL;
++static struct kmem_cache *nx_addr_v6_cachep = NULL;
++
++
++static int __init init_network(void)
++{
++	nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache",
++		sizeof(struct nx_addr_v4), 0,
++		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++	nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache",
++		sizeof(struct nx_addr_v6), 0,
++		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
++	return 0;
++}
++
++
++/*	__alloc_nx_addr_v4()					*/
++
++static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void)
++{
++	struct nx_addr_v4 *nxa = kmem_cache_alloc(
++		nx_addr_v4_cachep, GFP_KERNEL);
++
++	if (!IS_ERR(nxa))
++		memset(nxa, 0, sizeof(*nxa));
++	return nxa;
++}
++
++/*	__dealloc_nx_addr_v4()					*/
++
++static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa)
++{
++	kmem_cache_free(nx_addr_v4_cachep, nxa);
++}
++
++/*	__dealloc_nx_addr_v4_all()				*/
++
++static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa)
++{
++	while (nxa) {
++		struct nx_addr_v4 *next = nxa->next;
++
++		__dealloc_nx_addr_v4(nxa);
++		nxa = next;
++	}
++}
++
++
++#ifdef CONFIG_IPV6
++
++/*	__alloc_nx_addr_v6()					*/
++
++static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void)
++{
++	struct nx_addr_v6 *nxa = kmem_cache_alloc(
++		nx_addr_v6_cachep, GFP_KERNEL);
++
++	if (!IS_ERR(nxa))
++		memset(nxa, 0, sizeof(*nxa));
++	return nxa;
++}
++
++/*	__dealloc_nx_addr_v6()					*/
++
++static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa)
++{
++	kmem_cache_free(nx_addr_v6_cachep, nxa);
++}
++
++/*	__dealloc_nx_addr_v6_all()				*/
++
++static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa)
++{
++	while (nxa) {
++		struct nx_addr_v6 *next = nxa->next;
++
++		__dealloc_nx_addr_v6(nxa);
++		nxa = next;
++	}
++}
++
++#endif	/* CONFIG_IPV6 */
++
++/*	__alloc_nx_info()
++
++	* allocate an initialized nx_info struct
++	* doesn't make it visible (hash)			*/
++
++static struct nx_info *__alloc_nx_info(nid_t nid)
++{
++	struct nx_info *new = NULL;
++
++	vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
++
++	/* would this benefit from a slab cache? */
++	new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
++	if (!new)
++		return 0;
++
++	memset(new, 0, sizeof(struct nx_info));
++	new->nx_id = nid;
++	INIT_HLIST_NODE(&new->nx_hlist);
++	atomic_set(&new->nx_usecnt, 0);
++	atomic_set(&new->nx_tasks, 0);
++	new->nx_state = 0;
++
++	new->nx_flags = NXF_INIT_SET;
++
++	/* rest of init goes here */
++
++	new->v4_lback.s_addr = htonl(INADDR_LOOPBACK);
++	new->v4_bcast.s_addr = htonl(INADDR_BROADCAST);
++
++	vxdprintk(VXD_CBIT(nid, 0),
++		"alloc_nx_info(%d) = %p", nid, new);
++	atomic_inc(&nx_global_ctotal);
++	return new;
++}
++
++/*	__dealloc_nx_info()
++
++	* final disposal of nx_info				*/
++
++static void __dealloc_nx_info(struct nx_info *nxi)
++{
++	vxdprintk(VXD_CBIT(nid, 0),
++		"dealloc_nx_info(%p)", nxi);
++
++	nxi->nx_hlist.next = LIST_POISON1;
++	nxi->nx_id = -1;
++
++	BUG_ON(atomic_read(&nxi->nx_usecnt));
++	BUG_ON(atomic_read(&nxi->nx_tasks));
++
++	__dealloc_nx_addr_v4_all(nxi->v4.next);
++
++	nxi->nx_state |= NXS_RELEASED;
++	kfree(nxi);
++	atomic_dec(&nx_global_ctotal);
++}
++
++static void __shutdown_nx_info(struct nx_info *nxi)
++{
++	nxi->nx_state |= NXS_SHUTDOWN;
++	vs_net_change(nxi, VSC_NETDOWN);
++}
++
++/*	exported stuff						*/
++
++void free_nx_info(struct nx_info *nxi)
++{
++	/* context shutdown is mandatory */
++	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
++
++	/* context must not be hashed */
++	BUG_ON(nxi->nx_state & NXS_HASHED);
++
++	BUG_ON(atomic_read(&nxi->nx_usecnt));
++	BUG_ON(atomic_read(&nxi->nx_tasks));
++
++	__dealloc_nx_info(nxi);
++}
++
++
++void __nx_set_lback(struct nx_info *nxi)
++{
++	int nid = nxi->nx_id;
++	__be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
++
++	nxi->v4_lback.s_addr = lback;
++}
++
++extern int __nx_inet_add_lback(__be32 addr);
++extern int __nx_inet_del_lback(__be32 addr);
++
++
++/*	hash table for nx_info hash */
++
++#define NX_HASH_SIZE	13
++
++struct hlist_head nx_info_hash[NX_HASH_SIZE];
++
++static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;
++
++
++static inline unsigned int __hashval(nid_t nid)
++{
++	return (nid % NX_HASH_SIZE);
++}
++
++
++
++/*	__hash_nx_info()
++
++	* add the nxi to the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __hash_nx_info(struct nx_info *nxi)
++{
++	struct hlist_head *head;
++
++	vxd_assert_lock(&nx_info_hash_lock);
++	vxdprintk(VXD_CBIT(nid, 4),
++		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
++
++	/* context must not be hashed */
++	BUG_ON(nx_info_state(nxi, NXS_HASHED));
++
++	nxi->nx_state |= NXS_HASHED;
++	head = &nx_info_hash[__hashval(nxi->nx_id)];
++	hlist_add_head(&nxi->nx_hlist, head);
++	atomic_inc(&nx_global_cactive);
++}
++
++/*	__unhash_nx_info()
++
++	* remove the nxi from the global hash table
++	* requires the hash_lock to be held			*/
++
++static inline void __unhash_nx_info(struct nx_info *nxi)
++{
++	vxd_assert_lock(&nx_info_hash_lock);
++	vxdprintk(VXD_CBIT(nid, 4),
++		"__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
++		atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
++
++	/* context must be hashed */
++	BUG_ON(!nx_info_state(nxi, NXS_HASHED));
++	/* but without tasks */
++	BUG_ON(atomic_read(&nxi->nx_tasks));
++
++	nxi->nx_state &= ~NXS_HASHED;
++	hlist_del(&nxi->nx_hlist);
++	atomic_dec(&nx_global_cactive);
++}
++
++
++/*	__lookup_nx_info()
++
++	* requires the hash_lock to be held
++	* doesn't increment the nx_refcnt			*/
++
++static inline struct nx_info *__lookup_nx_info(nid_t nid)
++{
++	struct hlist_head *head = &nx_info_hash[__hashval(nid)];
++	struct hlist_node *pos;
++	struct nx_info *nxi;
++
++	vxd_assert_lock(&nx_info_hash_lock);
++	hlist_for_each(pos, head) {
++		nxi = hlist_entry(pos, struct nx_info, nx_hlist);
++
++		if (nxi->nx_id == nid)
++			goto found;
++	}
++	nxi = NULL;
++found:
++	vxdprintk(VXD_CBIT(nid, 0),
++		"__lookup_nx_info(#%u): %p[#%u]",
++		nid, nxi, nxi ? nxi->nx_id : 0);
++	return nxi;
++}
++
++
++/*	__create_nx_info()
++
++	* create the requested context
++	* get(), claim() and hash it				*/
++
++static struct nx_info *__create_nx_info(int id)
++{
++	struct nx_info *new, *nxi = NULL;
++
++	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
++
++	if (!(new = __alloc_nx_info(id)))
++		return ERR_PTR(-ENOMEM);
++
++	/* required to make dynamic xids unique */
++	spin_lock(&nx_info_hash_lock);
++
++	/* static context requested */
++	if ((nxi = __lookup_nx_info(id))) {
++		vxdprintk(VXD_CBIT(nid, 0),
++			"create_nx_info(%d) = %p (already there)", id, nxi);
++		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
++			nxi = ERR_PTR(-EBUSY);
++		else
++			nxi = ERR_PTR(-EEXIST);
++		goto out_unlock;
++	}
++	/* new context */
++	vxdprintk(VXD_CBIT(nid, 0),
++		"create_nx_info(%d) = %p (new)", id, new);
++	claim_nx_info(new, NULL);
++	__nx_set_lback(new);
++	__hash_nx_info(get_nx_info(new));
++	nxi = new, new = NULL;
++
++out_unlock:
++	spin_unlock(&nx_info_hash_lock);
++	if (new)
++		__dealloc_nx_info(new);
++	return nxi;
++}
++
++
++
++/*	exported stuff						*/
++
++
++void unhash_nx_info(struct nx_info *nxi)
++{
++	__shutdown_nx_info(nxi);
++	spin_lock(&nx_info_hash_lock);
++	__unhash_nx_info(nxi);
++	spin_unlock(&nx_info_hash_lock);
++}
++
++/*	lookup_nx_info()
++
++	* search for a nx_info and get() it
++	* negative id means current				*/
++
++struct nx_info *lookup_nx_info(int id)
++{
++	struct nx_info *nxi = NULL;
++
++	if (id < 0) {
++		nxi = get_nx_info(current_nx_info());
++	} else if (id > 1) {
++		spin_lock(&nx_info_hash_lock);
++		nxi = get_nx_info(__lookup_nx_info(id));
++		spin_unlock(&nx_info_hash_lock);
++	}
++	return nxi;
++}
++
++/*	nid_is_hashed()
++
++	* verify that nid is still hashed			*/
++
++int nid_is_hashed(nid_t nid)
++{
++	int hashed;
++
++	spin_lock(&nx_info_hash_lock);
++	hashed = (__lookup_nx_info(nid) != NULL);
++	spin_unlock(&nx_info_hash_lock);
++	return hashed;
++}
++
++
++#ifdef	CONFIG_PROC_FS
++
++/*	get_nid_list()
++
++	* get a subset of hashed nids for proc
++	* assumes size is at least one				*/
++
++int get_nid_list(int index, unsigned int *nids, int size)
++{
++	int hindex, nr_nids = 0;
++
++	/* only show current and children */
++	if (!nx_check(0, VS_ADMIN | VS_WATCH)) {
++		if (index > 0)
++			return 0;
++		nids[nr_nids] = nx_current_nid();
++		return 1;
++	}
++
++	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
++		struct hlist_head *head = &nx_info_hash[hindex];
++		struct hlist_node *pos;
++
++		spin_lock(&nx_info_hash_lock);
++		hlist_for_each(pos, head) {
++			struct nx_info *nxi;
++
++			if (--index > 0)
++				continue;
++
++			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
++			nids[nr_nids] = nxi->nx_id;
++			if (++nr_nids >= size) {
++				spin_unlock(&nx_info_hash_lock);
++				goto out;
++			}
++		}
++		/* keep the lock time short */
++		spin_unlock(&nx_info_hash_lock);
++	}
++out:
++	return nr_nids;
++}
++#endif
++
++
++/*
++ *	migrate task to new network
++ *	gets nxi, puts old_nxi on change
++ */
++
++int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
++{
++	struct nx_info *old_nxi;
++	int ret = 0;
++
++	if (!p || !nxi)
++		BUG();
++
++	vxdprintk(VXD_CBIT(nid, 5),
++		"nx_migrate_task(%p,%p[#%d.%d.%d])",
++		p, nxi, nxi->nx_id,
++		atomic_read(&nxi->nx_usecnt),
++		atomic_read(&nxi->nx_tasks));
++
++	if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
++		!nx_info_flags(nxi, NXF_STATE_SETUP, 0))
++		return -EACCES;
++
++	if (nx_info_state(nxi, NXS_SHUTDOWN))
++		return -EFAULT;
++
++	/* maybe disallow this completely? */
++	old_nxi = task_get_nx_info(p);
++	if (old_nxi == nxi)
++		goto out;
++
++	task_lock(p);
++	if (old_nxi)
++		clr_nx_info(&p->nx_info);
++	claim_nx_info(nxi, p);
++	set_nx_info(&p->nx_info, nxi);
++	p->nid = nxi->nx_id;
++	task_unlock(p);
++
++	vxdprintk(VXD_CBIT(nid, 5),
++		"moved task %p into nxi:%p[#%d]",
++		p, nxi, nxi->nx_id);
++
++	if (old_nxi)
++		release_nx_info(old_nxi, p);
++	ret = 0;
++out:
++	put_nx_info(old_nxi);
++	return ret;
++}
++
++
++void nx_set_persistent(struct nx_info *nxi)
++{
++	vxdprintk(VXD_CBIT(nid, 6),
++		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
++
++	get_nx_info(nxi);
++	claim_nx_info(nxi, NULL);
++}
++
++void nx_clear_persistent(struct nx_info *nxi)
++{
++	vxdprintk(VXD_CBIT(nid, 6),
++		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
++
++	release_nx_info(nxi, NULL);
++	put_nx_info(nxi);
++}
++
++void nx_update_persistent(struct nx_info *nxi)
++{
++	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
++		nx_set_persistent(nxi);
++	else
++		nx_clear_persistent(nxi);
++}
++
++/* vserver syscall commands below here */
++
++/* taks nid and nx_info functions */
++
++#include <asm/uaccess.h>
++
++
++int vc_task_nid(uint32_t id)
++{
++	nid_t nid;
++
++	if (id) {
++		struct task_struct *tsk;
++
++		read_lock(&tasklist_lock);
++		tsk = find_task_by_real_pid(id);
++		nid = (tsk) ? tsk->nid : -ESRCH;
++		read_unlock(&tasklist_lock);
++	} else
++		nid = nx_current_nid();
++	return nid;
++}
++
++
++int vc_nx_info(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_nx_info_v0 vc_data;
++
++	vc_data.nid = nxi->nx_id;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++/* network functions */
++
++int vc_net_create(uint32_t nid, void __user *data)
++{
++	struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
++	struct nx_info *new_nxi;
++	int ret;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	if ((nid > MAX_S_CONTEXT) || (nid < 2))
++		return -EINVAL;
++
++	new_nxi = __create_nx_info(nid);
++	if (IS_ERR(new_nxi))
++		return PTR_ERR(new_nxi);
++
++	/* initial flags */
++	new_nxi->nx_flags = vc_data.flagword;
++
++	ret = -ENOEXEC;
++	if (vs_net_change(new_nxi, VSC_NETUP))
++		goto out;
++
++	ret = nx_migrate_task(current, new_nxi);
++	if (ret)
++		goto out;
++
++	/* return context id on success */
++	ret = new_nxi->nx_id;
++
++	/* get a reference for persistent contexts */
++	if ((vc_data.flagword & NXF_PERSISTENT))
++		nx_set_persistent(new_nxi);
++out:
++	release_nx_info(new_nxi, NULL);
++	put_nx_info(new_nxi);
++	return ret;
++}
++
++
++int vc_net_migrate(struct nx_info *nxi, void __user *data)
++{
++	return nx_migrate_task(current, nxi);
++}
++
++
++
++int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
++	uint16_t type, uint16_t flags)
++{
++	struct nx_addr_v4 *nxa = &nxi->v4;
++
++	if (NX_IPV4(nxi)) {
++		/* locate last entry */
++		for (; nxa->next; nxa = nxa->next);
++		nxa->next = __alloc_nx_addr_v4();
++		nxa = nxa->next;
++
++		if (IS_ERR(nxa))
++			return PTR_ERR(nxa);
++	}
++
++	if (nxi->v4.next)
++		/* remove single ip for ip list */
++		nxi->nx_flags &= ~NXF_SINGLE_IP;
++
++	nxa->ip[0].s_addr = ip;
++	nxa->ip[1].s_addr = ip2;
++	nxa->mask.s_addr = mask;
++	nxa->type = type;
++	nxa->flags = flags;
++	return 0;
++}
++
++
++int vc_net_add(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_v0 vc_data;
++	int index, ret = 0;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++	case NXA_TYPE_IPV4:
++		if ((vc_data.count < 1) || (vc_data.count > 4))
++			return -EINVAL;
++
++		index = 0;
++		while (index < vc_data.count) {
++			ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0,
++				vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0);
++			if (ret)
++				return ret;
++			index++;
++		}
++		ret = index;
++		break;
++
++	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
++		nxi->v4_bcast = vc_data.ip[0];
++		ret = 1;
++		break;
++
++	case NXA_TYPE_IPV4|NXA_MOD_LBACK:
++		nxi->v4_lback = vc_data.ip[0];
++		ret = 1;
++		break;
++
++	default:
++		ret = -EINVAL;
++		break;
++	}
++	return ret;
++}
++
++int vc_net_remove(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_v0 vc_data;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++	case NXA_TYPE_ANY:
++		__dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
++		memset(&nxi->v4, 0, sizeof(nxi->v4));
++		break;
++
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++
++int vc_net_add_ipv4(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_ipv4_v1 vc_data;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++	case NXA_TYPE_ADDR:
++	case NXA_TYPE_RANGE:
++	case NXA_TYPE_MASK:
++		return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0,
++			vc_data.mask.s_addr, vc_data.type, vc_data.flags);
++
++	case NXA_TYPE_ADDR | NXA_MOD_BCAST:
++		nxi->v4_bcast = vc_data.ip;
++		break;
++
++	case NXA_TYPE_ADDR | NXA_MOD_LBACK:
++		nxi->v4_lback = vc_data.ip;
++		break;
++
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++int vc_net_remove_ipv4(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_ipv4_v1 vc_data;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++/*	case NXA_TYPE_ADDR:
++		break;		*/
++
++	case NXA_TYPE_ANY:
++		__dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
++		memset(&nxi->v4, 0, sizeof(nxi->v4));
++		break;
++
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++
++#ifdef CONFIG_IPV6
++
++int do_add_v6_addr(struct nx_info *nxi,
++	struct in6_addr *ip, struct in6_addr *mask,
++	uint32_t prefix, uint16_t type, uint16_t flags)
++{
++	struct nx_addr_v6 *nxa = &nxi->v6;
++
++	if (NX_IPV6(nxi)) {
++		/* locate last entry */
++		for (; nxa->next; nxa = nxa->next);
++		nxa->next = __alloc_nx_addr_v6();
++		nxa = nxa->next;
++
++		if (IS_ERR(nxa))
++			return PTR_ERR(nxa);
++	}
++
++	nxa->ip = *ip;
++	nxa->mask = *mask;
++	nxa->prefix = prefix;
++	nxa->type = type;
++	nxa->flags = flags;
++	return 0;
++}
++
++
++int vc_net_add_ipv6(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_ipv6_v1 vc_data;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++	case NXA_TYPE_ADDR:
++	case NXA_TYPE_MASK:
++		return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
++			vc_data.prefix, vc_data.type, vc_data.flags);
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_addr_ipv6_v1 vc_data;
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	switch (vc_data.type) {
++	case NXA_TYPE_ANY:
++		__dealloc_nx_addr_v6_all(xchg(&nxi->v6.next, NULL));
++		memset(&nxi->v6, 0, sizeof(nxi->v6));
++		break;
++
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++#endif	/* CONFIG_IPV6 */
++
++
++int vc_get_nflags(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_flags_v0 vc_data;
++
++	vc_data.flagword = nxi->nx_flags;
++
++	/* special STATE flag handling */
++	vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_nflags(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_flags_v0 vc_data;
++	uint64_t mask, trigger;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	/* special STATE flag handling */
++	mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
++	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
++
++	nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
++		vc_data.flagword, mask);
++	if (trigger & NXF_PERSISTENT)
++		nx_update_persistent(nxi);
++
++	return 0;
++}
++
++int vc_get_ncaps(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_caps_v0 vc_data;
++
++	vc_data.ncaps = nxi->nx_ncaps;
++	vc_data.cmask = ~0ULL;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++int vc_set_ncaps(struct nx_info *nxi, void __user *data)
++{
++	struct vcmd_net_caps_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
++		vc_data.ncaps, vc_data.cmask);
++	return 0;
++}
++
++
++#include <linux/module.h>
++
++module_init(init_network);
++
++EXPORT_SYMBOL_GPL(free_nx_info);
++EXPORT_SYMBOL_GPL(unhash_nx_info);
++
+--- a/kernel/vserver/proc.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/proc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,1098 @@
++/*
++ *  linux/kernel/vserver/proc.c
++ *
++ *  Virtual Context Support
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  basic structure
++ *  V0.02  adaptation vs1.3.0
++ *  V0.03  proc permissions
++ *  V0.04  locking/generic
++ *  V0.05  next generation procfs
++ *  V0.06  inode validation
++ *  V0.07  generic rewrite vid
++ *  V0.08  remove inode type
++ *
++ */
++
++#include <linux/proc_fs.h>
++#include <linux/fs_struct.h>
++#include <linux/mount.h>
++#include <asm/unistd.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_cvirt.h>
++
++#include <linux/in.h>
++#include <linux/inetdevice.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
++
++#include <linux/vserver/global.h>
++
++#include "cvirt_proc.h"
++#include "cacct_proc.h"
++#include "limit_proc.h"
++#include "sched_proc.h"
++#include "vci_config.h"
++
++
++static inline char *print_cap_t(char *buffer, kernel_cap_t *c)
++{
++	unsigned __capi;
++
++	CAP_FOR_EACH_U32(__capi) {
++		buffer += sprintf(buffer, "%08x",
++			c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
++	}
++	return buffer;
++}
++
++
++static struct proc_dir_entry *proc_virtual;
++
++static struct proc_dir_entry *proc_virtnet;
++
++
++/* first the actual feeds */
++
++
++static int proc_vci(char *buffer)
++{
++	return sprintf(buffer,
++		"VCIVersion:\t%04x:%04x\n"
++		"VCISyscall:\t%d\n"
++		"VCIKernel:\t%08x\n",
++		VCI_VERSION >> 16,
++		VCI_VERSION & 0xFFFF,
++		__NR_vserver,
++		vci_kernel_config());
++}
++
++static int proc_virtual_info(char *buffer)
++{
++	return proc_vci(buffer);
++}
++
++static int proc_virtual_status(char *buffer)
++{
++	return sprintf(buffer,
++		"#CTotal:\t%d\n"
++		"#CActive:\t%d\n"
++		"#NSProxy:\t%d\t%d %d %d %d %d %d\n"
++		"#InitTask:\t%d\t%d %d\n",
++		atomic_read(&vx_global_ctotal),
++		atomic_read(&vx_global_cactive),
++		atomic_read(&vs_global_nsproxy),
++		atomic_read(&vs_global_fs),
++		atomic_read(&vs_global_mnt_ns),
++		atomic_read(&vs_global_uts_ns),
++		atomic_read(&nr_ipc_ns),
++		atomic_read(&vs_global_user_ns),
++		atomic_read(&vs_global_pid_ns),
++		atomic_read(&init_task.usage),
++		atomic_read(&init_task.nsproxy->count),
++		init_task.fs->users);
++}
++
++
++int proc_vxi_info(struct vx_info *vxi, char *buffer)
++{
++	int length;
++
++	length = sprintf(buffer,
++		"ID:\t%d\n"
++		"Info:\t%p\n"
++		"Init:\t%d\n"
++		"OOM:\t%lld\n",
++		vxi->vx_id,
++		vxi,
++		vxi->vx_initpid,
++		vxi->vx_badness_bias);
++	return length;
++}
++
++int proc_vxi_status(struct vx_info *vxi, char *buffer)
++{
++	char *orig = buffer;
++
++	buffer += sprintf(buffer,
++		"UseCnt:\t%d\n"
++		"Tasks:\t%d\n"
++		"Flags:\t%016llx\n",
++		atomic_read(&vxi->vx_usecnt),
++		atomic_read(&vxi->vx_tasks),
++		(unsigned long long)vxi->vx_flags);
++
++	buffer += sprintf(buffer, "BCaps:\t");
++	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
++	buffer += sprintf(buffer, "\n");
++
++	buffer += sprintf(buffer,
++		"CCaps:\t%016llx\n"
++		"Spaces:\t%08lx %08lx\n",
++		(unsigned long long)vxi->vx_ccaps,
++		vxi->vx_nsmask[0], vxi->vx_nsmask[1]);
++	return buffer - orig;
++}
++
++int proc_vxi_limit(struct vx_info *vxi, char *buffer)
++{
++	return vx_info_proc_limit(&vxi->limit, buffer);
++}
++
++int proc_vxi_sched(struct vx_info *vxi, char *buffer)
++{
++	int cpu, length;
++
++	length = vx_info_proc_sched(&vxi->sched, buffer);
++	for_each_online_cpu(cpu) {
++		length += vx_info_proc_sched_pc(
++			&vx_per_cpu(vxi, sched_pc, cpu),
++			buffer + length, cpu);
++	}
++	return length;
++}
++
++int proc_vxi_nsproxy0(struct vx_info *vxi, char *buffer)
++{
++	return vx_info_proc_nsproxy(vxi->vx_nsproxy[0], buffer);
++}
++
++int proc_vxi_nsproxy1(struct vx_info *vxi, char *buffer)
++{
++	return vx_info_proc_nsproxy(vxi->vx_nsproxy[1], buffer);
++}
++
++int proc_vxi_cvirt(struct vx_info *vxi, char *buffer)
++{
++	int cpu, length;
++
++	vx_update_load(vxi);
++	length = vx_info_proc_cvirt(&vxi->cvirt, buffer);
++	for_each_online_cpu(cpu) {
++		length += vx_info_proc_cvirt_pc(
++			&vx_per_cpu(vxi, cvirt_pc, cpu),
++			buffer + length, cpu);
++	}
++	return length;
++}
++
++int proc_vxi_cacct(struct vx_info *vxi, char *buffer)
++{
++	return vx_info_proc_cacct(&vxi->cacct, buffer);
++}
++
++
++static int proc_virtnet_info(char *buffer)
++{
++	return proc_vci(buffer);
++}
++
++static int proc_virtnet_status(char *buffer)
++{
++	return sprintf(buffer,
++		"#CTotal:\t%d\n"
++		"#CActive:\t%d\n",
++		atomic_read(&nx_global_ctotal),
++		atomic_read(&nx_global_cactive));
++}
++
++int proc_nxi_info(struct nx_info *nxi, char *buffer)
++{
++	struct nx_addr_v4 *v4a;
++#ifdef	CONFIG_IPV6
++	struct nx_addr_v6 *v6a;
++#endif
++	int length, i;
++
++	length = sprintf(buffer,
++		"ID:\t%d\n"
++		"Info:\t%p\n"
++		"Bcast:\t" NIPQUAD_FMT "\n"
++		"Lback:\t" NIPQUAD_FMT "\n",
++		nxi->nx_id,
++		nxi,
++		NIPQUAD(nxi->v4_bcast.s_addr),
++		NIPQUAD(nxi->v4_lback.s_addr));
++
++	if (!NX_IPV4(nxi))
++		goto skip_v4;
++	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
++		length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n",
++			i, NXAV4(v4a));
++skip_v4:
++#ifdef	CONFIG_IPV6
++	if (!NX_IPV6(nxi))
++		goto skip_v6;
++	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
++		length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n",
++			i, NXAV6(v6a));
++skip_v6:
++#endif
++	return length;
++}
++
++int proc_nxi_status(struct nx_info *nxi, char *buffer)
++{
++	int length;
++
++	length = sprintf(buffer,
++		"UseCnt:\t%d\n"
++		"Tasks:\t%d\n"
++		"Flags:\t%016llx\n"
++		"NCaps:\t%016llx\n",
++		atomic_read(&nxi->nx_usecnt),
++		atomic_read(&nxi->nx_tasks),
++		(unsigned long long)nxi->nx_flags,
++		(unsigned long long)nxi->nx_ncaps);
++	return length;
++}
++
++
++
++/* here the inode helpers */
++
++struct vs_entry {
++	int len;
++	char *name;
++	mode_t mode;
++	struct inode_operations *iop;
++	struct file_operations *fop;
++	union proc_op op;
++};
++
++static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p)
++{
++	struct inode *inode = new_inode(sb);
++
++	if (!inode)
++		goto out;
++
++	inode->i_mode = p->mode;
++	if (p->iop)
++		inode->i_op = p->iop;
++	if (p->fop)
++		inode->i_fop = p->fop;
++
++	inode->i_nlink = (p->mode & S_IFDIR) ? 2 : 1;
++	inode->i_flags |= S_IMMUTABLE;
++
++	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
++
++	inode->i_uid = 0;
++	inode->i_gid = 0;
++	inode->i_tag = 0;
++out:
++	return inode;
++}
++
++static struct dentry *vs_proc_instantiate(struct inode *dir,
++	struct dentry *dentry, int id, void *ptr)
++{
++	struct vs_entry *p = ptr;
++	struct inode *inode = vs_proc_make_inode(dir->i_sb, p);
++	struct dentry *error = ERR_PTR(-EINVAL);
++
++	if (!inode)
++		goto out;
++
++	PROC_I(inode)->op = p->op;
++	PROC_I(inode)->fd = id;
++	d_add(dentry, inode);
++	error = NULL;
++out:
++	return error;
++}
++
++/* Lookups */
++
++typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int, void *);
++
++/*
++ * Fill a directory entry.
++ *
++ * If possible create the dcache entry and derive our inode number and
++ * file type from dcache entry.
++ *
++ * Since all of the proc inode numbers are dynamically generated, the inode
++ * numbers do not exist until the inode is cache.  This means creating the
++ * the dcache entry in readdir is necessary to keep the inode numbers
++ * reported by readdir in sync with the inode numbers reported
++ * by stat.
++ */
++static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
++	char *name, int len, instantiate_t instantiate, int id, void *ptr)
++{
++	struct dentry *child, *dir = filp->f_dentry;
++	struct inode *inode;
++	struct qstr qname;
++	ino_t ino = 0;
++	unsigned type = DT_UNKNOWN;
++
++	qname.name = name;
++	qname.len  = len;
++	qname.hash = full_name_hash(name, len);
++
++	child = d_lookup(dir, &qname);
++	if (!child) {
++		struct dentry *new;
++		new = d_alloc(dir, &qname);
++		if (new) {
++			child = instantiate(dir->d_inode, new, id, ptr);
++			if (child)
++				dput(new);
++			else
++				child = new;
++		}
++	}
++	if (!child || IS_ERR(child) || !child->d_inode)
++		goto end_instantiate;
++	inode = child->d_inode;
++	if (inode) {
++		ino = inode->i_ino;
++		type = inode->i_mode >> 12;
++	}
++	dput(child);
++end_instantiate:
++	if (!ino)
++		ino = find_inode_number(dir, &qname);
++	if (!ino)
++		ino = 1;
++	return filldir(dirent, name, len, filp->f_pos, ino, type);
++}
++
++
++
++/* get and revalidate vx_info/xid */
++
++static inline
++struct vx_info *get_proc_vx_info(struct inode *inode)
++{
++	return lookup_vx_info(PROC_I(inode)->fd);
++}
++
++static int proc_xid_revalidate(struct dentry *dentry, struct nameidata *nd)
++{
++	struct inode *inode = dentry->d_inode;
++	xid_t xid = PROC_I(inode)->fd;
++
++	if (!xid || xid_is_hashed(xid))
++		return 1;
++	d_drop(dentry);
++	return 0;
++}
++
++
++/* get and revalidate nx_info/nid */
++
++static int proc_nid_revalidate(struct dentry *dentry, struct nameidata *nd)
++{
++	struct inode *inode = dentry->d_inode;
++	nid_t nid = PROC_I(inode)->fd;
++
++	if (!nid || nid_is_hashed(nid))
++		return 1;
++	d_drop(dentry);
++	return 0;
++}
++
++
++
++#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
++
++static ssize_t proc_vs_info_read(struct file *file, char __user *buf,
++			  size_t count, loff_t *ppos)
++{
++	struct inode *inode = file->f_dentry->d_inode;
++	unsigned long page;
++	ssize_t length = 0;
++
++	if (count > PROC_BLOCK_SIZE)
++		count = PROC_BLOCK_SIZE;
++
++	/* fade that out as soon as stable */
++	WARN_ON(PROC_I(inode)->fd);
++
++	if (!(page = __get_free_page(GFP_KERNEL)))
++		return -ENOMEM;
++
++	BUG_ON(!PROC_I(inode)->op.proc_vs_read);
++	length = PROC_I(inode)->op.proc_vs_read((char *)page);
++
++	if (length >= 0)
++		length = simple_read_from_buffer(buf, count, ppos,
++			(char *)page, length);
++
++	free_page(page);
++	return length;
++}
++
++static ssize_t proc_vx_info_read(struct file *file, char __user *buf,
++			  size_t count, loff_t *ppos)
++{
++	struct inode *inode = file->f_dentry->d_inode;
++	struct vx_info *vxi = NULL;
++	xid_t xid = PROC_I(inode)->fd;
++	unsigned long page;
++	ssize_t length = 0;
++
++	if (count > PROC_BLOCK_SIZE)
++		count = PROC_BLOCK_SIZE;
++
++	/* fade that out as soon as stable */
++	WARN_ON(!xid);
++	vxi = lookup_vx_info(xid);
++	if (!vxi)
++		goto out;
++
++	length = -ENOMEM;
++	if (!(page = __get_free_page(GFP_KERNEL)))
++		goto out_put;
++
++	BUG_ON(!PROC_I(inode)->op.proc_vxi_read);
++	length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page);
++
++	if (length >= 0)
++		length = simple_read_from_buffer(buf, count, ppos,
++			(char *)page, length);
++
++	free_page(page);
++out_put:
++	put_vx_info(vxi);
++out:
++	return length;
++}
++
++static ssize_t proc_nx_info_read(struct file *file, char __user *buf,
++			  size_t count, loff_t *ppos)
++{
++	struct inode *inode = file->f_dentry->d_inode;
++	struct nx_info *nxi = NULL;
++	nid_t nid = PROC_I(inode)->fd;
++	unsigned long page;
++	ssize_t length = 0;
++
++	if (count > PROC_BLOCK_SIZE)
++		count = PROC_BLOCK_SIZE;
++
++	/* fade that out as soon as stable */
++	WARN_ON(!nid);
++	nxi = lookup_nx_info(nid);
++	if (!nxi)
++		goto out;
++
++	length = -ENOMEM;
++	if (!(page = __get_free_page(GFP_KERNEL)))
++		goto out_put;
++
++	BUG_ON(!PROC_I(inode)->op.proc_nxi_read);
++	length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page);
++
++	if (length >= 0)
++		length = simple_read_from_buffer(buf, count, ppos,
++			(char *)page, length);
++
++	free_page(page);
++out_put:
++	put_nx_info(nxi);
++out:
++	return length;
++}
++
++
++
++/* here comes the lower level */
++
++
++#define NOD(NAME, MODE, IOP, FOP, OP) {	\
++	.len  = sizeof(NAME) - 1,	\
++	.name = (NAME),			\
++	.mode = MODE,			\
++	.iop  = IOP,			\
++	.fop  = FOP,			\
++	.op   = OP,			\
++}
++
++
++#define DIR(NAME, MODE, OTYPE)				\
++	NOD(NAME, (S_IFDIR | (MODE)),			\
++		&proc_ ## OTYPE ## _inode_operations,	\
++		&proc_ ## OTYPE ## _file_operations, { } )
++
++#define INF(NAME, MODE, OTYPE)				\
++	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
++		&proc_vs_info_file_operations,		\
++		{ .proc_vs_read = &proc_##OTYPE } )
++
++#define VINF(NAME, MODE, OTYPE)				\
++	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
++		&proc_vx_info_file_operations,		\
++		{ .proc_vxi_read = &proc_##OTYPE } )
++
++#define NINF(NAME, MODE, OTYPE)				\
++	NOD(NAME, (S_IFREG | (MODE)), NULL,		\
++		&proc_nx_info_file_operations,		\
++		{ .proc_nxi_read = &proc_##OTYPE } )
++
++
++static struct file_operations proc_vs_info_file_operations = {
++	.read =		proc_vs_info_read,
++};
++
++static struct file_operations proc_vx_info_file_operations = {
++	.read =		proc_vx_info_read,
++};
++
++static struct dentry_operations proc_xid_dentry_operations = {
++	.d_revalidate =	proc_xid_revalidate,
++};
++
++static struct vs_entry vx_base_stuff[] = {
++	VINF("info",	S_IRUGO, vxi_info),
++	VINF("status",	S_IRUGO, vxi_status),
++	VINF("limit",	S_IRUGO, vxi_limit),
++	VINF("sched",	S_IRUGO, vxi_sched),
++	VINF("nsproxy",	S_IRUGO, vxi_nsproxy0),
++	VINF("nsproxy1",S_IRUGO, vxi_nsproxy1),
++	VINF("cvirt",	S_IRUGO, vxi_cvirt),
++	VINF("cacct",	S_IRUGO, vxi_cacct),
++	{}
++};
++
++
++
++
++static struct dentry *proc_xid_instantiate(struct inode *dir,
++	struct dentry *dentry, int id, void *ptr)
++{
++	dentry->d_op = &proc_xid_dentry_operations;
++	return vs_proc_instantiate(dir, dentry, id, ptr);
++}
++
++static struct dentry *proc_xid_lookup(struct inode *dir,
++	struct dentry *dentry, struct nameidata *nd)
++{
++	struct vs_entry *p = vx_base_stuff;
++	struct dentry *error = ERR_PTR(-ENOENT);
++
++	for (; p->name; p++) {
++		if (p->len != dentry->d_name.len)
++			continue;
++		if (!memcmp(dentry->d_name.name, p->name, p->len))
++			break;
++	}
++	if (!p->name)
++		goto out;
++
++	error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
++out:
++	return error;
++}
++
++static int proc_xid_readdir(struct file *filp,
++	void *dirent, filldir_t filldir)
++{
++	struct dentry *dentry = filp->f_dentry;
++	struct inode *inode = dentry->d_inode;
++	struct vs_entry *p = vx_base_stuff;
++	int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry);
++	int pos, index;
++	u64 ino;
++
++	pos = filp->f_pos;
++	switch (pos) {
++	case 0:
++		ino = inode->i_ino;
++		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	case 1:
++		ino = parent_ino(dentry);
++		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	default:
++		index = pos - 2;
++		if (index >= size)
++			goto out;
++		for (p += index; p->name; p++) {
++			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++				vs_proc_instantiate, PROC_I(inode)->fd, p))
++				goto out;
++			pos++;
++		}
++	}
++out:
++	filp->f_pos = pos;
++	return 1;
++}
++
++
++
++static struct file_operations proc_nx_info_file_operations = {
++	.read =		proc_nx_info_read,
++};
++
++static struct dentry_operations proc_nid_dentry_operations = {
++	.d_revalidate =	proc_nid_revalidate,
++};
++
++static struct vs_entry nx_base_stuff[] = {
++	NINF("info",	S_IRUGO, nxi_info),
++	NINF("status",	S_IRUGO, nxi_status),
++	{}
++};
++
++
++static struct dentry *proc_nid_instantiate(struct inode *dir,
++	struct dentry *dentry, int id, void *ptr)
++{
++	dentry->d_op = &proc_nid_dentry_operations;
++	return vs_proc_instantiate(dir, dentry, id, ptr);
++}
++
++static struct dentry *proc_nid_lookup(struct inode *dir,
++	struct dentry *dentry, struct nameidata *nd)
++{
++	struct vs_entry *p = nx_base_stuff;
++	struct dentry *error = ERR_PTR(-ENOENT);
++
++	for (; p->name; p++) {
++		if (p->len != dentry->d_name.len)
++			continue;
++		if (!memcmp(dentry->d_name.name, p->name, p->len))
++			break;
++	}
++	if (!p->name)
++		goto out;
++
++	error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
++out:
++	return error;
++}
++
++static int proc_nid_readdir(struct file *filp,
++	void *dirent, filldir_t filldir)
++{
++	struct dentry *dentry = filp->f_dentry;
++	struct inode *inode = dentry->d_inode;
++	struct vs_entry *p = nx_base_stuff;
++	int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry);
++	int pos, index;
++	u64 ino;
++
++	pos = filp->f_pos;
++	switch (pos) {
++	case 0:
++		ino = inode->i_ino;
++		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	case 1:
++		ino = parent_ino(dentry);
++		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	default:
++		index = pos - 2;
++		if (index >= size)
++			goto out;
++		for (p += index; p->name; p++) {
++			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++				vs_proc_instantiate, PROC_I(inode)->fd, p))
++				goto out;
++			pos++;
++		}
++	}
++out:
++	filp->f_pos = pos;
++	return 1;
++}
++
++
++#define MAX_MULBY10	((~0U - 9) / 10)
++
++static inline int atovid(const char *str, int len)
++{
++	int vid, c;
++
++	vid = 0;
++	while (len-- > 0) {
++		c = *str - '0';
++		str++;
++		if (c > 9)
++			return -1;
++		if (vid >= MAX_MULBY10)
++			return -1;
++		vid *= 10;
++		vid += c;
++		if (!vid)
++			return -1;
++	}
++	return vid;
++}
++
++/* now the upper level (virtual) */
++
++
++static struct file_operations proc_xid_file_operations = {
++	.read =		generic_read_dir,
++	.readdir =	proc_xid_readdir,
++};
++
++static struct inode_operations proc_xid_inode_operations = {
++	.lookup =	proc_xid_lookup,
++};
++
++static struct vs_entry vx_virtual_stuff[] = {
++	INF("info",	S_IRUGO, virtual_info),
++	INF("status",	S_IRUGO, virtual_status),
++	DIR(NULL,	S_IRUGO | S_IXUGO, xid),
++};
++
++
++static struct dentry *proc_virtual_lookup(struct inode *dir,
++	struct dentry *dentry, struct nameidata *nd)
++{
++	struct vs_entry *p = vx_virtual_stuff;
++	struct dentry *error = ERR_PTR(-ENOENT);
++	int id = 0;
++
++	for (; p->name; p++) {
++		if (p->len != dentry->d_name.len)
++			continue;
++		if (!memcmp(dentry->d_name.name, p->name, p->len))
++			break;
++	}
++	if (p->name)
++		goto instantiate;
++
++	id = atovid(dentry->d_name.name, dentry->d_name.len);
++	if ((id < 0) || !xid_is_hashed(id))
++		goto out;
++
++instantiate:
++	error = proc_xid_instantiate(dir, dentry, id, p);
++out:
++	return error;
++}
++
++static struct file_operations proc_nid_file_operations = {
++	.read =		generic_read_dir,
++	.readdir =	proc_nid_readdir,
++};
++
++static struct inode_operations proc_nid_inode_operations = {
++	.lookup =	proc_nid_lookup,
++};
++
++static struct vs_entry nx_virtnet_stuff[] = {
++	INF("info",	S_IRUGO, virtnet_info),
++	INF("status",	S_IRUGO, virtnet_status),
++	DIR(NULL,	S_IRUGO | S_IXUGO, nid),
++};
++
++
++static struct dentry *proc_virtnet_lookup(struct inode *dir,
++	struct dentry *dentry, struct nameidata *nd)
++{
++	struct vs_entry *p = nx_virtnet_stuff;
++	struct dentry *error = ERR_PTR(-ENOENT);
++	int id = 0;
++
++	for (; p->name; p++) {
++		if (p->len != dentry->d_name.len)
++			continue;
++		if (!memcmp(dentry->d_name.name, p->name, p->len))
++			break;
++	}
++	if (p->name)
++		goto instantiate;
++
++	id = atovid(dentry->d_name.name, dentry->d_name.len);
++	if ((id < 0) || !nid_is_hashed(id))
++		goto out;
++
++instantiate:
++	error = proc_nid_instantiate(dir, dentry, id, p);
++out:
++	return error;
++}
++
++
++#define PROC_MAXVIDS 32
++
++int proc_virtual_readdir(struct file *filp,
++	void *dirent, filldir_t filldir)
++{
++	struct dentry *dentry = filp->f_dentry;
++	struct inode *inode = dentry->d_inode;
++	struct vs_entry *p = vx_virtual_stuff;
++	int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry);
++	int pos, index;
++	unsigned int xid_array[PROC_MAXVIDS];
++	char buf[PROC_NUMBUF];
++	unsigned int nr_xids, i;
++	u64 ino;
++
++	pos = filp->f_pos;
++	switch (pos) {
++	case 0:
++		ino = inode->i_ino;
++		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	case 1:
++		ino = parent_ino(dentry);
++		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	default:
++		index = pos - 2;
++		if (index >= size)
++			goto entries;
++		for (p += index; p->name; p++) {
++			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++				vs_proc_instantiate, 0, p))
++				goto out;
++			pos++;
++		}
++	entries:
++		index = pos - size;
++		p = &vx_virtual_stuff[size - 1];
++		nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS);
++		for (i = 0; i < nr_xids; i++) {
++			int n, xid = xid_array[i];
++			unsigned int j = PROC_NUMBUF;
++
++			n = xid;
++			do
++				buf[--j] = '0' + (n % 10);
++			while (n /= 10);
++
++			if (proc_fill_cache(filp, dirent, filldir,
++				buf + j, PROC_NUMBUF - j,
++				vs_proc_instantiate, xid, p))
++				goto out;
++			pos++;
++		}
++	}
++out:
++	filp->f_pos = pos;
++	return 0;
++}
++
++static int proc_virtual_getattr(struct vfsmount *mnt,
++	struct dentry *dentry, struct kstat *stat)
++{
++	struct inode *inode = dentry->d_inode;
++
++	generic_fillattr(inode, stat);
++	stat->nlink = 2 + atomic_read(&vx_global_cactive);
++	return 0;
++}
++
++static struct file_operations proc_virtual_dir_operations = {
++	.read =		generic_read_dir,
++	.readdir =	proc_virtual_readdir,
++};
++
++static struct inode_operations proc_virtual_dir_inode_operations = {
++	.getattr =	proc_virtual_getattr,
++	.lookup =	proc_virtual_lookup,
++};
++
++
++
++
++
++int proc_virtnet_readdir(struct file *filp,
++	void *dirent, filldir_t filldir)
++{
++	struct dentry *dentry = filp->f_dentry;
++	struct inode *inode = dentry->d_inode;
++	struct vs_entry *p = nx_virtnet_stuff;
++	int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry);
++	int pos, index;
++	unsigned int nid_array[PROC_MAXVIDS];
++	char buf[PROC_NUMBUF];
++	unsigned int nr_nids, i;
++	u64 ino;
++
++	pos = filp->f_pos;
++	switch (pos) {
++	case 0:
++		ino = inode->i_ino;
++		if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	case 1:
++		ino = parent_ino(dentry);
++		if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
++			goto out;
++		pos++;
++		/* fall through */
++	default:
++		index = pos - 2;
++		if (index >= size)
++			goto entries;
++		for (p += index; p->name; p++) {
++			if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
++				vs_proc_instantiate, 0, p))
++				goto out;
++			pos++;
++		}
++	entries:
++		index = pos - size;
++		p = &nx_virtnet_stuff[size - 1];
++		nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS);
++		for (i = 0; i < nr_nids; i++) {
++			int n, nid = nid_array[i];
++			unsigned int j = PROC_NUMBUF;
++
++			n = nid;
++			do
++				buf[--j] = '0' + (n % 10);
++			while (n /= 10);
++
++			if (proc_fill_cache(filp, dirent, filldir,
++				buf + j, PROC_NUMBUF - j,
++				vs_proc_instantiate, nid, p))
++				goto out;
++			pos++;
++		}
++	}
++out:
++	filp->f_pos = pos;
++	return 0;
++}
++
++static int proc_virtnet_getattr(struct vfsmount *mnt,
++	struct dentry *dentry, struct kstat *stat)
++{
++	struct inode *inode = dentry->d_inode;
++
++	generic_fillattr(inode, stat);
++	stat->nlink = 2 + atomic_read(&nx_global_cactive);
++	return 0;
++}
++
++static struct file_operations proc_virtnet_dir_operations = {
++	.read =		generic_read_dir,
++	.readdir =	proc_virtnet_readdir,
++};
++
++static struct inode_operations proc_virtnet_dir_inode_operations = {
++	.getattr =	proc_virtnet_getattr,
++	.lookup =	proc_virtnet_lookup,
++};
++
++
++
++void proc_vx_init(void)
++{
++	struct proc_dir_entry *ent;
++
++	ent = proc_mkdir("virtual", 0);
++	if (ent) {
++		ent->proc_fops = &proc_virtual_dir_operations;
++		ent->proc_iops = &proc_virtual_dir_inode_operations;
++	}
++	proc_virtual = ent;
++
++	ent = proc_mkdir("virtnet", 0);
++	if (ent) {
++		ent->proc_fops = &proc_virtnet_dir_operations;
++		ent->proc_iops = &proc_virtnet_dir_inode_operations;
++	}
++	proc_virtnet = ent;
++}
++
++
++
++
++/* per pid info */
++
++
++int proc_pid_vx_info(struct task_struct *p, char *buffer)
++{
++	struct vx_info *vxi;
++	char *orig = buffer;
++
++	buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p));
++
++	vxi = task_get_vx_info(p);
++	if (!vxi)
++		goto out;
++
++	buffer += sprintf(buffer, "BCaps:\t");
++	buffer = print_cap_t(buffer, &vxi->vx_bcaps);
++	buffer += sprintf(buffer, "\n");
++	buffer += sprintf(buffer, "CCaps:\t%016llx\n",
++		(unsigned long long)vxi->vx_ccaps);
++	buffer += sprintf(buffer, "CFlags:\t%016llx\n",
++		(unsigned long long)vxi->vx_flags);
++	buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid);
++
++	put_vx_info(vxi);
++out:
++	return buffer - orig;
++}
++
++
++int proc_pid_nx_info(struct task_struct *p, char *buffer)
++{
++	struct nx_info *nxi;
++	struct nx_addr_v4 *v4a;
++#ifdef	CONFIG_IPV6
++	struct nx_addr_v6 *v6a;
++#endif
++	char *orig = buffer;
++	int i;
++
++	buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p));
++
++	nxi = task_get_nx_info(p);
++	if (!nxi)
++		goto out;
++
++	buffer += sprintf(buffer, "NCaps:\t%016llx\n",
++		(unsigned long long)nxi->nx_ncaps);
++	buffer += sprintf(buffer, "NFlags:\t%016llx\n",
++		(unsigned long long)nxi->nx_flags);
++
++	buffer += sprintf(buffer,
++		"V4Root[bcast]:\t" NIPQUAD_FMT "\n",
++		NIPQUAD(nxi->v4_bcast.s_addr));
++	buffer += sprintf (buffer,
++		"V4Root[lback]:\t" NIPQUAD_FMT "\n",
++		NIPQUAD(nxi->v4_lback.s_addr));
++	if (!NX_IPV4(nxi))
++		goto skip_v4;
++	for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
++		buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n",
++			i, NXAV4(v4a));
++skip_v4:
++#ifdef	CONFIG_IPV6
++	if (!NX_IPV6(nxi))
++		goto skip_v6;
++	for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
++		buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n",
++			i, NXAV6(v6a));
++skip_v6:
++#endif
++	put_nx_info(nxi);
++out:
++	return buffer - orig;
++}
++
+--- a/kernel/vserver/sched.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/sched.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,414 @@
++/*
++ *  linux/kernel/vserver/sched.c
++ *
++ *  Virtual Server: Scheduler Support
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  adapted Sam Vilains version to 2.6.3
++ *  V0.02  removed legacy interface
++ *  V0.03  changed vcmds to vxi arg
++ *  V0.04  removed older and legacy interfaces
++ *
++ */
++
++#include <linux/vs_context.h>
++#include <linux/vs_sched.h>
++#include <linux/vserver/sched_cmd.h>
++
++#include <asm/uaccess.h>
++
++
++#define vxd_check_range(val, min, max) do {		\
++	vxlprintk((val < min) || (val > max),		\
++		"check_range(%ld,%ld,%ld)",		\
++		(long)val, (long)min, (long)max,	\
++		__FILE__, __LINE__);			\
++	} while (0)
++
++
++void vx_update_sched_param(struct _vx_sched *sched,
++	struct _vx_sched_pc *sched_pc)
++{
++	unsigned int set_mask = sched->update_mask;
++
++	if (set_mask & VXSM_FILL_RATE)
++		sched_pc->fill_rate[0] = sched->fill_rate[0];
++	if (set_mask & VXSM_INTERVAL)
++		sched_pc->interval[0] = sched->interval[0];
++	if (set_mask & VXSM_FILL_RATE2)
++		sched_pc->fill_rate[1] = sched->fill_rate[1];
++	if (set_mask & VXSM_INTERVAL2)
++		sched_pc->interval[1] = sched->interval[1];
++	if (set_mask & VXSM_TOKENS)
++		sched_pc->tokens = sched->tokens;
++	if (set_mask & VXSM_TOKENS_MIN)
++		sched_pc->tokens_min = sched->tokens_min;
++	if (set_mask & VXSM_TOKENS_MAX)
++		sched_pc->tokens_max = sched->tokens_max;
++	if (set_mask & VXSM_PRIO_BIAS)
++		sched_pc->prio_bias = sched->prio_bias;
++
++	if (set_mask & VXSM_IDLE_TIME)
++		sched_pc->flags |= VXSF_IDLE_TIME;
++	else
++		sched_pc->flags &= ~VXSF_IDLE_TIME;
++
++	/* reset time */
++	sched_pc->norm_time = jiffies;
++}
++
++
++/*
++ * recalculate the context's scheduling tokens
++ *
++ * ret > 0 : number of tokens available
++ * ret < 0 : on hold, check delta_min[]
++ *	     -1 only jiffies
++ *	     -2 also idle time
++ *
++ */
++int vx_tokens_recalc(struct _vx_sched_pc *sched_pc,
++	unsigned long *norm_time, unsigned long *idle_time, int delta_min[2])
++{
++	long delta;
++	long tokens = 0;
++	int flags = sched_pc->flags;
++
++	/* how much time did pass? */
++	delta = *norm_time - sched_pc->norm_time;
++	// printk("@ %ld, %ld, %ld\n", *norm_time, sched_pc->norm_time, jiffies);
++	vxd_check_range(delta, 0, INT_MAX);
++
++	if (delta >= sched_pc->interval[0]) {
++		long tokens, integral;
++
++		/* calc integral token part */
++		tokens = delta / sched_pc->interval[0];
++		integral = tokens * sched_pc->interval[0];
++		tokens *= sched_pc->fill_rate[0];
++#ifdef	CONFIG_VSERVER_HARDCPU
++		delta_min[0] = delta - integral;
++		vxd_check_range(delta_min[0], 0, sched_pc->interval[0]);
++#endif
++		/* advance time */
++		sched_pc->norm_time += delta;
++
++		/* add tokens */
++		sched_pc->tokens += tokens;
++		sched_pc->token_time += tokens;
++	} else
++		delta_min[0] = delta;
++
++#ifdef	CONFIG_VSERVER_IDLETIME
++	if (!(flags & VXSF_IDLE_TIME))
++		goto skip_idle;
++
++	/* how much was the idle skip? */
++	delta = *idle_time - sched_pc->idle_time;
++	vxd_check_range(delta, 0, INT_MAX);
++
++	if (delta >= sched_pc->interval[1]) {
++		long tokens, integral;
++
++		/* calc fair share token part */
++		tokens = delta / sched_pc->interval[1];
++		integral = tokens * sched_pc->interval[1];
++		tokens *= sched_pc->fill_rate[1];
++		delta_min[1] = delta - integral;
++		vxd_check_range(delta_min[1], 0, sched_pc->interval[1]);
++
++		/* advance idle time */
++		sched_pc->idle_time += integral;
++
++		/* add tokens */
++		sched_pc->tokens += tokens;
++		sched_pc->token_time += tokens;
++	} else
++		delta_min[1] = delta;
++skip_idle:
++#endif
++
++	/* clip at maximum */
++	if (sched_pc->tokens > sched_pc->tokens_max)
++		sched_pc->tokens = sched_pc->tokens_max;
++	tokens = sched_pc->tokens;
++
++	if ((flags & VXSF_ONHOLD)) {
++		/* can we unhold? */
++		if (tokens >= sched_pc->tokens_min) {
++			flags &= ~VXSF_ONHOLD;
++			sched_pc->hold_ticks +=
++				*norm_time - sched_pc->onhold;
++		} else
++			goto on_hold;
++	} else {
++		/* put on hold? */
++		if (tokens <= 0) {
++			flags |= VXSF_ONHOLD;
++			sched_pc->onhold = *norm_time;
++			goto on_hold;
++		}
++	}
++	sched_pc->flags = flags;
++	return tokens;
++
++on_hold:
++	tokens = sched_pc->tokens_min - tokens;
++	sched_pc->flags = flags;
++	// BUG_ON(tokens < 0); probably doesn't hold anymore
++
++#ifdef	CONFIG_VSERVER_HARDCPU
++	/* next interval? */
++	if (!sched_pc->fill_rate[0])
++		delta_min[0] = HZ;
++	else if (tokens > sched_pc->fill_rate[0])
++		delta_min[0] += sched_pc->interval[0] *
++			tokens / sched_pc->fill_rate[0];
++	else
++		delta_min[0] = sched_pc->interval[0] - delta_min[0];
++	vxd_check_range(delta_min[0], 0, INT_MAX);
++
++#ifdef	CONFIG_VSERVER_IDLETIME
++	if (!(flags & VXSF_IDLE_TIME))
++		return -1;
++
++	/* next interval? */
++	if (!sched_pc->fill_rate[1])
++		delta_min[1] = HZ;
++	else if (tokens > sched_pc->fill_rate[1])
++		delta_min[1] += sched_pc->interval[1] *
++			tokens / sched_pc->fill_rate[1];
++	else
++		delta_min[1] = sched_pc->interval[1] - delta_min[1];
++	vxd_check_range(delta_min[1], 0, INT_MAX);
++
++	return -2;
++#else
++	return -1;
++#endif /* CONFIG_VSERVER_IDLETIME */
++#else
++	return 0;
++#endif /* CONFIG_VSERVER_HARDCPU */
++}
++
++static inline unsigned long msec_to_ticks(unsigned long msec)
++{
++	return msecs_to_jiffies(msec);
++}
++
++static inline unsigned long ticks_to_msec(unsigned long ticks)
++{
++	return jiffies_to_msecs(ticks);
++}
++
++static inline unsigned long ticks_to_usec(unsigned long ticks)
++{
++	return jiffies_to_usecs(ticks);
++}
++
++
++static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data)
++{
++	unsigned int set_mask = data->mask;
++	unsigned int update_mask;
++	int i, cpu;
++
++	/* Sanity check data values */
++	if (data->tokens_max <= 0)
++		data->tokens_max = HZ;
++	if (data->tokens_min < 0)
++		data->tokens_min = HZ / 3;
++	if (data->tokens_min >= data->tokens_max)
++		data->tokens_min = data->tokens_max;
++
++	if (data->prio_bias > MAX_PRIO_BIAS)
++		data->prio_bias = MAX_PRIO_BIAS;
++	if (data->prio_bias < MIN_PRIO_BIAS)
++		data->prio_bias = MIN_PRIO_BIAS;
++
++	spin_lock(&vxi->sched.tokens_lock);
++
++	/* sync up on delayed updates */
++	for_each_cpu_mask(cpu, vxi->sched.update)
++		vx_update_sched_param(&vxi->sched,
++			&vx_per_cpu(vxi, sched_pc, cpu));
++
++	if (set_mask & VXSM_FILL_RATE)
++		vxi->sched.fill_rate[0] = data->fill_rate[0];
++	if (set_mask & VXSM_FILL_RATE2)
++		vxi->sched.fill_rate[1] = data->fill_rate[1];
++	if (set_mask & VXSM_INTERVAL)
++		vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ?
++			msec_to_ticks(data->interval[0]) : data->interval[0];
++	if (set_mask & VXSM_INTERVAL2)
++		vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ?
++			msec_to_ticks(data->interval[1]) : data->interval[1];
++	if (set_mask & VXSM_TOKENS)
++		vxi->sched.tokens = data->tokens;
++	if (set_mask & VXSM_TOKENS_MIN)
++		vxi->sched.tokens_min = data->tokens_min;
++	if (set_mask & VXSM_TOKENS_MAX)
++		vxi->sched.tokens_max = data->tokens_max;
++	if (set_mask & VXSM_PRIO_BIAS)
++		vxi->sched.prio_bias = data->prio_bias;
++
++	/* Sanity check rate/interval */
++	for (i = 0; i < 2; i++) {
++		if (data->fill_rate[i] < 0)
++			data->fill_rate[i] = 0;
++		if (data->interval[i] <= 0)
++			data->interval[i] = HZ;
++	}
++
++	update_mask = vxi->sched.update_mask & VXSM_SET_MASK;
++	update_mask |= (set_mask & (VXSM_SET_MASK | VXSM_IDLE_TIME));
++	vxi->sched.update_mask = update_mask;
++
++#ifdef	CONFIG_SMP
++	rmb();
++	if (set_mask & VXSM_CPU_ID) {
++		vxi->sched.update = cpumask_of_cpu(data->cpu_id);
++		cpus_and(vxi->sched.update, cpu_online_map,
++			vxi->sched.update);
++	} else
++		vxi->sched.update = cpu_online_map;
++
++	/* forced reload? */
++	if (set_mask & VXSM_FORCE) {
++		for_each_cpu_mask(cpu, vxi->sched.update)
++			vx_update_sched_param(&vxi->sched,
++				&vx_per_cpu(vxi, sched_pc, cpu));
++		vxi->sched.update = CPU_MASK_NONE;
++	}
++#else
++	/* on UP we update immediately */
++	vx_update_sched_param(&vxi->sched,
++		&vx_per_cpu(vxi, sched_pc, 0));
++#endif
++
++	spin_unlock(&vxi->sched.tokens_lock);
++	return 0;
++}
++
++
++#define COPY_IDS(C) C(cpu_id); C(bucket_id)
++#define COPY_PRI(C) C(prio_bias)
++#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
++#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]);	\
++		    C(fill_rate[1]); C(interval[1]);
++
++#define COPY_VALUE(name) vc_data.name = data->name
++
++static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data)
++{
++	struct vcmd_sched_v5 vc_data;
++
++	vc_data.mask = data->set_mask;
++	COPY_IDS(COPY_VALUE);
++	COPY_PRI(COPY_VALUE);
++	COPY_TOK(COPY_VALUE);
++	vc_data.fill_rate[0] = vc_data.fill_rate[1] = data->fill_rate;
++	vc_data.interval[0] = vc_data.interval[1] = data->interval;
++	return do_set_sched(vxi, &vc_data);
++}
++
++int vc_set_sched_v4(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_set_sched_v4 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_sched_v4(vxi, &vc_data);
++}
++
++	/* latest interface is v5 */
++
++int vc_set_sched(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_sched_v5 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return do_set_sched(vxi, &vc_data);
++}
++
++
++#define COPY_PRI(C) C(prio_bias)
++#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
++#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]);    \
++		    C(fill_rate[1]); C(interval[1]);
++
++#define COPY_VALUE(name) vc_data.name = data->name
++
++
++int vc_get_sched(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_sched_v5 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	if (vc_data.mask & VXSM_CPU_ID) {
++		int cpu = vc_data.cpu_id;
++		struct _vx_sched_pc *data;
++
++		if (!cpu_possible(cpu))
++			return -EINVAL;
++
++		data = &vx_per_cpu(vxi, sched_pc, cpu);
++		COPY_TOK(COPY_VALUE);
++		COPY_PRI(COPY_VALUE);
++		COPY_FRI(COPY_VALUE);
++
++		if (data->flags & VXSF_IDLE_TIME)
++			vc_data.mask |= VXSM_IDLE_TIME;
++	} else {
++		struct _vx_sched *data = &vxi->sched;
++
++		COPY_TOK(COPY_VALUE);
++		COPY_PRI(COPY_VALUE);
++		COPY_FRI(COPY_VALUE);
++	}
++
++	if (vc_data.mask & VXSM_MSEC) {
++		vc_data.interval[0] = ticks_to_msec(vc_data.interval[0]);
++		vc_data.interval[1] = ticks_to_msec(vc_data.interval[1]);
++	}
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
++
++int vc_sched_info(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_sched_info vc_data;
++	int cpu;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	cpu = vc_data.cpu_id;
++	if (!cpu_possible(cpu))
++		return -EINVAL;
++
++	if (vxi) {
++		struct _vx_sched_pc *sched_pc =
++			&vx_per_cpu(vxi, sched_pc, cpu);
++
++		vc_data.user_msec = ticks_to_msec(sched_pc->user_ticks);
++		vc_data.sys_msec = ticks_to_msec(sched_pc->sys_ticks);
++		vc_data.hold_msec = ticks_to_msec(sched_pc->hold_ticks);
++		vc_data.vavavoom = sched_pc->vavavoom;
++	}
++	vc_data.token_usec = ticks_to_usec(1);
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		return -EFAULT;
++	return 0;
++}
++
+--- a/kernel/vserver/sched_init.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/sched_init.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,50 @@
++
++static inline void vx_info_init_sched(struct _vx_sched *sched)
++{
++	static struct lock_class_key tokens_lock_key;
++
++	/* scheduling; hard code starting values as constants */
++	sched->fill_rate[0]	= 1;
++	sched->interval[0]	= 4;
++	sched->fill_rate[1]	= 1;
++	sched->interval[1]	= 8;
++	sched->tokens		= HZ >> 2;
++	sched->tokens_min	= HZ >> 4;
++	sched->tokens_max	= HZ >> 1;
++	sched->tokens_lock	= SPIN_LOCK_UNLOCKED;
++	sched->prio_bias	= 0;
++
++	lockdep_set_class(&sched->tokens_lock, &tokens_lock_key);
++}
++
++static inline
++void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
++{
++	sched_pc->fill_rate[0]	= 1;
++	sched_pc->interval[0]	= 4;
++	sched_pc->fill_rate[1]	= 1;
++	sched_pc->interval[1]	= 8;
++	sched_pc->tokens	= HZ >> 2;
++	sched_pc->tokens_min	= HZ >> 4;
++	sched_pc->tokens_max	= HZ >> 1;
++	sched_pc->prio_bias	= 0;
++	sched_pc->vavavoom	= 0;
++	sched_pc->token_time	= 0;
++	sched_pc->idle_time	= 0;
++	sched_pc->norm_time	= jiffies;
++
++	sched_pc->user_ticks = 0;
++	sched_pc->sys_ticks = 0;
++	sched_pc->hold_ticks = 0;
++}
++
++static inline void vx_info_exit_sched(struct _vx_sched *sched)
++{
++	return;
++}
++
++static inline
++void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
++{
++	return;
++}
+--- a/kernel/vserver/sched_proc.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/sched_proc.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,57 @@
++#ifndef _VX_SCHED_PROC_H
++#define _VX_SCHED_PROC_H
++
++
++static inline
++int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
++{
++	int length = 0;
++
++	length += sprintf(buffer,
++		"FillRate:\t%8d,%d\n"
++		"Interval:\t%8d,%d\n"
++		"TokensMin:\t%8d\n"
++		"TokensMax:\t%8d\n"
++		"PrioBias:\t%8d\n",
++		sched->fill_rate[0],
++		sched->fill_rate[1],
++		sched->interval[0],
++		sched->interval[1],
++		sched->tokens_min,
++		sched->tokens_max,
++		sched->prio_bias);
++	return length;
++}
++
++static inline
++int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc,
++	char *buffer, int cpu)
++{
++	int length = 0;
++
++	length += sprintf(buffer + length,
++		"cpu %d: %lld %lld %lld %ld %ld", cpu,
++		(unsigned long long)sched_pc->user_ticks,
++		(unsigned long long)sched_pc->sys_ticks,
++		(unsigned long long)sched_pc->hold_ticks,
++		sched_pc->token_time,
++		sched_pc->idle_time);
++	length += sprintf(buffer + length,
++		" %c%c %d %d %d %d/%d %d/%d",
++		(sched_pc->flags & VXSF_ONHOLD) ? 'H' : 'R',
++		(sched_pc->flags & VXSF_IDLE_TIME) ? 'I' : '-',
++		sched_pc->tokens,
++		sched_pc->tokens_min,
++		sched_pc->tokens_max,
++		sched_pc->fill_rate[0],
++		sched_pc->interval[0],
++		sched_pc->fill_rate[1],
++		sched_pc->interval[1]);
++	length += sprintf(buffer + length,
++		" %d %d\n",
++		sched_pc->prio_bias,
++		sched_pc->vavavoom);
++	return length;
++}
++
++#endif	/* _VX_SCHED_PROC_H */
+--- a/kernel/vserver/signal.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/signal.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,132 @@
++/*
++ *  linux/kernel/vserver/signal.c
++ *
++ *  Virtual Server: Signal Support
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  broken out from vcontext V0.05
++ *  V0.02  changed vcmds to vxi arg
++ *  V0.03  adjusted siginfo for kill
++ *
++ */
++
++#include <asm/uaccess.h>
++
++#include <linux/vs_context.h>
++#include <linux/vs_pid.h>
++#include <linux/vserver/signal_cmd.h>
++
++
++int vx_info_kill(struct vx_info *vxi, int pid, int sig)
++{
++	int retval, count = 0;
++	struct task_struct *p;
++	struct siginfo *sip = SEND_SIG_PRIV;
++
++	retval = -ESRCH;
++	vxdprintk(VXD_CBIT(misc, 4),
++		"vx_info_kill(%p[#%d],%d,%d)*",
++		vxi, vxi->vx_id, pid, sig);
++	read_lock(&tasklist_lock);
++	switch (pid) {
++	case  0:
++	case -1:
++		for_each_process(p) {
++			int err = 0;
++
++			if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 ||
++				(pid && vxi->vx_initpid == p->pid))
++				continue;
++
++			err = group_send_sig_info(sig, sip, p);
++			++count;
++			if (err != -EPERM)
++				retval = err;
++		}
++		break;
++
++	case 1:
++		if (vxi->vx_initpid) {
++			pid = vxi->vx_initpid;
++			/* for now, only SIGINT to private init ... */
++			if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
++				/* ... as long as there are tasks left */
++				(atomic_read(&vxi->vx_tasks) > 1))
++				sig = SIGINT;
++		}
++		/* fallthrough */
++	default:
++		p = find_task_by_real_pid(pid);
++		if (p) {
++			if (vx_task_xid(p) == vxi->vx_id)
++				retval = group_send_sig_info(sig, sip, p);
++		}
++		break;
++	}
++	read_unlock(&tasklist_lock);
++	vxdprintk(VXD_CBIT(misc, 4),
++		"vx_info_kill(%p[#%d],%d,%d,%ld) = %d",
++		vxi, vxi->vx_id, pid, sig, (long)sip, retval);
++	return retval;
++}
++
++int vc_ctx_kill(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_ctx_kill_v0 vc_data;
++
++	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	/* special check to allow guest shutdown */
++	if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
++		/* forbid killall pid=0 when init is present */
++		(((vc_data.pid < 1) && vxi->vx_initpid) ||
++		(vc_data.pid > 1)))
++		return -EACCES;
++
++	return vx_info_kill(vxi, vc_data.pid, vc_data.sig);
++}
++
++
++static int __wait_exit(struct vx_info *vxi)
++{
++	DECLARE_WAITQUEUE(wait, current);
++	int ret = 0;
++
++	add_wait_queue(&vxi->vx_wait, &wait);
++	set_current_state(TASK_INTERRUPTIBLE);
++
++wait:
++	if (vx_info_state(vxi,
++		VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN)
++		goto out;
++	if (signal_pending(current)) {
++		ret = -ERESTARTSYS;
++		goto out;
++	}
++	schedule();
++	goto wait;
++
++out:
++	set_current_state(TASK_RUNNING);
++	remove_wait_queue(&vxi->vx_wait, &wait);
++	return ret;
++}
++
++
++
++int vc_wait_exit(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_wait_exit_v0 vc_data;
++	int ret;
++
++	ret = __wait_exit(vxi);
++	vc_data.reboot_cmd = vxi->reboot_cmd;
++	vc_data.exit_code = vxi->exit_code;
++
++	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
++		ret = -EFAULT;
++	return ret;
++}
++
+--- a/kernel/vserver/space.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/space.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,375 @@
++/*
++ *  linux/kernel/vserver/space.c
++ *
++ *  Virtual Server: Context Space Support
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  broken out from context.c 0.07
++ *  V0.02  added task locking for namespace
++ *  V0.03  broken out vx_enter_namespace
++ *  V0.04  added *space support and commands
++ *
++ */
++
++#include <linux/utsname.h>
++#include <linux/nsproxy.h>
++#include <linux/err.h>
++#include <linux/fs_struct.h>
++#include <asm/uaccess.h>
++
++#include <linux/vs_context.h>
++#include <linux/vserver/space.h>
++#include <linux/vserver/space_cmd.h>
++
++atomic_t vs_global_nsproxy	= ATOMIC_INIT(0);
++atomic_t vs_global_fs		= ATOMIC_INIT(0);
++atomic_t vs_global_mnt_ns	= ATOMIC_INIT(0);
++atomic_t vs_global_uts_ns	= ATOMIC_INIT(0);
++atomic_t vs_global_user_ns	= ATOMIC_INIT(0);
++atomic_t vs_global_pid_ns	= ATOMIC_INIT(0);
++
++
++/* namespace functions */
++
++#include <linux/mnt_namespace.h>
++#include <linux/user_namespace.h>
++#include <linux/pid_namespace.h>
++#include <linux/ipc_namespace.h>
++#include <net/net_namespace.h>
++
++
++static const struct vcmd_space_mask_v1 space_mask_v0 = {
++	.mask = CLONE_FS |
++		CLONE_NEWNS |
++		CLONE_NEWUTS |
++		CLONE_NEWIPC |
++		CLONE_NEWUSER |
++		0
++};
++
++static const struct vcmd_space_mask_v1 space_mask = {
++	.mask = CLONE_FS |
++		CLONE_NEWNS |
++		CLONE_NEWUTS |
++		CLONE_NEWIPC |
++		CLONE_NEWUSER |
++#ifdef	CONFIG_PID_NS
++		CLONE_NEWPID |
++#endif
++#ifdef	CONFIG_NET_NS
++		CLONE_NEWNET |
++#endif
++		0
++};
++
++static const struct vcmd_space_mask_v1 default_space_mask = {
++	.mask = CLONE_FS |
++		CLONE_NEWNS |
++		CLONE_NEWUTS |
++		CLONE_NEWIPC |
++		CLONE_NEWUSER |
++#ifdef	CONFIG_PID_NS
++//		CLONE_NEWPID |
++#endif
++		0
++};
++
++/*
++ *	build a new nsproxy mix
++ *      assumes that both proxies are 'const'
++ *	does not touch nsproxy refcounts
++ *	will hold a reference on the result.
++ */
++
++struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy,
++	struct nsproxy *new_nsproxy, unsigned long mask)
++{
++	struct mnt_namespace *old_ns;
++	struct uts_namespace *old_uts;
++	struct ipc_namespace *old_ipc;
++#ifdef	CONFIG_PID_NS
++	struct pid_namespace *old_pid;
++#endif
++#ifdef	CONFIG_NET_NS
++	struct net *old_net;
++#endif
++	struct nsproxy *nsproxy;
++
++	nsproxy = copy_nsproxy(old_nsproxy);
++	if (!nsproxy)
++		goto out;
++
++	if (mask & CLONE_NEWNS) {
++		old_ns = nsproxy->mnt_ns;
++		nsproxy->mnt_ns = new_nsproxy->mnt_ns;
++		if (nsproxy->mnt_ns)
++			get_mnt_ns(nsproxy->mnt_ns);
++	} else
++		old_ns = NULL;
++
++	if (mask & CLONE_NEWUTS) {
++		old_uts = nsproxy->uts_ns;
++		nsproxy->uts_ns = new_nsproxy->uts_ns;
++		if (nsproxy->uts_ns)
++			get_uts_ns(nsproxy->uts_ns);
++	} else
++		old_uts = NULL;
++
++	if (mask & CLONE_NEWIPC) {
++		old_ipc = nsproxy->ipc_ns;
++		nsproxy->ipc_ns = new_nsproxy->ipc_ns;
++		if (nsproxy->ipc_ns)
++			get_ipc_ns(nsproxy->ipc_ns);
++	} else
++		old_ipc = NULL;
++
++#ifdef	CONFIG_PID_NS
++	if (mask & CLONE_NEWPID) {
++		old_pid = nsproxy->pid_ns;
++		nsproxy->pid_ns = new_nsproxy->pid_ns;
++		if (nsproxy->pid_ns)
++			get_pid_ns(nsproxy->pid_ns);
++	} else
++		old_pid = NULL;
++#endif
++#ifdef	CONFIG_NET_NS
++	if (mask & CLONE_NEWNET) {
++		old_net = nsproxy->net_ns;
++		nsproxy->net_ns = new_nsproxy->net_ns;
++		if (nsproxy->net_ns)
++			get_net(nsproxy->net_ns);
++	} else
++		old_net = NULL;
++#endif
++	if (old_ns)
++		put_mnt_ns(old_ns);
++	if (old_uts)
++		put_uts_ns(old_uts);
++	if (old_ipc)
++		put_ipc_ns(old_ipc);
++#ifdef	CONFIG_PID_NS
++	if (old_pid)
++		put_pid_ns(old_pid);
++#endif
++#ifdef	CONFIG_NET_NS
++	if (old_net)
++		put_net(old_net);
++#endif
++out:
++	return nsproxy;
++}
++
++
++/*
++ *	merge two nsproxy structs into a new one.
++ *	will hold a reference on the result.
++ */
++
++static inline
++struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old,
++	struct nsproxy *proxy, unsigned long mask)
++{
++	struct nsproxy null_proxy = { .mnt_ns = NULL };
++
++	if (!proxy)
++		return NULL;
++
++	if (mask) {
++		/* vs_mix_nsproxy returns with reference */
++		return vs_mix_nsproxy(old ? old : &null_proxy,
++			proxy, mask);
++	}
++	get_nsproxy(proxy);
++	return proxy;
++}
++
++
++int vx_enter_space(struct vx_info *vxi, unsigned long mask, unsigned index)
++{
++	struct nsproxy *proxy, *proxy_cur, *proxy_new;
++	struct fs_struct *fs_cur, *fs = NULL;
++	int ret, kill = 0;
++
++	vxdprintk(VXD_CBIT(space, 8), "vx_enter_space(%p[#%u],0x%08lx,%d)",
++		vxi, vxi->vx_id, mask, index);
++
++	if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
++		return -EACCES;
++
++	if (!mask)
++		mask = vxi->vx_nsmask[index];
++
++	if ((mask & vxi->vx_nsmask[index]) != mask)
++		return -EINVAL;
++
++	if (mask & CLONE_FS) {
++		fs = copy_fs_struct(vxi->vx_fs[index]);
++		if (!fs)
++			return -ENOMEM;
++	}
++	proxy = vxi->vx_nsproxy[index];
++
++	vxdprintk(VXD_CBIT(space, 9),
++		"vx_enter_space(%p[#%u],0x%08lx,%d) -> (%p,%p)",
++		vxi, vxi->vx_id, mask, index, proxy, fs);
++
++	task_lock(current);
++	fs_cur = current->fs;
++
++	if (mask & CLONE_FS) {
++		write_lock(&fs_cur->lock);
++		current->fs = fs;
++		kill = !--fs_cur->users;
++		write_unlock(&fs_cur->lock);
++	}
++
++	proxy_cur = current->nsproxy;
++	get_nsproxy(proxy_cur);
++	task_unlock(current);
++
++	if (kill)
++		free_fs_struct(fs_cur);
++
++	proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask);
++	if (IS_ERR(proxy_new)) {
++		ret = PTR_ERR(proxy_new);
++		goto out_put;
++	}
++
++	proxy_new = xchg(&current->nsproxy, proxy_new);
++	ret = 0;
++
++	if (proxy_new)
++		put_nsproxy(proxy_new);
++out_put:
++	if (proxy_cur)
++		put_nsproxy(proxy_cur);
++	return ret;
++}
++
++
++int vx_set_space(struct vx_info *vxi, unsigned long mask, unsigned index)
++{
++	struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new;
++	struct fs_struct *fs_vxi, *fs;
++	int ret, kill = 0;
++
++	vxdprintk(VXD_CBIT(space, 8), "vx_set_space(%p[#%u],0x%08lx,%d)",
++		vxi, vxi->vx_id, mask, index);
++#if 0
++	if (!mask)
++		mask = default_space_mask.mask;
++#endif
++	if ((mask & space_mask.mask) != mask)
++		return -EINVAL;
++
++	proxy_vxi = vxi->vx_nsproxy[index];
++	fs_vxi = vxi->vx_fs[index];
++
++	if (mask & CLONE_FS) {
++		fs = copy_fs_struct(current->fs);
++		if (!fs)
++			return -ENOMEM;
++	}
++
++	task_lock(current);
++
++	if (mask & CLONE_FS) {
++		write_lock(&fs_vxi->lock);
++		vxi->vx_fs[index] = fs;
++		kill = !--fs_vxi->users;
++		write_unlock(&fs_vxi->lock);
++	}
++
++	proxy_cur = current->nsproxy;
++	get_nsproxy(proxy_cur);
++	task_unlock(current);
++
++	if (kill)
++		free_fs_struct(fs_vxi);
++
++	proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask);
++	if (IS_ERR(proxy_new)) {
++		ret = PTR_ERR(proxy_new);
++		goto out_put;
++	}
++
++	proxy_new = xchg(&vxi->vx_nsproxy[index], proxy_new);
++	vxi->vx_nsmask[index] |= mask;
++	ret = 0;
++
++	if (proxy_new)
++		put_nsproxy(proxy_new);
++out_put:
++	if (proxy_cur)
++		put_nsproxy(proxy_cur);
++	return ret;
++}
++
++
++int vc_enter_space_v1(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return vx_enter_space(vxi, vc_data.mask, 0);
++}
++
++int vc_enter_space(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	if (vc_data.index >= VX_SPACES)
++		return -EINVAL;
++
++	return vx_enter_space(vxi, vc_data.mask, vc_data.index);
++}
++
++int vc_set_space_v1(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_space_mask_v1 vc_data = { .mask = 0 };
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	return vx_set_space(vxi, vc_data.mask, 0);
++}
++
++int vc_set_space(struct vx_info *vxi, void __user *data)
++{
++	struct vcmd_space_mask_v2 vc_data = { .mask = 0 };
++
++	if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
++		return -EFAULT;
++
++	if (vc_data.index >= VX_SPACES)
++		return -EINVAL;
++
++	return vx_set_space(vxi, vc_data.mask, vc_data.index);
++}
++
++int vc_get_space_mask(void __user *data, int type)
++{
++	const struct vcmd_space_mask_v1 *mask;
++
++	if (type == 0)
++		mask = &space_mask_v0;
++	else if (type == 1)
++		mask = &space_mask;
++	else
++		mask = &default_space_mask;
++
++	vxdprintk(VXD_CBIT(space, 10),
++		"vc_get_space_mask(%d) = %08llx", type, mask->mask);
++
++	if (copy_to_user(data, mask, sizeof(*mask)))
++		return -EFAULT;
++	return 0;
++}
++
+--- a/kernel/vserver/switch.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/switch.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,546 @@
++/*
++ *  linux/kernel/vserver/switch.c
++ *
++ *  Virtual Server: Syscall Switch
++ *
++ *  Copyright (C) 2003-2007  Herbert Pötzl
++ *
++ *  V0.01  syscall switch
++ *  V0.02  added signal to context
++ *  V0.03  added rlimit functions
++ *  V0.04  added iattr, task/xid functions
++ *  V0.05  added debug/history stuff
++ *  V0.06  added compat32 layer
++ *  V0.07  vcmd args and perms
++ *  V0.08  added status commands
++ *  V0.09  added tag commands
++ *  V0.10  added oom bias
++ *  V0.11  added device commands
++ *
++ */
++
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vserver/switch.h>
++
++#include "vci_config.h"
++
++
++static inline
++int vc_get_version(uint32_t id)
++{
++	return VCI_VERSION;
++}
++
++static inline
++int vc_get_vci(uint32_t id)
++{
++	return vci_kernel_config();
++}
++
++#include <linux/vserver/context_cmd.h>
++#include <linux/vserver/cvirt_cmd.h>
++#include <linux/vserver/cacct_cmd.h>
++#include <linux/vserver/limit_cmd.h>
++#include <linux/vserver/network_cmd.h>
++#include <linux/vserver/sched_cmd.h>
++#include <linux/vserver/debug_cmd.h>
++#include <linux/vserver/inode_cmd.h>
++#include <linux/vserver/dlimit_cmd.h>
++#include <linux/vserver/signal_cmd.h>
++#include <linux/vserver/space_cmd.h>
++#include <linux/vserver/tag_cmd.h>
++#include <linux/vserver/device_cmd.h>
++
++#include <linux/vserver/inode.h>
++#include <linux/vserver/dlimit.h>
++
++
++#ifdef	CONFIG_COMPAT
++#define __COMPAT(name, id, data, compat)	\
++	(compat) ? name ## _x32(id, data) : name(id, data)
++#define __COMPAT_NO_ID(name, data, compat)	\
++	(compat) ? name ## _x32(data) : name(data)
++#else
++#define __COMPAT(name, id, data, compat)	\
++	name(id, data)
++#define __COMPAT_NO_ID(name, data, compat)	\
++	name(data)
++#endif
++
++
++static inline
++long do_vcmd(uint32_t cmd, uint32_t id,
++	struct vx_info *vxi, struct nx_info *nxi,
++	void __user *data, int compat)
++{
++	switch (cmd) {
++
++	case VCMD_get_version:
++		return vc_get_version(id);
++	case VCMD_get_vci:
++		return vc_get_vci(id);
++
++	case VCMD_task_xid:
++		return vc_task_xid(id);
++	case VCMD_vx_info:
++		return vc_vx_info(vxi, data);
++
++	case VCMD_task_nid:
++		return vc_task_nid(id);
++	case VCMD_nx_info:
++		return vc_nx_info(nxi, data);
++
++	case VCMD_task_tag:
++		return vc_task_tag(id);
++
++	case VCMD_set_space_v1:
++		return vc_set_space_v1(vxi, data);
++	/* this is version 2 */
++	case VCMD_set_space:
++		return vc_set_space(vxi, data);
++
++	case VCMD_get_space_mask_v0:
++		return vc_get_space_mask(data, 0);
++	/* this is version 1 */
++	case VCMD_get_space_mask:
++		return vc_get_space_mask(data, 1);
++
++	case VCMD_get_space_default:
++		return vc_get_space_mask(data, -1);
++
++#ifdef	CONFIG_IA32_EMULATION
++	case VCMD_get_rlimit:
++		return __COMPAT(vc_get_rlimit, vxi, data, compat);
++	case VCMD_set_rlimit:
++		return __COMPAT(vc_set_rlimit, vxi, data, compat);
++#else
++	case VCMD_get_rlimit:
++		return vc_get_rlimit(vxi, data);
++	case VCMD_set_rlimit:
++		return vc_set_rlimit(vxi, data);
++#endif
++	case VCMD_get_rlimit_mask:
++		return vc_get_rlimit_mask(id, data);
++	case VCMD_reset_hits:
++		return vc_reset_hits(vxi, data);
++	case VCMD_reset_minmax:
++		return vc_reset_minmax(vxi, data);
++
++	case VCMD_get_vhi_name:
++		return vc_get_vhi_name(vxi, data);
++	case VCMD_set_vhi_name:
++		return vc_set_vhi_name(vxi, data);
++
++	case VCMD_ctx_stat:
++		return vc_ctx_stat(vxi, data);
++	case VCMD_virt_stat:
++		return vc_virt_stat(vxi, data);
++	case VCMD_sock_stat:
++		return vc_sock_stat(vxi, data);
++	case VCMD_rlimit_stat:
++		return vc_rlimit_stat(vxi, data);
++
++	case VCMD_set_cflags:
++		return vc_set_cflags(vxi, data);
++	case VCMD_get_cflags:
++		return vc_get_cflags(vxi, data);
++
++	/* this is version 1 */
++	case VCMD_set_ccaps:
++		return vc_set_ccaps(vxi, data);
++	/* this is version 1 */
++	case VCMD_get_ccaps:
++		return vc_get_ccaps(vxi, data);
++	case VCMD_set_bcaps:
++		return vc_set_bcaps(vxi, data);
++	case VCMD_get_bcaps:
++		return vc_get_bcaps(vxi, data);
++
++	case VCMD_set_badness:
++		return vc_set_badness(vxi, data);
++	case VCMD_get_badness:
++		return vc_get_badness(vxi, data);
++
++	case VCMD_set_nflags:
++		return vc_set_nflags(nxi, data);
++	case VCMD_get_nflags:
++		return vc_get_nflags(nxi, data);
++
++	case VCMD_set_ncaps:
++		return vc_set_ncaps(nxi, data);
++	case VCMD_get_ncaps:
++		return vc_get_ncaps(nxi, data);
++
++	case VCMD_set_sched_v4:
++		return vc_set_sched_v4(vxi, data);
++	/* this is version 5 */
++	case VCMD_set_sched:
++		return vc_set_sched(vxi, data);
++	case VCMD_get_sched:
++		return vc_get_sched(vxi, data);
++	case VCMD_sched_info:
++		return vc_sched_info(vxi, data);
++
++	case VCMD_add_dlimit:
++		return __COMPAT(vc_add_dlimit, id, data, compat);
++	case VCMD_rem_dlimit:
++		return __COMPAT(vc_rem_dlimit, id, data, compat);
++	case VCMD_set_dlimit:
++		return __COMPAT(vc_set_dlimit, id, data, compat);
++	case VCMD_get_dlimit:
++		return __COMPAT(vc_get_dlimit, id, data, compat);
++
++	case VCMD_ctx_kill:
++		return vc_ctx_kill(vxi, data);
++
++	case VCMD_wait_exit:
++		return vc_wait_exit(vxi, data);
++
++	case VCMD_get_iattr:
++		return __COMPAT_NO_ID(vc_get_iattr, data, compat);
++	case VCMD_set_iattr:
++		return __COMPAT_NO_ID(vc_set_iattr, data, compat);
++
++	case VCMD_fget_iattr:
++		return vc_fget_iattr(id, data);
++	case VCMD_fset_iattr:
++		return vc_fset_iattr(id, data);
++
++	case VCMD_enter_space_v0:
++		return vc_enter_space_v1(vxi, NULL);
++	case VCMD_enter_space_v1:
++		return vc_enter_space_v1(vxi, data);
++	/* this is version 2 */
++	case VCMD_enter_space:
++		return vc_enter_space(vxi, data);
++
++	case VCMD_ctx_create_v0:
++		return vc_ctx_create(id, NULL);
++	case VCMD_ctx_create:
++		return vc_ctx_create(id, data);
++	case VCMD_ctx_migrate_v0:
++		return vc_ctx_migrate(vxi, NULL);
++	case VCMD_ctx_migrate:
++		return vc_ctx_migrate(vxi, data);
++
++	case VCMD_net_create_v0:
++		return vc_net_create(id, NULL);
++	case VCMD_net_create:
++		return vc_net_create(id, data);
++	case VCMD_net_migrate:
++		return vc_net_migrate(nxi, data);
++
++	case VCMD_tag_migrate:
++		return vc_tag_migrate(id);
++
++	case VCMD_net_add:
++		return vc_net_add(nxi, data);
++	case VCMD_net_remove:
++		return vc_net_remove(nxi, data);
++
++	case VCMD_net_add_ipv4:
++		return vc_net_add_ipv4(nxi, data);
++	case VCMD_net_remove_ipv4:
++		return vc_net_remove_ipv4(nxi, data);
++#ifdef	CONFIG_IPV6
++	case VCMD_net_add_ipv6:
++		return vc_net_add_ipv6(nxi, data);
++	case VCMD_net_remove_ipv6:
++		return vc_net_remove_ipv6(nxi, data);
++#endif
++/*	case VCMD_add_match_ipv4:
++		return vc_add_match_ipv4(nxi, data);
++	case VCMD_get_match_ipv4:
++		return vc_get_match_ipv4(nxi, data);
++#ifdef	CONFIG_IPV6
++	case VCMD_add_match_ipv6:
++		return vc_add_match_ipv6(nxi, data);
++	case VCMD_get_match_ipv6:
++		return vc_get_match_ipv6(nxi, data);
++#endif	*/
++
++#ifdef	CONFIG_VSERVER_DEVICE
++	case VCMD_set_mapping:
++		return __COMPAT(vc_set_mapping, vxi, data, compat);
++	case VCMD_unset_mapping:
++		return __COMPAT(vc_unset_mapping, vxi, data, compat);
++#endif
++#ifdef	CONFIG_VSERVER_HISTORY
++	case VCMD_dump_history:
++		return vc_dump_history(id);
++	case VCMD_read_history:
++		return __COMPAT(vc_read_history, id, data, compat);
++#endif
++#ifdef	CONFIG_VSERVER_MONITOR
++	case VCMD_read_monitor:
++		return __COMPAT(vc_read_monitor, id, data, compat);
++#endif
++	default:
++		vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]",
++			VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd));
++	}
++	return -ENOSYS;
++}
++
++
++#define	__VCMD(vcmd, _perm, _args, _flags)		\
++	case VCMD_ ## vcmd: perm = _perm;		\
++		args = _args; flags = _flags; break
++
++
++#define VCA_NONE	0x00
++#define VCA_VXI		0x01
++#define VCA_NXI		0x02
++
++#define VCF_NONE	0x00
++#define VCF_INFO	0x01
++#define VCF_ADMIN	0x02
++#define VCF_ARES	0x06	/* includes admin */
++#define VCF_SETUP	0x08
++
++#define VCF_ZIDOK	0x10	/* zero id okay */
++
++
++static inline
++long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat)
++{
++	long ret;
++	int permit = -1, state = 0;
++	int perm = -1, args = 0, flags = 0;
++	struct vx_info *vxi = NULL;
++	struct nx_info *nxi = NULL;
++
++	switch (cmd) {
++	/* unpriviledged commands */
++	__VCMD(get_version,	 0, VCA_NONE,	0);
++	__VCMD(get_vci,		 0, VCA_NONE,	0);
++	__VCMD(get_rlimit_mask,	 0, VCA_NONE,	0);
++	__VCMD(get_space_mask_v0,0, VCA_NONE,   0);
++	__VCMD(get_space_mask,	 0, VCA_NONE,   0);
++	__VCMD(get_space_default,0, VCA_NONE,   0);
++
++	/* info commands */
++	__VCMD(task_xid,	 2, VCA_NONE,	0);
++	__VCMD(reset_hits,	 2, VCA_VXI,	0);
++	__VCMD(reset_minmax,	 2, VCA_VXI,	0);
++	__VCMD(vx_info,		 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_bcaps,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_ccaps,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_cflags,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_badness,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_vhi_name,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(get_rlimit,	 3, VCA_VXI,	VCF_INFO);
++
++	__VCMD(ctx_stat,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(virt_stat,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(sock_stat,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(rlimit_stat,	 3, VCA_VXI,	VCF_INFO);
++
++	__VCMD(task_nid,	 2, VCA_NONE,	0);
++	__VCMD(nx_info,		 3, VCA_NXI,	VCF_INFO);
++	__VCMD(get_ncaps,	 3, VCA_NXI,	VCF_INFO);
++	__VCMD(get_nflags,	 3, VCA_NXI,	VCF_INFO);
++
++	__VCMD(task_tag,	 2, VCA_NONE,	0);
++
++	__VCMD(get_iattr,	 2, VCA_NONE,	0);
++	__VCMD(fget_iattr,	 2, VCA_NONE,	0);
++	__VCMD(get_dlimit,	 3, VCA_NONE,	VCF_INFO);
++	__VCMD(get_sched,	 3, VCA_VXI,	VCF_INFO);
++	__VCMD(sched_info,	 3, VCA_VXI,	VCF_INFO | VCF_ZIDOK);
++
++	/* lower admin commands */
++	__VCMD(wait_exit,	 4, VCA_VXI,	VCF_INFO);
++	__VCMD(ctx_create_v0,	 5, VCA_NONE,	0);
++	__VCMD(ctx_create,	 5, VCA_NONE,	0);
++	__VCMD(ctx_migrate_v0,	 5, VCA_VXI,	VCF_ADMIN);
++	__VCMD(ctx_migrate,	 5, VCA_VXI,	VCF_ADMIN);
++	__VCMD(enter_space_v0,	 5, VCA_VXI,	VCF_ADMIN);
++	__VCMD(enter_space_v1,	 5, VCA_VXI,	VCF_ADMIN);
++	__VCMD(enter_space,	 5, VCA_VXI,	VCF_ADMIN);
++
++	__VCMD(net_create_v0,	 5, VCA_NONE,	0);
++	__VCMD(net_create,	 5, VCA_NONE,	0);
++	__VCMD(net_migrate,	 5, VCA_NXI,	VCF_ADMIN);
++
++	__VCMD(tag_migrate,	 5, VCA_NONE,	VCF_ADMIN);
++
++	/* higher admin commands */
++	__VCMD(ctx_kill,	 6, VCA_VXI,	VCF_ARES);
++	__VCMD(set_space_v1,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_space,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++
++	__VCMD(set_ccaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_bcaps,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_cflags,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_badness,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++
++	__VCMD(set_vhi_name,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_rlimit,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_sched,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_sched_v4,	 7, VCA_VXI,	VCF_ARES | VCF_SETUP);
++
++	__VCMD(set_ncaps,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(set_nflags,	 7, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(net_add,		 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(net_remove,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(net_add_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(net_remove_ipv4,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++#ifdef	CONFIG_IPV6
++	__VCMD(net_add_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++	__VCMD(net_remove_ipv6,	 8, VCA_NXI,	VCF_ARES | VCF_SETUP);
++#endif
++	__VCMD(set_iattr,	 7, VCA_NONE,	0);
++	__VCMD(fset_iattr,	 7, VCA_NONE,	0);
++	__VCMD(set_dlimit,	 7, VCA_NONE,	VCF_ARES);
++	__VCMD(add_dlimit,	 8, VCA_NONE,	VCF_ARES);
++	__VCMD(rem_dlimit,	 8, VCA_NONE,	VCF_ARES);
++
++#ifdef	CONFIG_VSERVER_DEVICE
++	__VCMD(set_mapping,	 8, VCA_VXI,    VCF_ARES|VCF_ZIDOK);
++	__VCMD(unset_mapping,	 8, VCA_VXI,	VCF_ARES|VCF_ZIDOK);
++#endif
++	/* debug level admin commands */
++#ifdef	CONFIG_VSERVER_HISTORY
++	__VCMD(dump_history,	 9, VCA_NONE,	0);
++	__VCMD(read_history,	 9, VCA_NONE,	0);
++#endif
++#ifdef	CONFIG_VSERVER_MONITOR
++	__VCMD(read_monitor,	 9, VCA_NONE,	0);
++#endif
++
++	default:
++		perm = -1;
++	}
++
++	vxdprintk(VXD_CBIT(switch, 0),
++		"vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]",
++		VC_CATEGORY(cmd), VC_COMMAND(cmd),
++		VC_VERSION(cmd), id, data, compat,
++		perm, args, flags);
++
++	ret = -ENOSYS;
++	if (perm < 0)
++		goto out;
++
++	state = 1;
++	if (!capable(CAP_CONTEXT))
++		goto out;
++
++	state = 2;
++	/* moved here from the individual commands */
++	ret = -EPERM;
++	if ((perm > 1) && !capable(CAP_SYS_ADMIN))
++		goto out;
++
++	state = 3;
++	/* vcmd involves resource management  */
++	ret = -EPERM;
++	if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE))
++		goto out;
++
++	state = 4;
++	/* various legacy exceptions */
++	switch (cmd) {
++	/* will go away when spectator is a cap */
++	case VCMD_ctx_migrate_v0:
++	case VCMD_ctx_migrate:
++		if (id == 1) {
++			current->xid = 1;
++			ret = 1;
++			goto out;
++		}
++		break;
++
++	/* will go away when spectator is a cap */
++	case VCMD_net_migrate:
++		if (id == 1) {
++			current->nid = 1;
++			ret = 1;
++			goto out;
++		}
++		break;
++	}
++
++	/* vcmds are fine by default */
++	permit = 1;
++
++	/* admin type vcmds require admin ... */
++	if (flags & VCF_ADMIN)
++		permit = vx_check(0, VS_ADMIN) ? 1 : 0;
++
++	/* ... but setup type vcmds override that */
++	if (!permit && (flags & VCF_SETUP))
++		permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0;
++
++	state = 5;
++	ret = -EPERM;
++	if (!permit)
++		goto out;
++
++	state = 6;
++	if (!id && (flags & VCF_ZIDOK))
++		goto skip_id;
++
++	ret = -ESRCH;
++	if (args & VCA_VXI) {
++		vxi = lookup_vx_info(id);
++		if (!vxi)
++			goto out;
++
++		if ((flags & VCF_ADMIN) &&
++			/* special case kill for shutdown */
++			(cmd != VCMD_ctx_kill) &&
++			/* can context be administrated? */
++			!vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) {
++			ret = -EACCES;
++			goto out_vxi;
++		}
++	}
++	state = 7;
++	if (args & VCA_NXI) {
++		nxi = lookup_nx_info(id);
++		if (!nxi)
++			goto out_vxi;
++
++		if ((flags & VCF_ADMIN) &&
++			/* can context be administrated? */
++			!nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) {
++			ret = -EACCES;
++			goto out_nxi;
++		}
++	}
++skip_id:
++	state = 8;
++	ret = do_vcmd(cmd, id, vxi, nxi, data, compat);
++
++out_nxi:
++	if ((args & VCA_NXI) && nxi)
++		put_nx_info(nxi);
++out_vxi:
++	if ((args & VCA_VXI) && vxi)
++		put_vx_info(vxi);
++out:
++	vxdprintk(VXD_CBIT(switch, 1),
++		"vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]",
++		VC_CATEGORY(cmd), VC_COMMAND(cmd),
++		VC_VERSION(cmd), ret, ret, state, permit);
++	return ret;
++}
++
++asmlinkage long
++sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
++{
++	return do_vserver(cmd, id, data, 0);
++}
++
++#ifdef	CONFIG_COMPAT
++
++asmlinkage long
++sys32_vserver(uint32_t cmd, uint32_t id, void __user *data)
++{
++	return do_vserver(cmd, id, data, 1);
++}
++
++#endif	/* CONFIG_COMPAT */
+--- a/kernel/vserver/sysctl.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/sysctl.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,245 @@
++/*
++ *  kernel/vserver/sysctl.c
++ *
++ *  Virtual Context Support
++ *
++ *  Copyright (C) 2004-2007  Herbert Pötzl
++ *
++ *  V0.01  basic structure
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/ctype.h>
++#include <linux/sysctl.h>
++#include <linux/parser.h>
++#include <asm/uaccess.h>
++
++
++enum {
++	CTL_DEBUG_ERROR		= 0,
++	CTL_DEBUG_SWITCH	= 1,
++	CTL_DEBUG_XID,
++	CTL_DEBUG_NID,
++	CTL_DEBUG_TAG,
++	CTL_DEBUG_NET,
++	CTL_DEBUG_LIMIT,
++	CTL_DEBUG_CRES,
++	CTL_DEBUG_DLIM,
++	CTL_DEBUG_QUOTA,
++	CTL_DEBUG_CVIRT,
++	CTL_DEBUG_SPACE,
++	CTL_DEBUG_MISC,
++};
++
++
++unsigned int vx_debug_switch	= 0;
++unsigned int vx_debug_xid	= 0;
++unsigned int vx_debug_nid	= 0;
++unsigned int vx_debug_tag	= 0;
++unsigned int vx_debug_net	= 0;
++unsigned int vx_debug_limit	= 0;
++unsigned int vx_debug_cres	= 0;
++unsigned int vx_debug_dlim	= 0;
++unsigned int vx_debug_quota	= 0;
++unsigned int vx_debug_cvirt	= 0;
++unsigned int vx_debug_space	= 0;
++unsigned int vx_debug_misc	= 0;
++
++
++static struct ctl_table_header *vserver_table_header;
++static ctl_table vserver_root_table[];
++
++
++void vserver_register_sysctl(void)
++{
++	if (!vserver_table_header) {
++		vserver_table_header = register_sysctl_table(vserver_root_table);
++	}
++
++}
++
++void vserver_unregister_sysctl(void)
++{
++	if (vserver_table_header) {
++		unregister_sysctl_table(vserver_table_header);
++		vserver_table_header = NULL;
++	}
++}
++
++
++static int proc_dodebug(ctl_table *table, int write,
++	void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	char		tmpbuf[20], *p, c;
++	unsigned int	value;
++	size_t		left, len;
++
++	if ((*ppos && !write) || !*lenp) {
++		*lenp = 0;
++		return 0;
++	}
++
++	left = *lenp;
++
++	if (write) {
++		if (!access_ok(VERIFY_READ, buffer, left))
++			return -EFAULT;
++		p = (char *)buffer;
++		while (left && __get_user(c, p) >= 0 && isspace(c))
++			left--, p++;
++		if (!left)
++			goto done;
++
++		if (left > sizeof(tmpbuf) - 1)
++			return -EINVAL;
++		if (copy_from_user(tmpbuf, p, left))
++			return -EFAULT;
++		tmpbuf[left] = '\0';
++
++		for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
++			value = 10 * value + (*p - '0');
++		if (*p && !isspace(*p))
++			return -EINVAL;
++		while (left && isspace(*p))
++			left--, p++;
++		*(unsigned int *)table->data = value;
++	} else {
++		if (!access_ok(VERIFY_WRITE, buffer, left))
++			return -EFAULT;
++		len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
++		if (len > left)
++			len = left;
++		if (__copy_to_user(buffer, tmpbuf, len))
++			return -EFAULT;
++		if ((left -= len) > 0) {
++			if (put_user('\n', (char *)buffer + len))
++				return -EFAULT;
++			left--;
++		}
++	}
++
++done:
++	*lenp -= left;
++	*ppos += *lenp;
++	return 0;
++}
++
++static int zero;
++
++#define	CTL_ENTRY(ctl, name)				\
++	{						\
++		.ctl_name	= ctl,			\
++		.procname	= #name,		\
++		.data		= &vx_ ## name,		\
++		.maxlen		= sizeof(int),		\
++		.mode		= 0644,			\
++		.proc_handler	= &proc_dodebug,	\
++		.strategy	= &sysctl_intvec,	\
++		.extra1		= &zero,		\
++		.extra2		= &zero,		\
++	}
++
++static ctl_table vserver_debug_table[] = {
++	CTL_ENTRY(CTL_DEBUG_SWITCH,	debug_switch),
++	CTL_ENTRY(CTL_DEBUG_XID,	debug_xid),
++	CTL_ENTRY(CTL_DEBUG_NID,	debug_nid),
++	CTL_ENTRY(CTL_DEBUG_TAG,	debug_tag),
++	CTL_ENTRY(CTL_DEBUG_NET,	debug_net),
++	CTL_ENTRY(CTL_DEBUG_LIMIT,	debug_limit),
++	CTL_ENTRY(CTL_DEBUG_CRES,	debug_cres),
++	CTL_ENTRY(CTL_DEBUG_DLIM,	debug_dlim),
++	CTL_ENTRY(CTL_DEBUG_QUOTA,	debug_quota),
++	CTL_ENTRY(CTL_DEBUG_CVIRT,	debug_cvirt),
++	CTL_ENTRY(CTL_DEBUG_SPACE,	debug_space),
++	CTL_ENTRY(CTL_DEBUG_MISC,	debug_misc),
++	{ .ctl_name = 0 }
++};
++
++static ctl_table vserver_root_table[] = {
++	{
++		.ctl_name	= CTL_VSERVER,
++		.procname	= "vserver",
++		.mode		= 0555,
++		.child		= vserver_debug_table
++	},
++	{ .ctl_name = 0 }
++};
++
++
++static match_table_t tokens = {
++	{ CTL_DEBUG_SWITCH,	"switch=%x"	},
++	{ CTL_DEBUG_XID,	"xid=%x"	},
++	{ CTL_DEBUG_NID,	"nid=%x"	},
++	{ CTL_DEBUG_TAG,	"tag=%x"	},
++	{ CTL_DEBUG_NET,	"net=%x"	},
++	{ CTL_DEBUG_LIMIT,	"limit=%x"	},
++	{ CTL_DEBUG_CRES,	"cres=%x"	},
++	{ CTL_DEBUG_DLIM,	"dlim=%x"	},
++	{ CTL_DEBUG_QUOTA,	"quota=%x"	},
++	{ CTL_DEBUG_CVIRT,	"cvirt=%x"	},
++	{ CTL_DEBUG_SPACE,	"space=%x"	},
++	{ CTL_DEBUG_MISC,	"misc=%x"	},
++	{ CTL_DEBUG_ERROR,	NULL		}
++};
++
++#define	HANDLE_CASE(id, name, val)				\
++	case CTL_DEBUG_ ## id:					\
++		vx_debug_ ## name = val;			\
++		printk("vs_debug_" #name "=0x%x\n", val);	\
++		break
++
++
++static int __init vs_debug_setup(char *str)
++{
++	char *p;
++	int token;
++
++	printk("vs_debug_setup(%s)\n", str);
++	while ((p = strsep(&str, ",")) != NULL) {
++		substring_t args[MAX_OPT_ARGS];
++		unsigned int value;
++
++		if (!*p)
++			continue;
++
++		token = match_token(p, tokens, args);
++		value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
++
++		switch (token) {
++		HANDLE_CASE(SWITCH, switch, value);
++		HANDLE_CASE(XID,    xid,    value);
++		HANDLE_CASE(NID,    nid,    value);
++		HANDLE_CASE(TAG,    tag,    value);
++		HANDLE_CASE(NET,    net,    value);
++		HANDLE_CASE(LIMIT,  limit,  value);
++		HANDLE_CASE(CRES,   cres,   value);
++		HANDLE_CASE(DLIM,   dlim,   value);
++		HANDLE_CASE(QUOTA,  quota,  value);
++		HANDLE_CASE(CVIRT,  cvirt,  value);
++		HANDLE_CASE(SPACE,  space,  value);
++		HANDLE_CASE(MISC,   misc,   value);
++		default:
++			return -EINVAL;
++			break;
++		}
++	}
++	return 1;
++}
++
++__setup("vsdebug=", vs_debug_setup);
++
++
++
++EXPORT_SYMBOL_GPL(vx_debug_switch);
++EXPORT_SYMBOL_GPL(vx_debug_xid);
++EXPORT_SYMBOL_GPL(vx_debug_nid);
++EXPORT_SYMBOL_GPL(vx_debug_net);
++EXPORT_SYMBOL_GPL(vx_debug_limit);
++EXPORT_SYMBOL_GPL(vx_debug_cres);
++EXPORT_SYMBOL_GPL(vx_debug_dlim);
++EXPORT_SYMBOL_GPL(vx_debug_quota);
++EXPORT_SYMBOL_GPL(vx_debug_cvirt);
++EXPORT_SYMBOL_GPL(vx_debug_space);
++EXPORT_SYMBOL_GPL(vx_debug_misc);
++
+--- a/kernel/vserver/tag.c	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/tag.c	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,63 @@
++/*
++ *  linux/kernel/vserver/tag.c
++ *
++ *  Virtual Server: Shallow Tag Space
++ *
++ *  Copyright (C) 2007  Herbert Pötzl
++ *
++ *  V0.01  basic implementation
++ *
++ */
++
++#include <linux/sched.h>
++#include <linux/vserver/debug.h>
++#include <linux/vs_pid.h>
++#include <linux/vs_tag.h>
++
++#include <linux/vserver/tag_cmd.h>
++
++
++int dx_migrate_task(struct task_struct *p, tag_t tag)
++{
++	if (!p)
++		BUG();
++
++	vxdprintk(VXD_CBIT(tag, 5),
++		"dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
++
++	task_lock(p);
++	p->tag = tag;
++	task_unlock(p);
++
++	vxdprintk(VXD_CBIT(tag, 5),
++		"moved task %p into [#%d]", p, tag);
++	return 0;
++}
++
++/* vserver syscall commands below here */
++
++/* taks xid and vx_info functions */
++
++
++int vc_task_tag(uint32_t id)
++{
++	tag_t tag;
++
++	if (id) {
++		struct task_struct *tsk;
++		read_lock(&tasklist_lock);
++		tsk = find_task_by_real_pid(id);
++		tag = (tsk) ? tsk->tag : -ESRCH;
++		read_unlock(&tasklist_lock);
++	} else
++		tag = dx_current_tag();
++	return tag;
++}
++
++
++int vc_tag_migrate(uint32_t tag)
++{
++	return dx_migrate_task(current, tag & 0xFFFF);
++}
++
++
+--- a/kernel/vserver/vci_config.h	1970-01-01 01:00:00.000000000 +0100
++++ a/kernel/vserver/vci_config.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,81 @@
++
++/*  interface version */
++
++#define VCI_VERSION		0x00020305
++
++
++enum {
++	VCI_KCBIT_NO_DYNAMIC = 0,
++
++	VCI_KCBIT_PROC_SECURE = 4,
++	VCI_KCBIT_HARDCPU = 5,
++	VCI_KCBIT_IDLELIMIT = 6,
++	VCI_KCBIT_IDLETIME = 7,
++
++	VCI_KCBIT_COWBL = 8,
++	VCI_KCBIT_FULLCOWBL = 9,
++	VCI_KCBIT_SPACES = 10,
++	VCI_KCBIT_NETV2 = 11,
++
++	VCI_KCBIT_DEBUG = 16,
++	VCI_KCBIT_HISTORY = 20,
++	VCI_KCBIT_TAGGED = 24,
++	VCI_KCBIT_PPTAG = 28,
++
++	VCI_KCBIT_MORE = 31,
++};
++
++
++static inline uint32_t vci_kernel_config(void)
++{
++	return
++	(1 << VCI_KCBIT_NO_DYNAMIC) |
++
++	/* configured features */
++#ifdef	CONFIG_VSERVER_PROC_SECURE
++	(1 << VCI_KCBIT_PROC_SECURE) |
++#endif
++#ifdef	CONFIG_VSERVER_HARDCPU
++	(1 << VCI_KCBIT_HARDCPU) |
++#endif
++#ifdef	CONFIG_VSERVER_IDLELIMIT
++	(1 << VCI_KCBIT_IDLELIMIT) |
++#endif
++#ifdef	CONFIG_VSERVER_IDLETIME
++	(1 << VCI_KCBIT_IDLETIME) |
++#endif
++#ifdef	CONFIG_VSERVER_COWBL
++	(1 << VCI_KCBIT_COWBL) |
++	(1 << VCI_KCBIT_FULLCOWBL) |
++#endif
++	(1 << VCI_KCBIT_SPACES) |
++	(1 << VCI_KCBIT_NETV2) |
++
++	/* debug options */
++#ifdef	CONFIG_VSERVER_DEBUG
++	(1 << VCI_KCBIT_DEBUG) |
++#endif
++#ifdef	CONFIG_VSERVER_HISTORY
++	(1 << VCI_KCBIT_HISTORY) |
++#endif
++
++	/* inode context tagging */
++#if	defined(CONFIG_TAGGING_NONE)
++	(0 << VCI_KCBIT_TAGGED) |
++#elif	defined(CONFIG_TAGGING_UID16)
++	(1 << VCI_KCBIT_TAGGED) |
++#elif	defined(CONFIG_TAGGING_GID16)
++	(2 << VCI_KCBIT_TAGGED) |
++#elif	defined(CONFIG_TAGGING_ID24)
++	(3 << VCI_KCBIT_TAGGED) |
++#elif	defined(CONFIG_TAGGING_INTERN)
++	(4 << VCI_KCBIT_TAGGED) |
++#elif	defined(CONFIG_TAGGING_RUNTIME)
++	(5 << VCI_KCBIT_TAGGED) |
++#else
++	(7 << VCI_KCBIT_TAGGED) |
++#endif
++	(1 << VCI_KCBIT_PPTAG) |
++	0;
++}
++
+--- a/mm/allocpercpu.c	2009-12-03 20:02:58.000000000 +0100
++++ a/mm/allocpercpu.c	2011-06-10 13:03:02.000000000 +0200
+@@ -160,12 +160,14 @@ EXPORT_SYMBOL(__per_cpu_offset);
+ 
+ void __init setup_per_cpu_areas(void)
+ {
+-	unsigned long size, i;
++	unsigned long size, vspc, i;
+ 	char *ptr;
+ 	unsigned long nr_possible_cpus = num_possible_cpus();
+ 
++	vspc = PERCPU_PERCTX * CONFIG_VSERVER_CONTEXTS;
++
+ 	/* Copy section for each CPU (we discard the original) */
+-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
++	size = ALIGN(PERCPU_ENOUGH_ROOM + vspc, PAGE_SIZE);
+ 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
+ 
+ 	for_each_possible_cpu(i) {
+--- a/mm/filemap_xip.c	2009-12-03 20:02:58.000000000 +0100
++++ a/mm/filemap_xip.c	2011-06-10 13:03:02.000000000 +0200
+@@ -17,6 +17,7 @@
+ #include <linux/sched.h>
+ #include <linux/seqlock.h>
+ #include <linux/mutex.h>
++#include <linux/vs_memory.h>
+ #include <asm/tlbflush.h>
+ #include <asm/io.h>
+ 
+--- a/mm/fremap.c	2009-03-24 14:22:45.000000000 +0100
++++ a/mm/fremap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/syscalls.h>
+ #include <linux/mmu_notifier.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/mmu_context.h>
+ #include <asm/cacheflush.h>
+--- a/mm/hugetlb.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/hugetlb.c	2011-06-10 13:03:02.000000000 +0200
+@@ -24,6 +24,7 @@
+ #include <asm/io.h>
+ 
+ #include <linux/hugetlb.h>
++#include <linux/vs_memory.h>
+ #include "internal.h"
+ 
+ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
+--- a/mm/memcontrol.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/memcontrol.c	2011-06-10 13:03:02.000000000 +0200
+@@ -549,6 +549,31 @@ struct mem_cgroup *mem_cgroup_from_task(
+ 				struct mem_cgroup, css);
+ }
+ 
++u64 mem_cgroup_res_read_u64(struct mem_cgroup *mem, int member)
++{
++	return res_counter_read_u64(&mem->res, member);
++}
++
++u64 mem_cgroup_memsw_read_u64(struct mem_cgroup *mem, int member)
++{
++	return res_counter_read_u64(&mem->memsw, member);
++}
++
++s64 mem_cgroup_stat_read_cache(struct mem_cgroup *mem)
++{
++	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
++}
++
++s64 mem_cgroup_stat_read_anon(struct mem_cgroup *mem)
++{
++	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
++}
++
++s64 mem_cgroup_stat_read_mapped(struct mem_cgroup *mem)
++{
++	return mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
++}
++
+ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
+ {
+ 	struct mem_cgroup *mem = NULL;
+--- a/mm/memory.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/memory.c	2011-06-10 13:03:02.000000000 +0200
+@@ -56,6 +56,7 @@
+ #include <linux/kallsyms.h>
+ #include <linux/swapops.h>
+ #include <linux/elf.h>
++// #include <linux/vs_memory.h>
+ 
+ #include <asm/io.h>
+ #include <asm/pgalloc.h>
+@@ -647,6 +648,9 @@ static int copy_pte_range(struct mm_stru
+ 	int progress = 0;
+ 	int rss[2];
+ 
++	if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
++		return -ENOMEM;
++
+ again:
+ 	rss[1] = rss[0] = 0;
+ 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
+@@ -2678,6 +2682,9 @@ static int do_anonymous_page(struct mm_s
+ 
+ 	pte_unmap(page_table);
+ 
++	if (!vx_rss_avail(mm, 1))
++		goto oom;
++
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGBUS;
+@@ -2984,6 +2991,7 @@ static inline int handle_pte_fault(struc
+ {
+ 	pte_t entry;
+ 	spinlock_t *ptl;
++	int ret = 0, type = VXPT_UNKNOWN;
+ 
+ 	entry = *pte;
+ 	if (!pte_present(entry)) {
+@@ -3008,9 +3016,12 @@ static inline int handle_pte_fault(struc
+ 	if (unlikely(!pte_same(*pte, entry)))
+ 		goto unlock;
+ 	if (flags & FAULT_FLAG_WRITE) {
+-		if (!pte_write(entry))
+-			return do_wp_page(mm, vma, address,
++		if (!pte_write(entry)) {
++			ret = do_wp_page(mm, vma, address,
+ 					pte, pmd, ptl, entry);
++			type = VXPT_WRITE;
++			goto out;
++		}
+ 		entry = pte_mkdirty(entry);
+ 	}
+ 	entry = pte_mkyoung(entry);
+@@ -3028,7 +3039,10 @@ static inline int handle_pte_fault(struc
+ 	}
+ unlock:
+ 	pte_unmap_unlock(pte, ptl);
+-	return 0;
++	ret = 0;
++out:
++	vx_page_fault(mm, vma, type, ret);
++	return ret;
+ }
+ 
+ /*
+--- a/mm/mlock.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/mlock.c	2011-06-10 13:03:02.000000000 +0200
+@@ -18,6 +18,7 @@
+ #include <linux/rmap.h>
+ #include <linux/mmzone.h>
+ #include <linux/hugetlb.h>
++#include <linux/vs_memory.h>
+ 
+ #include "internal.h"
+ 
+@@ -414,7 +415,7 @@ success:
+ 	nr_pages = (end - start) >> PAGE_SHIFT;
+ 	if (!lock)
+ 		nr_pages = -nr_pages;
+-	mm->locked_vm += nr_pages;
++	vx_vmlocked_add(mm, nr_pages);
+ 
+ 	/*
+ 	 * vm_flags is protected by the mmap_sem held in write mode.
+@@ -487,7 +488,7 @@ static int do_mlock(unsigned long start,
+ 
+ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
+ {
+-	unsigned long locked;
++	unsigned long locked, grow;
+ 	unsigned long lock_limit;
+ 	int error = -ENOMEM;
+ 
+@@ -500,8 +501,10 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+ 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
+ 	start &= PAGE_MASK;
+ 
+-	locked = len >> PAGE_SHIFT;
+-	locked += current->mm->locked_vm;
++	grow = len >> PAGE_SHIFT;
++	if (!vx_vmlocked_avail(current->mm, grow))
++		goto out;
++	locked = current->mm->locked_vm + grow;
+ 
+ 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ 	lock_limit >>= PAGE_SHIFT;
+@@ -509,6 +512,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+ 	/* check against resource limits */
+ 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
+ 		error = do_mlock(start, len, 1);
++out:
+ 	up_write(&current->mm->mmap_sem);
+ 	return error;
+ }
+@@ -570,6 +574,8 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+ 	lock_limit >>= PAGE_SHIFT;
+ 
+ 	ret = -ENOMEM;
++	if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
++		goto out;
+ 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
+ 	    capable(CAP_IPC_LOCK))
+ 		ret = do_mlockall(flags);
+@@ -644,8 +650,10 @@ int account_locked_memory(struct mm_stru
+ 	if (lim < vm)
+ 		goto out;
+ 
+-	mm->total_vm  += pgsz;
+-	mm->locked_vm += pgsz;
++	// mm->total_vm  += pgsz;
++	vx_vmpages_add(mm, pgsz);
++	// mm->locked_vm += pgsz;
++	vx_vmlocked_add(mm, pgsz);
+ 
+ 	error = 0;
+  out:
+@@ -659,8 +667,10 @@ void refund_locked_memory(struct mm_stru
+ 
+ 	down_write(&mm->mmap_sem);
+ 
+-	mm->total_vm  -= pgsz;
+-	mm->locked_vm -= pgsz;
++	// mm->total_vm  -= pgsz;
++	vx_vmpages_sub(mm, pgsz);
++	// mm->locked_vm -= pgsz;
++	vx_vmlocked_sub(mm, pgsz);
+ 
+ 	up_write(&mm->mmap_sem);
+ }
+--- a/mm/mmap.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/mmap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1224,7 +1224,8 @@ munmap_back:
+ out:
+ 	perf_event_mmap(vma);
+ 
+-	mm->total_vm += len >> PAGE_SHIFT;
++	// mm->total_vm += len >> PAGE_SHIFT;
++	vx_vmpages_add(mm, len >> PAGE_SHIFT);
+ 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+ 	if (vm_flags & VM_LOCKED) {
+ 		/*
+@@ -1233,7 +1234,8 @@ out:
+ 		long nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
+ 		if (nr_pages < 0)
+ 			return nr_pages;	/* vma gone! */
+-		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
++		// mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
++		vx_vmlocked_add(mm, (len >> PAGE_SHIFT) - nr_pages);
+ 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
+ 		make_pages_present(addr, addr + len);
+ 	return addr;
+@@ -1588,9 +1590,9 @@ static int acct_stack_growth(struct vm_a
+ 		return -ENOMEM;
+ 
+ 	/* Ok, everything looks good - let it rip */
+-	mm->total_vm += grow;
++	vx_vmpages_add(mm, grow);
+ 	if (vma->vm_flags & VM_LOCKED)
+-		mm->locked_vm += grow;
++		vx_vmlocked_add(mm, grow);
+ 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+ 	return 0;
+ }
+@@ -1762,7 +1764,8 @@ static void remove_vma_list(struct mm_st
+ 	do {
+ 		long nrpages = vma_pages(vma);
+ 
+-		mm->total_vm -= nrpages;
++		// mm->total_vm -= nrpages;
++		vx_vmpages_sub(mm, nrpages);
+ 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ 		vma = remove_vma(vma);
+ 	} while (vma);
+@@ -1937,7 +1940,8 @@ int do_munmap(struct mm_struct *mm, unsi
+ 		struct vm_area_struct *tmp = vma;
+ 		while (tmp && tmp->vm_start < end) {
+ 			if (tmp->vm_flags & VM_LOCKED) {
+-				mm->locked_vm -= vma_pages(tmp);
++				// mm->locked_vm -= vma_pages(tmp);
++				vx_vmlocked_sub(mm, vma_pages(tmp));
+ 				munlock_vma_pages_all(tmp);
+ 			}
+ 			tmp = tmp->vm_next;
+@@ -2020,6 +2024,8 @@ unsigned long do_brk(unsigned long addr,
+ 		lock_limit >>= PAGE_SHIFT;
+ 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ 			return -EAGAIN;
++		if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
++			return -ENOMEM;
+ 	}
+ 
+ 	/*
+@@ -2046,7 +2052,8 @@ unsigned long do_brk(unsigned long addr,
+ 	if (mm->map_count > sysctl_max_map_count)
+ 		return -ENOMEM;
+ 
+-	if (security_vm_enough_memory(len >> PAGE_SHIFT))
++	if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
++		!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
+ 		return -ENOMEM;
+ 
+ 	/* Can we just expand an old private anonymous mapping? */
+@@ -2072,10 +2079,13 @@ unsigned long do_brk(unsigned long addr,
+ 	vma->vm_page_prot = vm_get_page_prot(flags);
+ 	vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+-	mm->total_vm += len >> PAGE_SHIFT;
++	// mm->total_vm += len >> PAGE_SHIFT;
++	vx_vmpages_add(mm, len >> PAGE_SHIFT);
++
+ 	if (flags & VM_LOCKED) {
+ 		if (!mlock_vma_pages_range(vma, addr, addr + len))
+-			mm->locked_vm += (len >> PAGE_SHIFT);
++			// mm->locked_vm += (len >> PAGE_SHIFT);
++			vx_vmlocked_add(mm, len >> PAGE_SHIFT);
+ 	}
+ 	return addr;
+ }
+@@ -2119,6 +2129,11 @@ void exit_mmap(struct mm_struct *mm)
+ 	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
+ 	tlb_finish_mmu(tlb, 0, end);
+ 
++	set_mm_counter(mm, file_rss, 0);
++	set_mm_counter(mm, anon_rss, 0);
++	vx_vmpages_sub(mm, mm->total_vm);
++	vx_vmlocked_sub(mm, mm->locked_vm);
++
+ 	/*
+ 	 * Walk the list again, actually closing and freeing it,
+ 	 * with preemption enabled, without holding any MM locks.
+@@ -2158,7 +2173,8 @@ int insert_vm_struct(struct mm_struct * 
+ 	if (__vma && __vma->vm_start < vma->vm_end)
+ 		return -ENOMEM;
+ 	if ((vma->vm_flags & VM_ACCOUNT) &&
+-	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
++		(security_vm_enough_memory_mm(mm, vma_pages(vma)) ||
++		!vx_vmpages_avail(mm, vma_pages(vma))))
+ 		return -ENOMEM;
+ 	vma_link(mm, vma, prev, rb_link, rb_parent);
+ 	return 0;
+@@ -2234,6 +2250,8 @@ int may_expand_vm(struct mm_struct *mm, 
+ 
+ 	if (cur + npages > lim)
+ 		return 0;
++	if (!vx_vmpages_avail(mm, npages))
++		return 0;
+ 	return 1;
+ }
+ 
+@@ -2315,7 +2333,7 @@ int install_special_mapping(struct mm_st
+ 	if (ret)
+ 		goto out;
+ 
+-	mm->total_vm += len >> PAGE_SHIFT;
++	vx_vmpages_add(mm, len >> PAGE_SHIFT);
+ 
+ 	perf_event_mmap(vma);
+ 
+--- a/mm/mremap.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/mremap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/mmu_notifier.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -232,7 +233,7 @@ static unsigned long move_vma(struct vm_
+ 	 * If this were a serious issue, we'd add a flag to do_munmap().
+ 	 */
+ 	hiwater_vm = mm->hiwater_vm;
+-	mm->total_vm += new_len >> PAGE_SHIFT;
++	vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
+ 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
+ 
+ 	if (do_munmap(mm, old_addr, old_len) < 0) {
+@@ -250,7 +251,7 @@ static unsigned long move_vma(struct vm_
+ 	}
+ 
+ 	if (vm_flags & VM_LOCKED) {
+-		mm->locked_vm += new_len >> PAGE_SHIFT;
++		vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
+ 		if (new_len > old_len)
+ 			mlock_vma_pages_range(new_vma, new_addr + old_len,
+ 						       new_addr + new_len);
+@@ -468,10 +469,12 @@ unsigned long do_mremap(unsigned long ad
+ 			vma_adjust(vma, vma->vm_start,
+ 				addr + new_len, vma->vm_pgoff, NULL);
+ 
+-			mm->total_vm += pages;
++			// mm->total_vm += pages;
++			vx_vmpages_add(mm, pages);
+ 			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
+ 			if (vma->vm_flags & VM_LOCKED) {
+-				mm->locked_vm += pages;
++				// mm->locked_vm += pages;
++				vx_vmlocked_add(mm, pages);
+ 				mlock_vma_pages_range(vma, addr + old_len,
+ 						   addr + new_len);
+ 			}
+--- a/mm/nommu.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/nommu.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1349,7 +1349,7 @@ unsigned long do_mmap_pgoff(struct file 
+ 	/* okay... we have a mapping; now we have to register it */
+ 	result = vma->vm_start;
+ 
+-	current->mm->total_vm += len >> PAGE_SHIFT;
++	vx_vmpages_add(current->mm, len >> PAGE_SHIFT);
+ 
+ share:
+ 	add_vma_to_mm(current->mm, vma);
+@@ -1609,7 +1609,7 @@ void exit_mmap(struct mm_struct *mm)
+ 
+ 	kenter("");
+ 
+-	mm->total_vm = 0;
++	vx_vmpages_sub(mm, mm->total_vm);
+ 
+ 	while ((vma = mm->mmap)) {
+ 		mm->mmap = vma->vm_next;
+--- a/mm/oom_kill.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/oom_kill.c	2011-06-10 13:03:02.000000000 +0200
+@@ -27,6 +27,9 @@
+ #include <linux/notifier.h>
+ #include <linux/memcontrol.h>
+ #include <linux/security.h>
++#include <linux/reboot.h>
++#include <linux/vs_memory.h>
++#include <linux/vs_context.h>
+ 
+ int sysctl_panic_on_oom;
+ int sysctl_oom_kill_allocating_task;
+@@ -186,9 +189,21 @@ unsigned long badness(struct task_struct
+ 			points >>= -(oom_adj);
+ 	}
+ 
++	/*
++	 * add points for context badness and
++	 * reduce badness for processes belonging to
++	 * a different context
++	 */
++
++	points += vx_badness(p, mm);
++
++	if ((vx_current_xid() > 1) &&
++		vx_current_xid() != vx_task_xid(p))
++		points /= 16;
++
+ #ifdef DEBUG
+-	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
+-	p->pid, p->comm, points);
++	printk(KERN_DEBUG "OOMkill: task %d:#%u (%s) got %d points\n",
++		task_pid_nr(p), p->xid, p->comm, points);
+ #endif
+ 	return points;
+ }
+@@ -230,6 +245,7 @@ static struct task_struct *select_bad_pr
+ 	struct task_struct *p;
+ 	struct task_struct *chosen = NULL;
+ 	struct timespec uptime;
++	unsigned xid = vx_current_xid();
+ 	*ppoints = 0;
+ 
+ 	do_posix_clock_monotonic_gettime(&uptime);
+@@ -242,11 +258,14 @@ static struct task_struct *select_bad_pr
+ 		 */
+ 		if (!p->mm)
+ 			continue;
+-		/* skip the init task */
+-		if (is_global_init(p))
++		/* skip the init task, global and per guest */
++		if (task_is_init(p))
+ 			continue;
+ 		if (mem && !task_in_mem_cgroup(p, mem))
+ 			continue;
++		/* skip other guest and host processes if oom in guest */
++		if (xid && vx_task_xid(p) != xid)
++			continue;
+ 
+ 		/*
+ 		 * This task already has access to memory reserves and is
+@@ -357,8 +376,8 @@ static void __oom_kill_task(struct task_
+ 	}
+ 
+ 	if (verbose)
+-		printk(KERN_ERR "Killed process %d (%s)\n",
+-				task_pid_nr(p), p->comm);
++		printk(KERN_ERR "Killed process %s(%d:#%u)\n",
++			p->comm, task_pid_nr(p), p->xid);
+ 
+ 	/*
+ 	 * We give our sacrificial lamb high priority and access to
+@@ -419,8 +438,8 @@ static int oom_kill_process(struct task_
+ 		return 0;
+ 	}
+ 
+-	printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
+-					message, task_pid_nr(p), p->comm, points);
++	printk(KERN_ERR "%s: kill process %s(%d:#%u) score %li or a child\n",
++		message, p->comm, task_pid_nr(p), p->xid, points);
+ 
+ 	/* Try to kill a child first */
+ 	list_for_each_entry(c, &p->children, sibling) {
+@@ -521,6 +540,8 @@ void clear_zonelist_oom(struct zonelist 
+ 	spin_unlock(&zone_scan_lock);
+ }
+ 
++long vs_oom_action(unsigned int);
++
+ /*
+  * Must be called with tasklist_lock held for read.
+  */
+@@ -546,7 +567,11 @@ retry:
+ 	/* Found nothing?!?! Either we hang forever, or we panic. */
+ 	if (!p) {
+ 		read_unlock(&tasklist_lock);
+-		panic("Out of memory and no killable processes...\n");
++		/* avoid panic for guest OOM */
++		if (current->xid)
++			vs_oom_action(LINUX_REBOOT_CMD_OOM);
++		else
++			panic("Out of memory and no killable processes...\n");
+ 	}
+ 
+ 	if (oom_kill_process(p, gfp_mask, order, points, NULL,
+--- a/mm/page_alloc.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/page_alloc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -48,6 +48,8 @@
+ #include <linux/page_cgroup.h>
+ #include <linux/debugobjects.h>
+ #include <linux/kmemleak.h>
++#include <linux/vs_base.h>
++#include <linux/vs_limit.h>
+ #include <trace/events/kmem.h>
+ 
+ #include <asm/tlbflush.h>
+@@ -2144,6 +2146,9 @@ void si_meminfo(struct sysinfo *val)
+ 	val->totalhigh = totalhigh_pages;
+ 	val->freehigh = nr_free_highpages();
+ 	val->mem_unit = PAGE_SIZE;
++
++	if (vx_flags(VXF_VIRT_MEM, 0))
++		vx_vsi_meminfo(val);
+ }
+ 
+ EXPORT_SYMBOL(si_meminfo);
+@@ -2164,6 +2169,9 @@ void si_meminfo_node(struct sysinfo *val
+ 	val->freehigh = 0;
+ #endif
+ 	val->mem_unit = PAGE_SIZE;
++
++	if (vx_flags(VXF_VIRT_MEM, 0))
++		vx_vsi_meminfo(val);
+ }
+ #endif
+ 
+--- a/mm/rmap.c	2009-12-03 20:02:58.000000000 +0100
++++ a/mm/rmap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -55,6 +55,7 @@
+ #include <linux/memcontrol.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/migrate.h>
++#include <linux/vs_memory.h>
+ 
+ #include <asm/tlbflush.h>
+ 
+--- a/mm/shmem.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/shmem.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1781,7 +1781,7 @@ static int shmem_statfs(struct dentry *d
+ {
+ 	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
+ 
+-	buf->f_type = TMPFS_MAGIC;
++	buf->f_type = TMPFS_SUPER_MAGIC;
+ 	buf->f_bsize = PAGE_CACHE_SIZE;
+ 	buf->f_namelen = NAME_MAX;
+ 	spin_lock(&sbinfo->stat_lock);
+@@ -2346,7 +2346,7 @@ int shmem_fill_super(struct super_block 
+ 	sb->s_maxbytes = SHMEM_MAX_BYTES;
+ 	sb->s_blocksize = PAGE_CACHE_SIZE;
+ 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+-	sb->s_magic = TMPFS_MAGIC;
++	sb->s_magic = TMPFS_SUPER_MAGIC;
+ 	sb->s_op = &shmem_ops;
+ 	sb->s_time_gran = 1;
+ #ifdef CONFIG_TMPFS_POSIX_ACL
+--- a/mm/slab.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/slab.c	2011-06-10 13:03:02.000000000 +0200
+@@ -431,6 +431,8 @@ static void kmem_list3_init(struct kmem_
+ #define STATS_INC_FREEMISS(x)	do { } while (0)
+ #endif
+ 
++#include "slab_vs.h"
++
+ #if DEBUG
+ 
+ /*
+@@ -3251,6 +3253,7 @@ retry:
+ 
+ 	obj = slab_get_obj(cachep, slabp, nodeid);
+ 	check_slabp(cachep, slabp);
++	vx_slab_alloc(cachep, flags);
+ 	l3->free_objects--;
+ 	/* move slabp to correct slabp list: */
+ 	list_del(&slabp->list);
+@@ -3327,6 +3330,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ 	/* ___cache_alloc_node can fall back to other nodes */
+ 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
+   out:
++	vx_slab_alloc(cachep, flags);
+ 	local_irq_restore(save_flags);
+ 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+@@ -3513,6 +3517,7 @@ static inline void __cache_free(struct k
+ 	check_irq_off();
+ 	kmemleak_free_recursive(objp, cachep->flags);
+ 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
++	vx_slab_free(cachep);
+ 
+ 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+ 
+--- a/mm/slab_vs.h	1970-01-01 01:00:00.000000000 +0100
++++ a/mm/slab_vs.h	2011-06-10 13:03:02.000000000 +0200
+@@ -0,0 +1,29 @@
++
++#include <linux/vserver/context.h>
++
++#include <linux/vs_context.h>
++
++static inline
++void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
++{
++	int what = gfp_zone(cachep->gfpflags);
++	struct vx_info *vxi = current_vx_info();
++
++	if (!vxi)
++		return;
++
++	atomic_add(cachep->buffer_size, &vxi->cacct.slab[what]);
++}
++
++static inline
++void vx_slab_free(struct kmem_cache *cachep)
++{
++	int what = gfp_zone(cachep->gfpflags);
++	struct vx_info *vxi = current_vx_info();
++
++	if (!vxi)
++		return;
++
++	atomic_sub(cachep->buffer_size, &vxi->cacct.slab[what]);
++}
++
+--- a/mm/swapfile.c	2011-05-29 23:42:29.000000000 +0200
++++ a/mm/swapfile.c	2011-06-10 13:03:02.000000000 +0200
+@@ -34,6 +34,8 @@
+ #include <asm/tlbflush.h>
+ #include <linux/swapops.h>
+ #include <linux/page_cgroup.h>
++#include <linux/vs_base.h>
++#include <linux/vs_memory.h>
+ 
+ static DEFINE_SPINLOCK(swap_lock);
+ static unsigned int nr_swapfiles;
+@@ -1682,6 +1684,8 @@ static void *swap_next(struct seq_file *
+ 	if (v == SEQ_START_TOKEN)
+ 		ptr = swap_info;
+ 	else {
++		if (vx_flags(VXF_VIRT_MEM, 0))
++			return NULL;
+ 		ptr = v;
+ 		ptr++;
+ 	}
+@@ -1709,6 +1713,16 @@ static int swap_show(struct seq_file *sw
+ 
+ 	if (ptr == SEQ_START_TOKEN) {
+ 		seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
++		if (vx_flags(VXF_VIRT_MEM, 0)) {
++			struct sysinfo si;
++
++			vx_vsi_swapinfo(&si);
++			if (si.totalswap < (1 << 10))
++				return 0;
++			seq_printf(swap, "%s\t\t\t\t\t%s\t%lu\t%lu\t%d\n",
++				"hdv0", "partition", si.totalswap >> 10,
++				(si.totalswap - si.freeswap) >> 10, -1);
++		}
+ 		return 0;
+ 	}
+ 
+@@ -2066,6 +2080,8 @@ void si_swapinfo(struct sysinfo *val)
+ 	val->freeswap = nr_swap_pages + nr_to_be_unused;
+ 	val->totalswap = total_swap_pages + nr_to_be_unused;
+ 	spin_unlock(&swap_lock);
++	if (vx_flags(VXF_VIRT_MEM, 0))
++		vx_vsi_swapinfo(val);
+ }
+ 
+ /*
+--- a/net/core/dev.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/core/dev.c	2011-06-10 13:03:02.000000000 +0200
+@@ -126,6 +126,7 @@
+ #include <linux/in.h>
+ #include <linux/jhash.h>
+ #include <linux/random.h>
++#include <linux/vs_inet.h>
+ #include <trace/events/napi.h>
+ 
+ #include "net-sysfs.h"
+@@ -591,7 +592,8 @@ struct net_device *__dev_get_by_name(str
+ 	hlist_for_each(p, dev_name_hash(net, name)) {
+ 		struct net_device *dev
+ 			= hlist_entry(p, struct net_device, name_hlist);
+-		if (!strncmp(dev->name, name, IFNAMSIZ))
++		if (!strncmp(dev->name, name, IFNAMSIZ) &&
++		    nx_dev_visible(current_nx_info(), dev))
+ 			return dev;
+ 	}
+ 	return NULL;
+@@ -642,7 +644,8 @@ struct net_device *__dev_get_by_index(st
+ 	hlist_for_each(p, dev_index_hash(net, ifindex)) {
+ 		struct net_device *dev
+ 			= hlist_entry(p, struct net_device, index_hlist);
+-		if (dev->ifindex == ifindex)
++		if ((dev->ifindex == ifindex) &&
++		    nx_dev_visible(current_nx_info(), dev))
+ 			return dev;
+ 	}
+ 	return NULL;
+@@ -695,10 +698,12 @@ struct net_device *dev_getbyhwaddr(struc
+ 
+ 	ASSERT_RTNL();
+ 
+-	for_each_netdev(net, dev)
++	for_each_netdev(net, dev) {
+ 		if (dev->type == type &&
+-		    !memcmp(dev->dev_addr, ha, dev->addr_len))
++		    !memcmp(dev->dev_addr, ha, dev->addr_len) &&
++		    nx_dev_visible(current_nx_info(), dev))
+ 			return dev;
++	}
+ 
+ 	return NULL;
+ }
+@@ -709,9 +714,11 @@ struct net_device *__dev_getfirstbyhwtyp
+ 	struct net_device *dev;
+ 
+ 	ASSERT_RTNL();
+-	for_each_netdev(net, dev)
+-		if (dev->type == type)
++	for_each_netdev(net, dev) {
++		if ((dev->type == type) &&
++		    nx_dev_visible(current_nx_info(), dev))
+ 			return dev;
++	}
+ 
+ 	return NULL;
+ }
+@@ -830,6 +837,8 @@ static int __dev_alloc_name(struct net *
+ 				continue;
+ 			if (i < 0 || i >= max_netdevices)
+ 				continue;
++			if (!nx_dev_visible(current_nx_info(), d))
++				continue;
+ 
+ 			/*  avoid cases where sscanf is not exact inverse of printf */
+ 			snprintf(buf, IFNAMSIZ, name, i);
+@@ -3002,6 +3011,8 @@ static int dev_ifconf(struct net *net, c
+ 
+ 	total = 0;
+ 	for_each_netdev(net, dev) {
++		if (!nx_dev_visible(current_nx_info(), dev))
++			continue;
+ 		for (i = 0; i < NPROTO; i++) {
+ 			if (gifconf_list[i]) {
+ 				int done;
+@@ -3070,6 +3081,9 @@ static void dev_seq_printf_stats(struct 
+ {
+ 	const struct net_device_stats *stats = dev_get_stats(dev);
+ 
++	if (!nx_dev_visible(current_nx_info(), dev))
++		return;
++
+ 	seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+ 		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+ 		   dev->name, stats->rx_bytes, stats->rx_packets,
+@@ -5335,7 +5349,6 @@ int dev_change_net_namespace(struct net_
+ 	if (dev->dev.parent)
+ 		goto out;
+ #endif
+-
+ 	/* Ensure the device has been registrered */
+ 	err = -EINVAL;
+ 	if (dev->reg_state != NETREG_REGISTERED)
+--- a/net/core/rtnetlink.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/core/rtnetlink.c	2011-06-10 13:03:02.000000000 +0200
+@@ -688,6 +688,8 @@ static int rtnl_dump_ifinfo(struct sk_bu
+ 
+ 	idx = 0;
+ 	for_each_netdev(net, dev) {
++		if (!nx_dev_visible(skb->sk->sk_nx_info, dev))
++			continue;
+ 		if (idx < s_idx)
+ 			goto cont;
+ 		if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+@@ -1222,6 +1224,9 @@ void rtmsg_ifinfo(int type, struct net_d
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
+ 
++	if (!nx_dev_visible(current_nx_info(), dev))
++		return;
++
+ 	skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ 	if (skb == NULL)
+ 		goto errout;
+--- a/net/core/sock.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/core/sock.c	2011-06-10 13:03:02.000000000 +0200
+@@ -125,6 +125,10 @@
+ #include <linux/ipsec.h>
+ 
+ #include <linux/filter.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_limit.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
+ 
+ #ifdef CONFIG_INET
+ #include <net/tcp.h>
+@@ -984,6 +988,8 @@ static struct sock *sk_prot_alloc(struct
+ 		if (!try_module_get(prot->owner))
+ 			goto out_free_sec;
+ 	}
++		sock_vx_init(sk);
++		sock_nx_init(sk);
+ 
+ 	return sk;
+ 
+@@ -1063,6 +1069,11 @@ static void __sk_free(struct sock *sk)
+ 		       __func__, atomic_read(&sk->sk_omem_alloc));
+ 
+ 	put_net(sock_net(sk));
++	vx_sock_dec(sk);
++	clr_vx_info(&sk->sk_vx_info);
++	sk->sk_xid = -1;
++	clr_nx_info(&sk->sk_nx_info);
++	sk->sk_nid = -1;
+ 	sk_prot_free(sk->sk_prot_creator, sk);
+ }
+ 
+@@ -1110,6 +1121,8 @@ struct sock *sk_clone(const struct sock 
+ 
+ 		/* SANITY */
+ 		get_net(sock_net(newsk));
++		sock_vx_init(newsk);
++		sock_nx_init(newsk);
+ 		sk_node_init(&newsk->sk_node);
+ 		sock_lock_init(newsk);
+ 		bh_lock_sock(newsk);
+@@ -1164,6 +1177,12 @@ struct sock *sk_clone(const struct sock 
+ 		smp_wmb();
+ 		atomic_set(&newsk->sk_refcnt, 2);
+ 
++		set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
++		newsk->sk_xid = sk->sk_xid;
++		vx_sock_inc(newsk);
++		set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
++		newsk->sk_nid = sk->sk_nid;
++
+ 		/*
+ 		 * Increment the counter in the same struct proto as the master
+ 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+@@ -1886,6 +1905,12 @@ void sock_init_data(struct socket *sock,
+ 
+ 	sk->sk_stamp = ktime_set(-1L, 0);
+ 
++	set_vx_info(&sk->sk_vx_info, current_vx_info());
++	sk->sk_xid = vx_current_xid();
++	vx_sock_inc(sk);
++	set_nx_info(&sk->sk_nx_info, current_nx_info());
++	sk->sk_nid = nx_current_nid();
++
+ 	/*
+ 	 * Before updating sk_refcnt, we must commit prior changes to memory
+ 	 * (Documentation/RCU/rculist_nulls.txt for details)
+--- a/net/ipv4/af_inet.c	2009-12-03 20:02:59.000000000 +0100
++++ a/net/ipv4/af_inet.c	2011-06-10 13:03:02.000000000 +0200
+@@ -115,6 +115,7 @@
+ #ifdef CONFIG_IP_MROUTE
+ #include <linux/mroute.h>
+ #endif
++#include <linux/vs_limit.h>
+ 
+ 
+ /* The inetsw table contains everything that inet_create needs to
+@@ -325,9 +326,12 @@ lookup_protocol:
+ 	}
+ 
+ 	err = -EPERM;
++	if ((protocol == IPPROTO_ICMP) &&
++		nx_capable(answer->capability, NXC_RAW_ICMP))
++		goto override;
+ 	if (answer->capability > 0 && !capable(answer->capability))
+ 		goto out_rcu_unlock;
+-
++override:
+ 	err = -EAFNOSUPPORT;
+ 	if (!inet_netns_ok(net, protocol))
+ 		goto out_rcu_unlock;
+@@ -447,6 +451,7 @@ int inet_bind(struct socket *sock, struc
+ 	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
+ 	struct sock *sk = sock->sk;
+ 	struct inet_sock *inet = inet_sk(sk);
++	struct nx_v4_sock_addr nsa;
+ 	unsigned short snum;
+ 	int chk_addr_ret;
+ 	int err;
+@@ -460,7 +465,11 @@ int inet_bind(struct socket *sock, struc
+ 	if (addr_len < sizeof(struct sockaddr_in))
+ 		goto out;
+ 
+-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
++	err = v4_map_sock_addr(inet, addr, &nsa);
++	if (err)
++		goto out;
++
++	chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
+ 
+ 	/* Not specified by any standard per-se, however it breaks too
+ 	 * many applications when removed.  It is unfortunate since
+@@ -472,7 +481,7 @@ int inet_bind(struct socket *sock, struc
+ 	err = -EADDRNOTAVAIL;
+ 	if (!sysctl_ip_nonlocal_bind &&
+ 	    !(inet->freebind || inet->transparent) &&
+-	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
++	    nsa.saddr != htonl(INADDR_ANY) &&
+ 	    chk_addr_ret != RTN_LOCAL &&
+ 	    chk_addr_ret != RTN_MULTICAST &&
+ 	    chk_addr_ret != RTN_BROADCAST)
+@@ -497,7 +506,7 @@ int inet_bind(struct socket *sock, struc
+ 	if (sk->sk_state != TCP_CLOSE || inet->num)
+ 		goto out_release_sock;
+ 
+-	inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
++	v4_set_sock_addr(inet, &nsa);
+ 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ 		inet->saddr = 0;  /* Use device */
+ 
+@@ -694,11 +703,13 @@ int inet_getname(struct socket *sock, st
+ 		     peer == 1))
+ 			return -ENOTCONN;
+ 		sin->sin_port = inet->dport;
+-		sin->sin_addr.s_addr = inet->daddr;
++		sin->sin_addr.s_addr =
++			nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
+ 	} else {
+ 		__be32 addr = inet->rcv_saddr;
+ 		if (!addr)
+ 			addr = inet->saddr;
++		addr = nx_map_sock_lback(sk->sk_nx_info, addr);
+ 		sin->sin_port = inet->sport;
+ 		sin->sin_addr.s_addr = addr;
+ 	}
+--- a/net/ipv4/devinet.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv4/devinet.c	2011-06-10 13:03:02.000000000 +0200
+@@ -413,6 +413,7 @@ struct in_device *inetdev_by_index(struc
+ 	return in_dev;
+ }
+ 
++
+ /* Called only from RTNL semaphored context. No locks. */
+ 
+ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+@@ -653,6 +654,8 @@ int devinet_ioctl(struct net *net, unsig
+ 		*colon = ':';
+ 
+ 	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
++		struct nx_info *nxi = current_nx_info();
++
+ 		if (tryaddrmatch) {
+ 			/* Matthias Andree */
+ 			/* compare label and address (4.4BSD style) */
+@@ -661,6 +664,8 @@ int devinet_ioctl(struct net *net, unsig
+ 			   This is checked above. */
+ 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ 			     ifap = &ifa->ifa_next) {
++				if (!nx_v4_ifa_visible(nxi, ifa))
++					continue;
+ 				if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
+ 				    sin_orig.sin_addr.s_addr ==
+ 							ifa->ifa_address) {
+@@ -673,9 +678,12 @@ int devinet_ioctl(struct net *net, unsig
+ 		   comparing just the label */
+ 		if (!ifa) {
+ 			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+-			     ifap = &ifa->ifa_next)
++			     ifap = &ifa->ifa_next) {
++				if (!nx_v4_ifa_visible(nxi, ifa))
++					continue;
+ 				if (!strcmp(ifr.ifr_name, ifa->ifa_label))
+ 					break;
++			}
+ 		}
+ 	}
+ 
+@@ -826,6 +834,8 @@ static int inet_gifconf(struct net_devic
+ 		goto out;
+ 
+ 	for (; ifa; ifa = ifa->ifa_next) {
++		if (!nx_v4_ifa_visible(current_nx_info(), ifa))
++			continue;
+ 		if (!buf) {
+ 			done += sizeof(ifr);
+ 			continue;
+@@ -1185,6 +1195,7 @@ static int inet_dump_ifaddr(struct sk_bu
+ 	struct net_device *dev;
+ 	struct in_device *in_dev;
+ 	struct in_ifaddr *ifa;
++	struct sock *sk = skb->sk;
+ 	int s_ip_idx, s_idx = cb->args[0];
+ 
+ 	s_ip_idx = ip_idx = cb->args[1];
+@@ -1199,6 +1210,8 @@ static int inet_dump_ifaddr(struct sk_bu
+ 
+ 		for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
+ 		     ifa = ifa->ifa_next, ip_idx++) {
++			if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa))
++				continue;
+ 			if (ip_idx < s_ip_idx)
+ 				continue;
+ 			if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
+--- a/net/ipv4/fib_hash.c	2009-09-10 15:26:29.000000000 +0200
++++ a/net/ipv4/fib_hash.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1021,7 +1021,7 @@ static int fib_seq_show(struct seq_file 
+ 	prefix	= f->fn_key;
+ 	mask	= FZ_MASK(iter->zone);
+ 	flags	= fib_flag_trans(fa->fa_type, mask, fi);
+-	if (fi)
++	if (fi && nx_dev_visible(current_nx_info(), fi->fib_dev))
+ 		seq_printf(seq,
+ 			 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
+ 			 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
+--- a/net/ipv4/inet_connection_sock.c	2009-12-03 20:02:59.000000000 +0100
++++ a/net/ipv4/inet_connection_sock.c	2011-06-10 13:03:02.000000000 +0200
+@@ -49,10 +49,40 @@ void inet_get_local_port_range(int *low,
+ }
+ EXPORT_SYMBOL(inet_get_local_port_range);
+ 
++int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
++{
++	__be32	sk1_rcv_saddr = inet_rcv_saddr(sk1),
++		sk2_rcv_saddr = inet_rcv_saddr(sk2);
++
++	if (inet_v6_ipv6only(sk2))
++		return 0;
++
++	if (sk1_rcv_saddr &&
++	    sk2_rcv_saddr &&
++	    sk1_rcv_saddr == sk2_rcv_saddr)
++		return 1;
++
++	if (sk1_rcv_saddr &&
++	    !sk2_rcv_saddr &&
++	    v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND))
++		return 1;
++
++	if (sk2_rcv_saddr &&
++	    !sk1_rcv_saddr &&
++	    v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND))
++		return 1;
++
++	if (!sk1_rcv_saddr &&
++	    !sk2_rcv_saddr &&
++	    nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info))
++		return 1;
++
++	return 0;
++}
++
+ int inet_csk_bind_conflict(const struct sock *sk,
+ 			   const struct inet_bind_bucket *tb)
+ {
+-	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
+ 	struct sock *sk2;
+ 	struct hlist_node *node;
+ 	int reuse = sk->sk_reuse;
+@@ -72,9 +102,7 @@ int inet_csk_bind_conflict(const struct 
+ 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
+ 			if (!reuse || !sk2->sk_reuse ||
+ 			    sk2->sk_state == TCP_LISTEN) {
+-				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
+-				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
+-				    sk2_rcv_saddr == sk_rcv_saddr)
++				if (ipv4_rcv_saddr_equal(sk, sk2))
+ 					break;
+ 			}
+ 		}
+--- a/net/ipv4/inet_diag.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv4/inet_diag.c	2011-06-10 13:03:02.000000000 +0200
+@@ -32,6 +32,8 @@
+ #include <linux/stddef.h>
+ 
+ #include <linux/inet_diag.h>
++#include <linux/vs_network.h>
++#include <linux/vs_inet.h>
+ 
+ static const struct inet_diag_handler **inet_diag_table;
+ 
+@@ -118,8 +120,8 @@ static int inet_csk_diag_fill(struct soc
+ 
+ 	r->id.idiag_sport = inet->sport;
+ 	r->id.idiag_dport = inet->dport;
+-	r->id.idiag_src[0] = inet->rcv_saddr;
+-	r->id.idiag_dst[0] = inet->daddr;
++	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, inet->rcv_saddr);
++	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
+ 
+ #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ 	if (r->idiag_family == AF_INET6) {
+@@ -204,8 +206,8 @@ static int inet_twsk_diag_fill(struct in
+ 	r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
+ 	r->id.idiag_sport     = tw->tw_sport;
+ 	r->id.idiag_dport     = tw->tw_dport;
+-	r->id.idiag_src[0]    = tw->tw_rcv_saddr;
+-	r->id.idiag_dst[0]    = tw->tw_daddr;
++	r->id.idiag_src[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr);
++	r->id.idiag_dst[0]    = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr);
+ 	r->idiag_state	      = tw->tw_substate;
+ 	r->idiag_timer	      = 3;
+ 	r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
+@@ -262,6 +264,7 @@ static int inet_diag_get_exact(struct sk
+ 	err = -EINVAL;
+ 
+ 	if (req->idiag_family == AF_INET) {
++		/* TODO: lback */
+ 		sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
+ 				 req->id.idiag_dport, req->id.idiag_src[0],
+ 				 req->id.idiag_sport, req->id.idiag_if);
+@@ -506,6 +509,7 @@ static int inet_csk_diag_dump(struct soc
+ 		} else
+ #endif
+ 		{
++			/* TODO: lback */
+ 			entry.saddr = &inet->rcv_saddr;
+ 			entry.daddr = &inet->daddr;
+ 		}
+@@ -544,6 +548,7 @@ static int inet_twsk_diag_dump(struct in
+ 		} else
+ #endif
+ 		{
++			/* TODO: lback */
+ 			entry.saddr = &tw->tw_rcv_saddr;
+ 			entry.daddr = &tw->tw_daddr;
+ 		}
+@@ -590,8 +595,8 @@ static int inet_diag_fill_req(struct sk_
+ 
+ 	r->id.idiag_sport = inet->sport;
+ 	r->id.idiag_dport = ireq->rmt_port;
+-	r->id.idiag_src[0] = ireq->loc_addr;
+-	r->id.idiag_dst[0] = ireq->rmt_addr;
++	r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr);
++	r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr);
+ 	r->idiag_expires = jiffies_to_msecs(tmo);
+ 	r->idiag_rqueue = 0;
+ 	r->idiag_wqueue = 0;
+@@ -662,6 +667,7 @@ static int inet_diag_dump_reqs(struct sk
+ 				continue;
+ 
+ 			if (bc) {
++				/* TODO: lback */
+ 				entry.saddr =
+ #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ 					(entry.family == AF_INET6) ?
+@@ -732,6 +738,8 @@ static int inet_diag_dump(struct sk_buff
+ 			sk_nulls_for_each(sk, node, &ilb->head) {
+ 				struct inet_sock *inet = inet_sk(sk);
+ 
++				if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++					continue;
+ 				if (num < s_num) {
+ 					num++;
+ 					continue;
+@@ -798,6 +806,8 @@ skip_listen_ht:
+ 		sk_nulls_for_each(sk, node, &head->chain) {
+ 			struct inet_sock *inet = inet_sk(sk);
+ 
++			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++				continue;
+ 			if (num < s_num)
+ 				goto next_normal;
+ 			if (!(r->idiag_states & (1 << sk->sk_state)))
+@@ -822,6 +832,8 @@ next_normal:
+ 			inet_twsk_for_each(tw, node,
+ 				    &head->twchain) {
+ 
++				if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
++					continue;
+ 				if (num < s_num)
+ 					goto next_dying;
+ 				if (r->id.idiag_sport != tw->tw_sport &&
+--- a/net/ipv4/inet_hashtables.c	2009-06-11 17:13:29.000000000 +0200
++++ a/net/ipv4/inet_hashtables.c	2011-06-10 13:03:02.000000000 +0200
+@@ -22,6 +22,7 @@
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+ #include <net/secure_seq.h>
++#include <net/route.h>
+ #include <net/ip.h>
+ 
+ /*
+@@ -134,6 +135,11 @@ static inline int compute_score(struct s
+ 			if (rcv_saddr != daddr)
+ 				return -1;
+ 			score += 2;
++		} else {
++			/* block non nx_info ips */
++			if (!v4_addr_in_nx_info(sk->sk_nx_info,
++				daddr, NXA_MASK_BIND))
++				return -1;
+ 		}
+ 		if (sk->sk_bound_dev_if) {
+ 			if (sk->sk_bound_dev_if != dif)
+@@ -151,7 +157,6 @@ static inline int compute_score(struct s
+  * wildcarded during the search since they can never be otherwise.
+  */
+ 
+-
+ struct sock *__inet_lookup_listener(struct net *net,
+ 				    struct inet_hashinfo *hashinfo,
+ 				    const __be32 daddr, const unsigned short hnum,
+@@ -174,6 +179,7 @@ begin:
+ 			hiscore = score;
+ 		}
+ 	}
++
+ 	/*
+ 	 * if the nulls value we got at the end of this lookup is
+ 	 * not the expected one, we must restart lookup.
+--- a/net/ipv4/netfilter/nf_nat_helper.c	2009-12-03 20:02:59.000000000 +0100
++++ a/net/ipv4/netfilter/nf_nat_helper.c	2011-06-10 13:03:02.000000000 +0200
+@@ -19,6 +19,7 @@
+ #include <net/route.h>
+ 
+ #include <linux/netfilter_ipv4.h>
++#include <net/route.h>
+ #include <net/netfilter/nf_conntrack.h>
+ #include <net/netfilter/nf_conntrack_helper.h>
+ #include <net/netfilter/nf_conntrack_ecache.h>
+--- a/net/ipv4/netfilter.c	2009-09-10 15:26:29.000000000 +0200
++++ a/net/ipv4/netfilter.c	2011-06-10 13:03:02.000000000 +0200
+@@ -4,7 +4,7 @@
+ #include <linux/netfilter_ipv4.h>
+ #include <linux/ip.h>
+ #include <linux/skbuff.h>
+-#include <net/route.h>
++// #include <net/route.h>
+ #include <net/xfrm.h>
+ #include <net/ip.h>
+ #include <net/netfilter/nf_queue.h>
+--- a/net/ipv4/raw.c	2009-12-03 20:02:59.000000000 +0100
++++ a/net/ipv4/raw.c	2011-06-10 13:03:02.000000000 +0200
+@@ -117,7 +117,7 @@ static struct sock *__raw_v4_lookup(stru
+ 
+ 		if (net_eq(sock_net(sk), net) && inet->num == num	&&
+ 		    !(inet->daddr && inet->daddr != raddr) 		&&
+-		    !(inet->rcv_saddr && inet->rcv_saddr != laddr)	&&
++		    v4_sock_addr_match(sk->sk_nx_info, inet, laddr)	&&
+ 		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+ 			goto found; /* gotcha */
+ 	}
+@@ -383,6 +383,12 @@ static int raw_send_hdrinc(struct sock *
+ 		icmp_out_count(net, ((struct icmphdr *)
+ 			skb_transport_header(skb))->type);
+ 
++	err = -EPERM;
++	if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) &&
++		sk->sk_nx_info &&
++		!v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND))
++		goto error_free;
++
+ 	err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
+ 		      dst_output);
+ 	if (err > 0)
+@@ -563,6 +569,13 @@ static int raw_sendmsg(struct kiocb *ioc
+ 		}
+ 
+ 		security_sk_classify_flow(sk, &fl);
++		if (sk->sk_nx_info) {
++			err = ip_v4_find_src(sock_net(sk),
++				sk->sk_nx_info, &rt, &fl);
++
++			if (err)
++				goto done;
++		}
+ 		err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
+ 	}
+ 	if (err)
+@@ -635,17 +648,19 @@ static int raw_bind(struct sock *sk, str
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
++	struct nx_v4_sock_addr nsa = { 0 };
+ 	int ret = -EINVAL;
+ 	int chk_addr_ret;
+ 
+ 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
+ 		goto out;
+-	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
++	v4_map_sock_addr(inet, addr, &nsa);
++	chk_addr_ret = inet_addr_type(sock_net(sk), nsa.saddr);
+ 	ret = -EADDRNOTAVAIL;
+-	if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
++	if (nsa.saddr && chk_addr_ret != RTN_LOCAL &&
+ 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ 		goto out;
+-	inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
++	v4_set_sock_addr(inet, &nsa);
+ 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
+ 		inet->saddr = 0;  /* Use device */
+ 	sk_dst_reset(sk);
+@@ -697,7 +712,8 @@ static int raw_recvmsg(struct kiocb *ioc
+ 	/* Copy the address. */
+ 	if (sin) {
+ 		sin->sin_family = AF_INET;
+-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
++		sin->sin_addr.s_addr =
++			nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr);
+ 		sin->sin_port = 0;
+ 		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+ 	}
+@@ -875,7 +891,8 @@ static struct sock *raw_get_first(struct
+ 		struct hlist_node *node;
+ 
+ 		sk_for_each(sk, node, &state->h->ht[state->bucket])
+-			if (sock_net(sk) == seq_file_net(seq))
++			if ((sock_net(sk) == seq_file_net(seq)) &&
++				nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ 				goto found;
+ 	}
+ 	sk = NULL;
+@@ -891,7 +908,8 @@ static struct sock *raw_get_next(struct 
+ 		sk = sk_next(sk);
+ try_again:
+ 		;
+-	} while (sk && sock_net(sk) != seq_file_net(seq));
++	} while (sk && ((sock_net(sk) != seq_file_net(seq)) ||
++		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
+ 
+ 	if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
+ 		sk = sk_head(&state->h->ht[state->bucket]);
+@@ -950,7 +968,10 @@ static void raw_sock_seq_show(struct seq
+ 
+ 	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
+ 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+-		i, src, srcp, dest, destp, sp->sk_state,
++		i,
++		nx_map_sock_lback(current_nx_info(), src), srcp,
++		nx_map_sock_lback(current_nx_info(), dest), destp,
++		sp->sk_state,
+ 		sk_wmem_alloc_get(sp),
+ 		sk_rmem_alloc_get(sp),
+ 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+--- a/net/ipv4/tcp.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv4/tcp.c	2011-06-10 13:03:02.000000000 +0200
+@@ -264,6 +264,7 @@
+ #include <linux/cache.h>
+ #include <linux/err.h>
+ #include <linux/crypto.h>
++#include <linux/in.h>
+ 
+ #include <net/icmp.h>
+ #include <net/tcp.h>
+--- a/net/ipv4/tcp_ipv4.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv4/tcp_ipv4.c	2011-06-10 13:03:02.000000000 +0200
+@@ -1925,6 +1925,12 @@ static void *listening_get_next(struct s
+ 		req = req->dl_next;
+ 		while (1) {
+ 			while (req) {
++				vxdprintk(VXD_CBIT(net, 6),
++					"sk,req: %p [#%d] (from %d)", req->sk,
++					(req->sk)?req->sk->sk_nid:0, nx_current_nid());
++				if (req->sk &&
++					!nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT))
++					continue;
+ 				if (req->rsk_ops->family == st->family) {
+ 					cur = req;
+ 					goto out;
+@@ -1949,6 +1955,10 @@ get_req:
+ 	}
+ get_sk:
+ 	sk_nulls_for_each_from(sk, node) {
++		vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
++			sk, sk->sk_nid, nx_current_nid());
++		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++			continue;
+ 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+ 			cur = sk;
+ 			goto out;
+@@ -2012,6 +2022,11 @@ static void *established_get_first(struc
+ 
+ 		spin_lock_bh(lock);
+ 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
++			vxdprintk(VXD_CBIT(net, 6),
++				"sk,egf: %p [#%d] (from %d)",
++				sk, sk->sk_nid, nx_current_nid());
++			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++				continue;
+ 			if (sk->sk_family != st->family ||
+ 			    !net_eq(sock_net(sk), net)) {
+ 				continue;
+@@ -2022,6 +2037,11 @@ static void *established_get_first(struc
+ 		st->state = TCP_SEQ_STATE_TIME_WAIT;
+ 		inet_twsk_for_each(tw, node,
+ 				   &tcp_hashinfo.ehash[st->bucket].twchain) {
++			vxdprintk(VXD_CBIT(net, 6),
++				"tw: %p [#%d] (from %d)",
++				tw, tw->tw_nid, nx_current_nid());
++			if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
++				continue;
+ 			if (tw->tw_family != st->family ||
+ 			    !net_eq(twsk_net(tw), net)) {
+ 				continue;
+@@ -2050,7 +2070,9 @@ static void *established_get_next(struct
+ 		tw = cur;
+ 		tw = tw_next(tw);
+ get_tw:
+-		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
++		while (tw && (tw->tw_family != st->family ||
++			!net_eq(twsk_net(tw), net) ||
++			!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) {
+ 			tw = tw_next(tw);
+ 		}
+ 		if (tw) {
+@@ -2073,6 +2095,11 @@ get_tw:
+ 		sk = sk_nulls_next(sk);
+ 
+ 	sk_nulls_for_each_from(sk, node) {
++		vxdprintk(VXD_CBIT(net, 6),
++			"sk,egn: %p [#%d] (from %d)",
++			sk, sk->sk_nid, nx_current_nid());
++		if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++			continue;
+ 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
+ 			goto found;
+ 	}
+@@ -2224,9 +2251,9 @@ static void get_openreq4(struct sock *sk
+ 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
+ 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
+ 		i,
+-		ireq->loc_addr,
++		nx_map_sock_lback(current_nx_info(), ireq->loc_addr),
+ 		ntohs(inet_sk(sk)->sport),
+-		ireq->rmt_addr,
++		nx_map_sock_lback(current_nx_info(), ireq->rmt_addr),
+ 		ntohs(ireq->rmt_port),
+ 		TCP_SYN_RECV,
+ 		0, 0, /* could print option size, but that is af dependent. */
+@@ -2269,7 +2296,10 @@ static void get_tcp4_sock(struct sock *s
+ 
+ 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
+ 			"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
+-		i, src, srcp, dest, destp, sk->sk_state,
++		i,
++		nx_map_sock_lback(current_nx_info(), src), srcp,
++		nx_map_sock_lback(current_nx_info(), dest), destp,
++		sk->sk_state,
+ 		tp->write_seq - tp->snd_una,
+ 		sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
+ 					     (tp->rcv_nxt - tp->copied_seq),
+@@ -2305,7 +2335,10 @@ static void get_timewait4_sock(struct in
+ 
+ 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
+ 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
+-		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
++		i,
++		nx_map_sock_lback(current_nx_info(), src), srcp,
++		nx_map_sock_lback(current_nx_info(), dest), destp,
++		tw->tw_substate, 0, 0,
+ 		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
+ 		atomic_read(&tw->tw_refcnt), tw, len);
+ }
+--- a/net/ipv4/tcp_minisocks.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv4/tcp_minisocks.c	2011-06-10 13:03:02.000000000 +0200
+@@ -26,6 +26,10 @@
+ #include <net/inet_common.h>
+ #include <net/xfrm.h>
+ 
++#include <linux/vs_limit.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_context.h>
++
+ #ifdef CONFIG_SYSCTL
+ #define SYNC_INIT 0 /* let the user enable it */
+ #else
+@@ -294,6 +298,11 @@ void tcp_time_wait(struct sock *sk, int 
+ 		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
+ 		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+ 
++		tw->tw_xid		= sk->sk_xid;
++		tw->tw_vx_info		= NULL;
++		tw->tw_nid		= sk->sk_nid;
++		tw->tw_nx_info		= NULL;
++
+ #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ 		if (tw->tw_family == PF_INET6) {
+ 			struct ipv6_pinfo *np = inet6_sk(sk);
+--- a/net/ipv4/udp.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv4/udp.c	2011-06-10 13:03:02.000000000 +0200
+@@ -224,14 +224,7 @@ fail:
+ }
+ EXPORT_SYMBOL(udp_lib_get_port);
+ 
+-static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+-{
+-	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
+-
+-	return 	(!ipv6_only_sock(sk2)  &&
+-		 (!inet1->rcv_saddr || !inet2->rcv_saddr ||
+-		   inet1->rcv_saddr == inet2->rcv_saddr));
+-}
++extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);
+ 
+ int udp_v4_get_port(struct sock *sk, unsigned short snum)
+ {
+@@ -253,6 +246,11 @@ static inline int compute_score(struct s
+ 			if (inet->rcv_saddr != daddr)
+ 				return -1;
+ 			score += 2;
++		} else {
++			/* block non nx_info ips */
++			if (!v4_addr_in_nx_info(sk->sk_nx_info,
++				daddr, NXA_MASK_BIND))
++				return -1;
+ 		}
+ 		if (inet->daddr) {
+ 			if (inet->daddr != saddr)
+@@ -273,6 +271,7 @@ static inline int compute_score(struct s
+ 	return score;
+ }
+ 
++
+ /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
+  * harder than this. -DaveM
+  */
+@@ -294,6 +293,11 @@ begin:
+ 	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+ 		score = compute_score(sk, net, saddr, hnum, sport,
+ 				      daddr, dport, dif);
++		/* FIXME: disabled?
++		if (score == 9) {
++			result = sk;
++			break;
++		} else */
+ 		if (score > badness) {
+ 			result = sk;
+ 			badness = score;
+@@ -307,6 +311,7 @@ begin:
+ 	if (get_nulls_value(node) != hash)
+ 		goto begin;
+ 
++
+ 	if (result) {
+ 		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+ 			result = NULL;
+@@ -316,6 +321,7 @@ begin:
+ 			goto begin;
+ 		}
+ 	}
++
+ 	rcu_read_unlock();
+ 	return result;
+ }
+@@ -358,7 +364,7 @@ static inline struct sock *udp_v4_mcast_
+ 		    s->sk_hash != hnum					||
+ 		    (inet->daddr && inet->daddr != rmt_addr)		||
+ 		    (inet->dport != rmt_port && inet->dport)		||
+-		    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)	||
++		    !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr)	||
+ 		    ipv6_only_sock(s)					||
+ 		    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
+ 			continue;
+@@ -707,8 +713,13 @@ int udp_sendmsg(struct kiocb *iocb, stru
+ 					       { .sport = inet->sport,
+ 						 .dport = dport } } };
+ 		struct net *net = sock_net(sk);
++		struct nx_info *nxi = sk->sk_nx_info;
+ 
+ 		security_sk_classify_flow(sk, &fl);
++		err = ip_v4_find_src(net, nxi, &rt, &fl);
++		if (err)
++			goto out;
++
+ 		err = ip_route_output_flow(net, &rt, &fl, sk, 1);
+ 		if (err) {
+ 			if (err == -ENETUNREACH)
+@@ -988,7 +999,8 @@ try_again:
+ 	if (sin) {
+ 		sin->sin_family = AF_INET;
+ 		sin->sin_port = udp_hdr(skb)->source;
+-		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
++		sin->sin_addr.s_addr = nx_map_sock_lback(
++			skb->sk->sk_nx_info, ip_hdr(skb)->saddr);
+ 		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ 	}
+ 	if (inet->cmsg_flags)
+@@ -1627,6 +1639,8 @@ static struct sock *udp_get_first(struct
+ 		sk_nulls_for_each(sk, node, &hslot->head) {
+ 			if (!net_eq(sock_net(sk), net))
+ 				continue;
++			if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
++				continue;
+ 			if (sk->sk_family == state->family)
+ 				goto found;
+ 		}
+@@ -1644,7 +1658,9 @@ static struct sock *udp_get_next(struct 
+ 
+ 	do {
+ 		sk = sk_nulls_next(sk);
+-	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
++	} while (sk && (!net_eq(sock_net(sk), net) ||
++		sk->sk_family != state->family ||
++		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
+ 
+ 	if (!sk) {
+ 		if (state->bucket < UDP_HTABLE_SIZE)
+@@ -1751,7 +1767,10 @@ static void udp4_format_sock(struct sock
+ 
+ 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
+ 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
+-		bucket, src, srcp, dest, destp, sp->sk_state,
++		bucket,
++		nx_map_sock_lback(current_nx_info(), src), srcp,
++		nx_map_sock_lback(current_nx_info(), dest), destp,
++		sp->sk_state,
+ 		sk_wmem_alloc_get(sp),
+ 		sk_rmem_alloc_get(sp),
+ 		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
+--- a/net/ipv6/Kconfig	2009-09-10 15:26:30.000000000 +0200
++++ a/net/ipv6/Kconfig	2011-06-10 13:03:02.000000000 +0200
+@@ -4,8 +4,8 @@
+ 
+ #   IPv6 as module will cause a CRASH if you try to unload it
+ menuconfig IPV6
+-	tristate "The IPv6 protocol"
+-	default m
++	bool "The IPv6 protocol"
++	default n
+ 	---help---
+ 	  This is complemental support for the IP version 6.
+ 	  You will still be able to do traditional IPv4 networking as well.
+--- a/net/ipv6/addrconf.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv6/addrconf.c	2011-06-10 13:03:02.000000000 +0200
+@@ -86,6 +86,8 @@
+ 
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
++#include <linux/vs_network.h>
++#include <linux/vs_inet6.h>
+ 
+ /* Set to 3 to get tracing... */
+ #define ACONF_DEBUG 2
+@@ -1119,7 +1121,7 @@ out:
+ 
+ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
+ 		       const struct in6_addr *daddr, unsigned int prefs,
+-		       struct in6_addr *saddr)
++		       struct in6_addr *saddr, struct nx_info *nxi)
+ {
+ 	struct ipv6_saddr_score scores[2],
+ 				*score = &scores[0], *hiscore = &scores[1];
+@@ -1192,6 +1194,8 @@ int ipv6_dev_get_saddr(struct net *net, 
+ 					       dev->name);
+ 				continue;
+ 			}
++			if (!v6_addr_in_nx_info(nxi, &score->ifa->addr, -1))
++				continue;
+ 
+ 			score->rule = -1;
+ 			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+@@ -3000,7 +3004,10 @@ static void if6_seq_stop(struct seq_file
+ static int if6_seq_show(struct seq_file *seq, void *v)
+ {
+ 	struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
+-	seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
++
++	if (nx_check(0, VS_ADMIN|VS_WATCH) ||
++	    v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1))
++		seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
+ 		   &ifp->addr,
+ 		   ifp->idev->dev->ifindex,
+ 		   ifp->prefix_len,
+@@ -3497,6 +3504,12 @@ static int inet6_dump_addr(struct sk_buf
+ 	struct ifmcaddr6 *ifmca;
+ 	struct ifacaddr6 *ifaca;
+ 	struct net *net = sock_net(skb->sk);
++	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
++
++	/* disable ipv6 on non v6 guests */
++	if (nxi && !nx_info_has_v6(nxi))
++		return skb->len;
++
+ 
+ 	s_idx = cb->args[0];
+ 	s_ip_idx = ip_idx = cb->args[1];
+@@ -3518,6 +3531,8 @@ static int inet6_dump_addr(struct sk_buf
+ 			     ifa = ifa->if_next, ip_idx++) {
+ 				if (ip_idx < s_ip_idx)
+ 					continue;
++				if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1))
++					continue;
+ 				err = inet6_fill_ifaddr(skb, ifa,
+ 							NETLINK_CB(cb->skb).pid,
+ 							cb->nlh->nlmsg_seq,
+@@ -3531,6 +3546,8 @@ static int inet6_dump_addr(struct sk_buf
+ 			     ifmca = ifmca->next, ip_idx++) {
+ 				if (ip_idx < s_ip_idx)
+ 					continue;
++				if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1))
++					continue;
+ 				err = inet6_fill_ifmcaddr(skb, ifmca,
+ 							  NETLINK_CB(cb->skb).pid,
+ 							  cb->nlh->nlmsg_seq,
+@@ -3544,6 +3561,8 @@ static int inet6_dump_addr(struct sk_buf
+ 			     ifaca = ifaca->aca_next, ip_idx++) {
+ 				if (ip_idx < s_ip_idx)
+ 					continue;
++				if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1))
++					continue;
+ 				err = inet6_fill_ifacaddr(skb, ifaca,
+ 							  NETLINK_CB(cb->skb).pid,
+ 							  cb->nlh->nlmsg_seq,
+@@ -3830,12 +3849,19 @@ static int inet6_dump_ifinfo(struct sk_b
+ 	int s_idx = cb->args[0];
+ 	struct net_device *dev;
+ 	struct inet6_dev *idev;
++	struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
++
++	/* FIXME: maybe disable ipv6 on non v6 guests?
++	if (skb->sk && skb->sk->sk_vx_info)
++		return skb->len; */
+ 
+ 	read_lock(&dev_base_lock);
+ 	idx = 0;
+ 	for_each_netdev(net, dev) {
+ 		if (idx < s_idx)
+ 			goto cont;
++		if (!v6_dev_in_nx_info(dev, nxi))
++			goto cont;
+ 		if ((idev = in6_dev_get(dev)) == NULL)
+ 			goto cont;
+ 		err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
+--- a/net/ipv6/af_inet6.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/af_inet6.c	2011-06-10 13:03:02.000000000 +0200
+@@ -41,6 +41,8 @@
+ #include <linux/netdevice.h>
+ #include <linux/icmpv6.h>
+ #include <linux/netfilter_ipv6.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
+ 
+ #include <net/ip.h>
+ #include <net/ipv6.h>
+@@ -158,9 +160,12 @@ lookup_protocol:
+ 	}
+ 
+ 	err = -EPERM;
++	if ((protocol == IPPROTO_ICMPV6) &&
++		nx_capable(answer->capability, NXC_RAW_ICMP))
++		goto override;
+ 	if (answer->capability > 0 && !capable(answer->capability))
+ 		goto out_rcu_unlock;
+-
++override:
+ 	sock->ops = answer->ops;
+ 	answer_prot = answer->prot;
+ 	answer_no_check = answer->no_check;
+@@ -259,6 +264,7 @@ int inet6_bind(struct socket *sock, stru
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct net *net = sock_net(sk);
++	struct nx_v6_sock_addr nsa;
+ 	__be32 v4addr = 0;
+ 	unsigned short snum;
+ 	int addr_type = 0;
+@@ -270,6 +276,11 @@ int inet6_bind(struct socket *sock, stru
+ 
+ 	if (addr_len < SIN6_LEN_RFC2133)
+ 		return -EINVAL;
++
++	err = v6_map_sock_addr(inet, addr, &nsa);
++	if (err)
++		return err;
++
+ 	addr_type = ipv6_addr_type(&addr->sin6_addr);
+ 	if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
+ 		return -EINVAL;
+@@ -301,6 +312,7 @@ int inet6_bind(struct socket *sock, stru
+ 		/* Reproduce AF_INET checks to make the bindings consitant */
+ 		v4addr = addr->sin6_addr.s6_addr32[3];
+ 		chk_addr_ret = inet_addr_type(net, v4addr);
++
+ 		if (!sysctl_ip_nonlocal_bind &&
+ 		    !(inet->freebind || inet->transparent) &&
+ 		    v4addr != htonl(INADDR_ANY) &&
+@@ -310,6 +322,10 @@ int inet6_bind(struct socket *sock, stru
+ 			err = -EADDRNOTAVAIL;
+ 			goto out;
+ 		}
++		if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) {
++			err = -EADDRNOTAVAIL;
++			goto out;
++		}
+ 	} else {
+ 		if (addr_type != IPV6_ADDR_ANY) {
+ 			struct net_device *dev = NULL;
+@@ -335,6 +351,11 @@ int inet6_bind(struct socket *sock, stru
+ 				}
+ 			}
+ 
++			if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
++				err = -EADDRNOTAVAIL;
++				goto out;
++			}
++
+ 			/* ipv4 addr of the socket is invalid.  Only the
+ 			 * unspecified and mapped address have a v4 equivalent.
+ 			 */
+@@ -353,6 +374,8 @@ int inet6_bind(struct socket *sock, stru
+ 		}
+ 	}
+ 
++	v6_set_sock_addr(inet, &nsa);
++
+ 	inet->rcv_saddr = v4addr;
+ 	inet->saddr = v4addr;
+ 
+@@ -448,9 +471,11 @@ int inet6_getname(struct socket *sock, s
+ 			return -ENOTCONN;
+ 		sin->sin6_port = inet->dport;
+ 		ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
++		/* FIXME: remap lback? */
+ 		if (np->sndflow)
+ 			sin->sin6_flowinfo = np->flow_label;
+ 	} else {
++		/* FIXME: remap lback? */
+ 		if (ipv6_addr_any(&np->rcv_saddr))
+ 			ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
+ 		else
+--- a/net/ipv6/fib6_rules.c	2009-09-10 15:26:30.000000000 +0200
++++ a/net/ipv6/fib6_rules.c	2011-06-10 13:03:02.000000000 +0200
+@@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_r
+ 			if (ipv6_dev_get_saddr(net,
+ 					       ip6_dst_idev(&rt->u.dst)->dev,
+ 					       &flp->fl6_dst, srcprefs,
+-					       &saddr))
++					       &saddr, NULL))
+ 				goto again;
+ 			if (!ipv6_prefix_equal(&saddr, &r->src.addr,
+ 					       r->src.plen))
+--- a/net/ipv6/inet6_hashtables.c	2009-03-24 14:22:46.000000000 +0100
++++ a/net/ipv6/inet6_hashtables.c	2011-06-10 13:03:02.000000000 +0200
+@@ -16,6 +16,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/random.h>
++#include <linux/vs_inet6.h>
+ 
+ #include <net/inet_connection_sock.h>
+ #include <net/inet_hashtables.h>
+@@ -76,7 +77,6 @@ struct sock *__inet6_lookup_established(
+ 	unsigned int slot = hash & (hashinfo->ehash_size - 1);
+ 	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+ 
+-
+ 	rcu_read_lock();
+ begin:
+ 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
+@@ -88,7 +88,7 @@ begin:
+ 				sock_put(sk);
+ 				goto begin;
+ 			}
+-		goto out;
++			goto out;
+ 		}
+ 	}
+ 	if (get_nulls_value(node) != slot)
+@@ -134,6 +134,9 @@ static int inline compute_score(struct s
+ 			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ 				return -1;
+ 			score++;
++		} else {
++			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
++				return -1;
+ 		}
+ 		if (sk->sk_bound_dev_if) {
+ 			if (sk->sk_bound_dev_if != dif)
+--- a/net/ipv6/ip6_output.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv6/ip6_output.c	2011-06-10 13:03:02.000000000 +0200
+@@ -942,7 +942,7 @@ static int ip6_dst_lookup_tail(struct so
+ 		err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
+ 					 &fl->fl6_dst,
+ 					 sk ? inet6_sk(sk)->srcprefs : 0,
+-					 &fl->fl6_src);
++					 &fl->fl6_src, sk->sk_nx_info);
+ 		if (err)
+ 			goto out_err_release;
+ 	}
+--- a/net/ipv6/ndisc.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/ndisc.c	2011-06-10 13:03:02.000000000 +0200
+@@ -589,7 +589,7 @@ static void ndisc_send_na(struct net_dev
+ 	} else {
+ 		if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
+ 				       inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
+-				       &tmpaddr))
++				       &tmpaddr, NULL /* FIXME: ? */ ))
+ 			return;
+ 		src_addr = &tmpaddr;
+ 	}
+--- a/net/ipv6/raw.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/raw.c	2011-06-10 13:03:02.000000000 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/icmpv6.h>
+ #include <linux/netfilter.h>
+ #include <linux/netfilter_ipv6.h>
++#include <linux/vs_inet6.h>
+ #include <linux/skbuff.h>
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
+@@ -281,6 +282,13 @@ static int rawv6_bind(struct sock *sk, s
+ 			}
+ 		}
+ 
++		if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
++			err = -EADDRNOTAVAIL;
++			if (dev)
++				dev_put(dev);
++			goto out;
++		}
++
+ 		/* ipv4 addr of the socket is invalid.  Only the
+ 		 * unspecified and mapped address have a v4 equivalent.
+ 		 */
+--- a/net/ipv6/route.c	2011-05-29 23:42:29.000000000 +0200
++++ a/net/ipv6/route.c	2011-06-10 13:03:02.000000000 +0200
+@@ -2277,7 +2277,8 @@ static int rt6_fill_node(struct net *net
+ 		struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
+ 		struct in6_addr saddr_buf;
+ 		if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
+-				       dst, 0, &saddr_buf) == 0)
++			dst, 0, &saddr_buf,
++			(skb->sk ? skb->sk->sk_nx_info : NULL)) == 0)
+ 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+ 	}
+ 
+--- a/net/ipv6/tcp_ipv6.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/tcp_ipv6.c	2011-06-10 13:03:02.000000000 +0200
+@@ -68,6 +68,7 @@
+ 
+ #include <linux/crypto.h>
+ #include <linux/scatterlist.h>
++#include <linux/vs_inet6.h>
+ 
+ static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
+ static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+@@ -156,8 +157,15 @@ static int tcp_v6_connect(struct sock *s
+ 	 *	connect() to INADDR_ANY means loopback (BSD'ism).
+ 	 */
+ 
+-	if(ipv6_addr_any(&usin->sin6_addr))
+-		usin->sin6_addr.s6_addr[15] = 0x1;
++	if(ipv6_addr_any(&usin->sin6_addr)) {
++		struct nx_info *nxi =  sk->sk_nx_info;
++
++		if (nxi && nx_info_has_v6(nxi))
++			/* FIXME: remap lback? */
++			usin->sin6_addr = nxi->v6.ip;
++		else
++			usin->sin6_addr.s6_addr[15] = 0x1;
++	}
+ 
+ 	addr_type = ipv6_addr_type(&usin->sin6_addr);
+ 
+--- a/net/ipv6/udp.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/udp.c	2011-06-10 13:03:02.000000000 +0200
+@@ -47,6 +47,7 @@
+ 
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
++#include <linux/vs_inet6.h>
+ #include "udp_impl.h"
+ 
+ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
+@@ -61,24 +62,49 @@ int ipv6_rcv_saddr_equal(const struct so
+ 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
+ 
+ 	/* if both are mapped, treat as IPv4 */
+-	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
+-		return (!sk2_ipv6only &&
++	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
++		if (!sk2_ipv6only &&
+ 			(!sk_rcv_saddr || !sk2_rcv_saddr ||
+-			  sk_rcv_saddr == sk2_rcv_saddr));
++			  sk_rcv_saddr == sk2_rcv_saddr))
++			goto vs_v4;
++		else
++			return 0;
++	}
+ 
+ 	if (addr_type2 == IPV6_ADDR_ANY &&
+ 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
+-		return 1;
++		goto vs;
+ 
+ 	if (addr_type == IPV6_ADDR_ANY &&
+ 	    !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+-		return 1;
++		goto vs;
+ 
+ 	if (sk2_rcv_saddr6 &&
+ 	    ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+-		return 1;
++		goto vs;
+ 
+ 	return 0;
++
++vs_v4:
++	if (!sk_rcv_saddr && !sk2_rcv_saddr)
++		return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
++	if (!sk2_rcv_saddr)
++		return v4_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr, -1);
++	if (!sk_rcv_saddr)
++		return v4_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr, -1);
++	return 1;
++vs:
++	if (addr_type2 == IPV6_ADDR_ANY && addr_type == IPV6_ADDR_ANY)
++		return nx_v6_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
++	else if (addr_type2 == IPV6_ADDR_ANY)
++		return v6_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr6, -1);
++	else if (addr_type == IPV6_ADDR_ANY) {
++		if (addr_type2 == IPV6_ADDR_MAPPED)
++			return nx_v4_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info);
++		else
++			return v6_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr6, -1);
++	}
++	return 1;
+ }
+ 
+ int udp_v6_get_port(struct sock *sk, unsigned short snum)
+@@ -109,6 +135,10 @@ static inline int compute_score(struct s
+ 			if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+ 				return -1;
+ 			score++;
++		} else {
++			/* block non nx_info ips */
++			if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
++				return -1;
+ 		}
+ 		if (!ipv6_addr_any(&np->daddr)) {
+ 			if (!ipv6_addr_equal(&np->daddr, saddr))
+--- a/net/ipv6/xfrm6_policy.c	2009-12-03 20:03:00.000000000 +0100
++++ a/net/ipv6/xfrm6_policy.c	2011-06-10 13:03:02.000000000 +0200
+@@ -63,7 +63,7 @@ static int xfrm6_get_saddr(struct net *n
+ 	dev = ip6_dst_idev(dst)->dev;
+ 	ipv6_dev_get_saddr(dev_net(dev), dev,
+ 			   (struct in6_addr *)&daddr->a6, 0,
+-			   (struct in6_addr *)&saddr->a6);
++			   (struct in6_addr *)&saddr->a6, NULL);
+ 	dst_release(dst);
+ 	return 0;
+ }
+--- a/net/netlink/af_netlink.c	2011-05-29 23:42:30.000000000 +0200
++++ a/net/netlink/af_netlink.c	2011-06-10 13:03:02.000000000 +0200
+@@ -55,6 +55,9 @@
+ #include <linux/types.h>
+ #include <linux/audit.h>
+ #include <linux/mutex.h>
++#include <linux/vs_context.h>
++#include <linux/vs_network.h>
++#include <linux/vs_limit.h>
+ 
+ #include <net/net_namespace.h>
+ #include <net/sock.h>
+@@ -1885,6 +1888,8 @@ static struct sock *netlink_seq_socket_i
+ 			sk_for_each(s, node, &hash->table[j]) {
+ 				if (sock_net(s) != seq_file_net(seq))
+ 					continue;
++				if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
++					continue;
+ 				if (off == pos) {
+ 					iter->link = i;
+ 					iter->hash_idx = j;
+@@ -1919,7 +1924,8 @@ static void *netlink_seq_next(struct seq
+ 	s = v;
+ 	do {
+ 		s = sk_next(s);
+-	} while (s && sock_net(s) != seq_file_net(seq));
++	} while (s && (sock_net(s) != seq_file_net(seq) ||
++		!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)));
+ 	if (s)
+ 		return s;
+ 
+@@ -1931,7 +1937,8 @@ static void *netlink_seq_next(struct seq
+ 
+ 		for (; j <= hash->mask; j++) {
+ 			s = sk_head(&hash->table[j]);
+-			while (s && sock_net(s) != seq_file_net(seq))
++			while (s && (sock_net(s) != seq_file_net(seq) ||
++				!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT)))
+ 				s = sk_next(s);
+ 			if (s) {
+ 				iter->link = i;
+--- a/net/sctp/ipv6.c	2009-12-03 20:03:01.000000000 +0100
++++ a/net/sctp/ipv6.c	2011-06-10 13:03:02.000000000 +0200
+@@ -316,7 +316,8 @@ static void sctp_v6_get_saddr(struct sct
+ 				   dst ? ip6_dst_idev(dst)->dev : NULL,
+ 				   &daddr->v6.sin6_addr,
+ 				   inet6_sk(&sk->inet.sk)->srcprefs,
+-				   &saddr->v6.sin6_addr);
++				   &saddr->v6.sin6_addr,
++				   asoc->base.sk->sk_nx_info);
+ 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
+ 				  &saddr->v6.sin6_addr);
+ 		return;
+--- a/net/socket.c	2011-05-29 23:42:30.000000000 +0200
++++ a/net/socket.c	2011-06-10 13:03:02.000000000 +0200
+@@ -96,6 +96,10 @@
+ 
+ #include <net/sock.h>
+ #include <linux/netfilter.h>
++#include <linux/vs_base.h>
++#include <linux/vs_socket.h>
++#include <linux/vs_inet.h>
++#include <linux/vs_inet6.h>
+ 
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+@@ -559,7 +563,7 @@ static inline int __sock_sendmsg(struct 
+ 				 struct msghdr *msg, size_t size)
+ {
+ 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+-	int err;
++	int err, len;
+ 
+ 	si->sock = sock;
+ 	si->scm = NULL;
+@@ -570,7 +574,22 @@ static inline int __sock_sendmsg(struct 
+ 	if (err)
+ 		return err;
+ 
+-	return sock->ops->sendmsg(iocb, sock, msg, size);
++	len = sock->ops->sendmsg(iocb, sock, msg, size);
++	if (sock->sk) {
++		if (len == size)
++			vx_sock_send(sock->sk, size);
++		else
++			vx_sock_fail(sock->sk, size);
++	}
++	vxdprintk(VXD_CBIT(net, 7),
++		"__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
++		sock, sock->sk,
++		(sock->sk)?sock->sk->sk_nx_info:0,
++		(sock->sk)?sock->sk->sk_vx_info:0,
++		(sock->sk)?sock->sk->sk_xid:0,
++		(sock->sk)?sock->sk->sk_nid:0,
++		(unsigned int)size, len);
++	return len;
+ }
+ 
+ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+@@ -671,7 +690,7 @@ EXPORT_SYMBOL_GPL(__sock_recv_timestamp)
+ static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ 				 struct msghdr *msg, size_t size, int flags)
+ {
+-	int err;
++	int err, len;
+ 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+ 
+ 	si->sock = sock;
+@@ -684,7 +703,18 @@ static inline int __sock_recvmsg(struct 
+ 	if (err)
+ 		return err;
+ 
+-	return sock->ops->recvmsg(iocb, sock, msg, size, flags);
++	len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
++	if ((len >= 0) && sock->sk)
++		vx_sock_recv(sock->sk, len);
++	vxdprintk(VXD_CBIT(net, 7),
++		"__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
++		sock, sock->sk,
++		(sock->sk)?sock->sk->sk_nx_info:0,
++		(sock->sk)?sock->sk->sk_vx_info:0,
++		(sock->sk)?sock->sk->sk_xid:0,
++		(sock->sk)?sock->sk->sk_nid:0,
++		(unsigned int)size, len);
++	return len;
+ }
+ 
+ int sock_recvmsg(struct socket *sock, struct msghdr *msg,
+@@ -1155,6 +1185,13 @@ static int __sock_create(struct net *net
+ 	if (type < 0 || type >= SOCK_MAX)
+ 		return -EINVAL;
+ 
++	if (!nx_check(0, VS_ADMIN)) {
++		if (family == PF_INET && !current_nx_info_has_v4())
++			return -EAFNOSUPPORT;
++		if (family == PF_INET6 && !current_nx_info_has_v6())
++			return -EAFNOSUPPORT;
++	}
++
+ 	/* Compatibility.
+ 
+ 	   This uglymoron is moved from INET layer to here to avoid
+@@ -1287,6 +1324,7 @@ SYSCALL_DEFINE3(socket, int, family, int
+ 	if (retval < 0)
+ 		goto out;
+ 
++	set_bit(SOCK_USER_SOCKET, &sock->flags);
+ 	retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+ 	if (retval < 0)
+ 		goto out_release;
+@@ -1328,10 +1366,12 @@ SYSCALL_DEFINE4(socketpair, int, family,
+ 	err = sock_create(family, type, protocol, &sock1);
+ 	if (err < 0)
+ 		goto out;
++	set_bit(SOCK_USER_SOCKET, &sock1->flags);
+ 
+ 	err = sock_create(family, type, protocol, &sock2);
+ 	if (err < 0)
+ 		goto out_release_1;
++	set_bit(SOCK_USER_SOCKET, &sock2->flags);
+ 
+ 	err = sock1->ops->socketpair(sock1, sock2);
+ 	if (err < 0)
+--- a/net/sunrpc/auth.c	2009-12-03 20:03:01.000000000 +0100
++++ a/net/sunrpc/auth.c	2011-06-10 13:03:02.000000000 +0200
+@@ -14,6 +14,7 @@
+ #include <linux/hash.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/spinlock.h>
++#include <linux/vs_tag.h>
+ 
+ #ifdef RPC_DEBUG
+ # define RPCDBG_FACILITY	RPCDBG_AUTH
+@@ -360,6 +361,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
+ 	memset(&acred, 0, sizeof(acred));
+ 	acred.uid = cred->fsuid;
+ 	acred.gid = cred->fsgid;
++	acred.tag = dx_current_tag();
+ 	acred.group_info = get_group_info(((struct cred *)cred)->group_info);
+ 
+ 	ret = auth->au_ops->lookup_cred(auth, &acred, flags);
+@@ -400,6 +402,7 @@ rpcauth_bind_root_cred(struct rpc_task *
+ 	struct auth_cred acred = {
+ 		.uid = 0,
+ 		.gid = 0,
++		.tag = dx_current_tag(),
+ 	};
+ 	struct rpc_cred *ret;
+ 
+--- a/net/sunrpc/auth_unix.c	2008-12-25 00:26:37.000000000 +0100
++++ a/net/sunrpc/auth_unix.c	2011-06-10 13:03:02.000000000 +0200
+@@ -11,12 +11,14 @@
+ #include <linux/module.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/auth.h>
++#include <linux/vs_tag.h>
+ 
+ #define NFS_NGROUPS	16
+ 
+ struct unx_cred {
+ 	struct rpc_cred		uc_base;
+ 	gid_t			uc_gid;
++	tag_t			uc_tag;
+ 	gid_t			uc_gids[NFS_NGROUPS];
+ };
+ #define uc_uid			uc_base.cr_uid
+@@ -78,6 +80,7 @@ unx_create_cred(struct rpc_auth *auth, s
+ 		groups = NFS_NGROUPS;
+ 
+ 	cred->uc_gid = acred->gid;
++	cred->uc_tag = acred->tag;
+ 	for (i = 0; i < groups; i++)
+ 		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
+ 	if (i < NFS_NGROUPS)
+@@ -119,7 +122,9 @@ unx_match(struct auth_cred *acred, struc
+ 	unsigned int i;
+ 
+ 
+-	if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
++	if (cred->uc_uid != acred->uid ||
++		cred->uc_gid != acred->gid ||
++		cred->uc_tag != acred->tag)
+ 		return 0;
+ 
+ 	if (acred->group_info != NULL)
+@@ -142,7 +147,7 @@ unx_marshal(struct rpc_task *task, __be3
+ 	struct rpc_clnt	*clnt = task->tk_client;
+ 	struct unx_cred	*cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
+ 	__be32		*base, *hold;
+-	int		i;
++	int		i, tag;
+ 
+ 	*p++ = htonl(RPC_AUTH_UNIX);
+ 	base = p++;
+@@ -152,9 +157,12 @@ unx_marshal(struct rpc_task *task, __be3
+ 	 * Copy the UTS nodename captured when the client was created.
+ 	 */
+ 	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
++	tag = task->tk_client->cl_tag;
+ 
+-	*p++ = htonl((u32) cred->uc_uid);
+-	*p++ = htonl((u32) cred->uc_gid);
++	*p++ = htonl((u32) TAGINO_UID(tag,
++		cred->uc_uid, cred->uc_tag));
++	*p++ = htonl((u32) TAGINO_GID(tag,
++		cred->uc_gid, cred->uc_tag));
+ 	hold = p++;
+ 	for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
+ 		*p++ = htonl((u32) cred->uc_gids[i]);
+--- a/net/sunrpc/clnt.c	2009-12-03 20:03:01.000000000 +0100
++++ a/net/sunrpc/clnt.c	2011-06-10 13:03:02.000000000 +0200
+@@ -33,6 +33,7 @@
+ #include <linux/utsname.h>
+ #include <linux/workqueue.h>
+ #include <linux/in6.h>
++#include <linux/vs_cvirt.h>
+ 
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/sunrpc/rpc_pipe_fs.h>
+@@ -358,6 +359,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
+ 	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
+ 		clnt->cl_chatty = 1;
+ 
++	/* TODO: handle RPC_CLNT_CREATE_TAGGED
++	if (args->flags & RPC_CLNT_CREATE_TAGGED)
++		clnt->cl_tag = 1; */
+ 	return clnt;
+ }
+ EXPORT_SYMBOL_GPL(rpc_create);
+--- a/net/unix/af_unix.c	2011-05-29 23:42:30.000000000 +0200
++++ a/net/unix/af_unix.c	2011-06-10 13:03:02.000000000 +0200
+@@ -114,6 +114,8 @@
+ #include <linux/mount.h>
+ #include <net/checksum.h>
+ #include <linux/security.h>
++#include <linux/vs_context.h>
++#include <linux/vs_limit.h>
+ 
+ static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+ static DEFINE_SPINLOCK(unix_table_lock);
+@@ -258,6 +260,8 @@ static struct sock *__unix_find_socket_b
+ 		if (!net_eq(sock_net(s), net))
+ 			continue;
+ 
++		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
++			continue;
+ 		if (u->addr->len == len &&
+ 		    !memcmp(u->addr->name, sunname, len))
+ 			goto found;
+@@ -2164,6 +2168,8 @@ static struct sock *unix_seq_idx(struct 
+ 	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
+ 		if (sock_net(s) != seq_file_net(seq))
+ 			continue;
++		if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
++			continue;
+ 		if (off == pos)
+ 			return s;
+ 		++off;
+@@ -2188,7 +2194,8 @@ static void *unix_seq_next(struct seq_fi
+ 		sk = first_unix_socket(&iter->i);
+ 	else
+ 		sk = next_unix_socket(&iter->i, sk);
+-	while (sk && (sock_net(sk) != seq_file_net(seq)))
++	while (sk && (sock_net(sk) != seq_file_net(seq) ||
++		!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)))
+ 		sk = next_unix_socket(&iter->i, sk);
+ 	return sk;
+ }
+--- a/scripts/checksyscalls.sh	2009-09-10 15:26:31.000000000 +0200
++++ a/scripts/checksyscalls.sh	2011-06-10 13:03:02.000000000 +0200
+@@ -194,7 +194,6 @@ cat << EOF
+ #define __IGNORE_afs_syscall
+ #define __IGNORE_getpmsg
+ #define __IGNORE_putpmsg
+-#define __IGNORE_vserver
+ EOF
+ }
+ 
+--- a/security/commoncap.c	2009-12-03 20:03:02.000000000 +0100
++++ a/security/commoncap.c	2011-06-10 13:03:02.000000000 +0200
+@@ -27,6 +27,7 @@
+ #include <linux/sched.h>
+ #include <linux/prctl.h>
+ #include <linux/securebits.h>
++#include <linux/vs_context.h>
+ 
+ /*
+  * If a non-root user executes a setuid-root binary in
+@@ -52,7 +53,7 @@ static void warn_setuid_and_fcaps_mixed(
+ 
+ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
+ {
+-	NETLINK_CB(skb).eff_cap = current_cap();
++	NETLINK_CB(skb).eff_cap = vx_mbcaps(current_cap());
+ 	return 0;
+ }
+ 
+@@ -62,6 +63,7 @@ int cap_netlink_recv(struct sk_buff *skb
+ 		return -EPERM;
+ 	return 0;
+ }
++
+ EXPORT_SYMBOL(cap_netlink_recv);
+ 
+ /**
+@@ -82,7 +84,22 @@ EXPORT_SYMBOL(cap_netlink_recv);
+ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
+ 		int audit)
+ {
+-	return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
++	struct vx_info *vxi = tsk->vx_info;
++
++#if 0
++	printk("cap_capable() VXF_STATE_SETUP = %llx, raised = %x, eff = %08x:%08x\n",
++		vx_info_flags(vxi, VXF_STATE_SETUP, 0),
++		cap_raised(tsk->cap_effective, cap),
++		tsk->cap_effective.cap[1], tsk->cap_effective.cap[0]);
++#endif
++
++	/* special case SETUP */
++	if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
++		/* FIXME: maybe use cred instead? */
++		cap_raised(tsk->cred->cap_effective, cap))
++		return 0;
++
++	return vx_cap_raised(vxi, cred->cap_effective, cap) ? 0 : -EPERM;
+ }
+ 
+ /**
+@@ -618,7 +635,7 @@ int cap_inode_setxattr(struct dentry *de
+ 
+ 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ 		     sizeof(XATTR_SECURITY_PREFIX) - 1)  &&
+-	    !capable(CAP_SYS_ADMIN))
++		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
+ 		return -EPERM;
+ 	return 0;
+ }
+@@ -644,7 +661,7 @@ int cap_inode_removexattr(struct dentry 
+ 
+ 	if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ 		     sizeof(XATTR_SECURITY_PREFIX) - 1)  &&
+-	    !capable(CAP_SYS_ADMIN))
++		!vx_capable(CAP_SYS_ADMIN, VXC_FS_SECURITY))
+ 		return -EPERM;
+ 	return 0;
+ }
+@@ -962,7 +979,8 @@ error:
+  */
+ int cap_syslog(int type)
+ {
+-	if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
++	if ((type != 3 && type != 10) &&
++		!vx_capable(CAP_SYS_ADMIN, VXC_SYSLOG))
+ 		return -EPERM;
+ 	return 0;
+ }
+--- a/security/selinux/hooks.c	2011-05-29 23:42:30.000000000 +0200
++++ a/security/selinux/hooks.c	2011-06-10 13:03:02.000000000 +0200
+@@ -64,7 +64,6 @@
+ #include <linux/dccp.h>
+ #include <linux/quota.h>
+ #include <linux/un.h>		/* for Unix socket types */
+-#include <net/af_unix.h>	/* for Unix socket types */
+ #include <linux/parser.h>
+ #include <linux/nfs_mount.h>
+ #include <net/ipv6.h>
+--- a/security/selinux/include/av_perm_to_string.h	2009-12-03 20:03:02.000000000 +0100
++++ a/security/selinux/include/av_perm_to_string.h	2011-06-10 13:03:02.000000000 +0200
+@@ -142,6 +142,7 @@
+    S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
+    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
+    S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
++   S_(SECCLASS_CAPABILITY2, CAPABILITY2__CONTEXT, "context")
+    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
+    S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
+    S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")
+--- a/security/selinux/include/av_permissions.h	2009-12-03 20:03:02.000000000 +0100
++++ a/security/selinux/include/av_permissions.h	2011-06-10 13:03:02.000000000 +0200
+@@ -565,6 +565,7 @@
+ #define CAPABILITY__SETFCAP                       0x80000000UL
+ #define CAPABILITY2__MAC_OVERRIDE                 0x00000001UL
+ #define CAPABILITY2__MAC_ADMIN                    0x00000002UL
++#define CAPABILITY2__CONTEXT                      0x00000004UL
+ #define NETLINK_ROUTE_SOCKET__IOCTL               0x00000001UL
+ #define NETLINK_ROUTE_SOCKET__READ                0x00000002UL
+ #define NETLINK_ROUTE_SOCKET__WRITE               0x00000004UL

Copied: dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch)
@@ -0,0 +1,16 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: [powerpc] vserver: Wire up vserver syscall
+
+Copied from lenny's vs2.3.0.35.patch, with the path adjusted.
+
+--- a/arch/powerpc/include/asm/systbl.h
++++ b/arch/powerpc/include/asm/systbl.h
+@@ -260,7 +260,7 @@
+ SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+ PPC_SYS_SPU(rtas)
+ OLDSYS(debug_setcontext)
+-SYSCALL(ni_syscall)
++SYSX(sys_vserver, sys32_vserver, sys_vserver)
+ COMPAT_SYS(migrate_pages)
+ COMPAT_SYS(mbind)
+ COMPAT_SYS(get_mempolicy)

Modified: dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/pvops.patch
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/pvops.patch	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/patches/features/all/xen/pvops.patch	Sun Jan  8 10:47:12 2012	(r18474)
@@ -6,22 +6,20 @@
 
 To regenerate:
 
-$ git checkout -b debian-base v2.6.32.24
+$ git checkout -b debian-base v2.6.32.43
 $ git pull git://xenbits.xensource.com/people/ianc/linux-2.6.git debian/squeeze/pvhvm
 
 $ git checkout -b debian-pvops e73f4955a821f850f5b88c32d12a81714523a95f
 $ git revert -m 1 bcf16b6b4f34fb40a7aaf637947c7d3bce0be671
-$ git merge v2.6.32.24
+$ git merge v2.6.32.43
 
 $ git diff debian-base..debian-pvops
 
-[bwh: Fix context in arch/x86/xen/mmu.c following 2.6.32.36]
-
 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 9ec8558..3e30e60 100644
+index 9238f05..eb729e4 100644
 --- a/Documentation/kernel-parameters.txt
 +++ b/Documentation/kernel-parameters.txt
-@@ -2768,10 +2768,8 @@ and is between 256 and 4096 characters. It is defined in the file
+@@ -2773,10 +2773,8 @@ and is between 256 and 4096 characters. It is defined in the file
  			aux-ide-disks -- unplug non-primary-master IDE devices
  			nics -- unplug network devices
  			all -- unplug all emulated devices (NICs and IDE disks)
@@ -144,10 +142,10 @@
  
  	paging_init();
 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
-index 04f638d..df2c9e9 100644
+index 00d3b65..dc5eede 100644
 --- a/arch/powerpc/kernel/setup_64.c
 +++ b/arch/powerpc/kernel/setup_64.c
-@@ -550,7 +550,7 @@ void __init setup_arch(char **cmdline_p)
+@@ -559,7 +559,7 @@ void __init setup_arch(char **cmdline_p)
  
  #ifdef CONFIG_SWIOTLB
  	if (ppc_swiotlb_enable)
@@ -157,10 +155,10 @@
  
  	paging_init();
 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index cb5a57c..a3b7475 100644
+index 73ae02a..0266f87 100644
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -1885,6 +1885,10 @@ config PCI_OLPC
+@@ -1896,6 +1896,10 @@ config PCI_OLPC
  	def_bool y
  	depends on PCI && OLPC && (PCI_GOOLPC || PCI_GOANY)
  
@@ -358,7 +356,7 @@
  
  static inline int arch_prepare_hugepage(struct page *page)
 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index 7373932..49ee1a9 100644
+index 6a63b86..21eebd7 100644
 --- a/arch/x86/include/asm/io.h
 +++ b/arch/x86/include/asm/io.h
 @@ -7,6 +7,10 @@
@@ -372,7 +370,7 @@
  #define build_mmio_read(name, size, type, reg, barrier) \
  static inline type name(const volatile void __iomem *addr) \
  { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
-@@ -199,6 +203,17 @@ extern void __iomem *early_memremap(resource_size_t phys_addr,
+@@ -200,6 +204,17 @@ extern void __iomem *early_memremap(resource_size_t phys_addr,
  				    unsigned long size);
  extern void early_iounmap(void __iomem *addr, unsigned long size);
  
@@ -697,7 +695,7 @@
  
  /* fs/proc/kcore.c */
 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 13b1885..0aac25a 100644
+index da35a70..a285ddb 100644
 --- a/arch/x86/include/asm/processor.h
 +++ b/arch/x86/include/asm/processor.h
 @@ -551,6 +551,9 @@ static inline void native_set_iopl_mask(unsigned mask)
@@ -1241,10 +1239,10 @@
 + 
 +#endif /* _ASM_X86_SWIOTLB_XEN_H */
 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
-index d8e5d0c..6e80af9 100644
+index d1911ab..cfe00bc 100644
 --- a/arch/x86/kernel/Makefile
 +++ b/arch/x86/kernel/Makefile
-@@ -111,6 +111,7 @@ obj-$(CONFIG_X86_MRST)		+= mrst.o
+@@ -113,6 +113,7 @@ obj-$(CONFIG_X86_MRST)		+= mrst.o
  microcode-y				:= microcode_core.o
  microcode-$(CONFIG_MICROCODE_INTEL)	+= microcode_intel.o
  microcode-$(CONFIG_MICROCODE_AMD)	+= microcode_amd.o
@@ -1253,7 +1251,7 @@
  
  obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index 23c2da8..a2a5125 100644
+index 8ba08c7..90be997 100644
 --- a/arch/x86/kernel/acpi/boot.c
 +++ b/arch/x86/kernel/acpi/boot.c
 @@ -42,6 +42,10 @@
@@ -1267,7 +1265,7 @@
  static int __initdata acpi_force = 0;
  u32 acpi_rsdt_forced;
  int acpi_disabled;
-@@ -149,6 +153,10 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+@@ -150,6 +154,10 @@ static void __cpuinit acpi_register_lapic(int id, u8 enabled)
  {
  	unsigned int ver = 0;
  
@@ -1278,7 +1276,7 @@
  	if (!enabled) {
  		++disabled_cpus;
  		return;
-@@ -461,9 +469,13 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+@@ -467,9 +475,13 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
   */
  int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
  {
@@ -1293,7 +1291,7 @@
  #ifdef CONFIG_PCI
  	/*
  	 * Make sure all (legacy) PCI IRQs are set as level-triggered.
-@@ -740,6 +752,10 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
+@@ -746,6 +758,10 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
  
  static void __init acpi_register_lapic_address(unsigned long address)
  {
@@ -1304,7 +1302,7 @@
  	mp_lapic_addr = address;
  
  	set_fixmap_nocache(FIX_APIC_BASE, address);
-@@ -860,6 +876,9 @@ int __init acpi_probe_gsi(void)
+@@ -866,6 +882,9 @@ int __init acpi_probe_gsi(void)
  			max_gsi = gsi;
  	}
  
@@ -1370,7 +1368,7 @@
  #include "sleep.h"
  
 diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
-index f0fa7a1..0c1876b 100644
+index 7cd33f7..b8497c6 100644
 --- a/arch/x86/kernel/amd_iommu.c
 +++ b/arch/x86/kernel/amd_iommu.c
 @@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
@@ -1418,7 +1416,7 @@
  }
  
  /*
-@@ -1691,7 +1691,7 @@ static void __unmap_single(struct amd_iommu *iommu,
+@@ -1692,7 +1692,7 @@ static void __unmap_single(struct amd_iommu *iommu,
  	dma_addr_t i, start;
  	unsigned int pages;
  
@@ -1427,7 +1425,7 @@
  	    (dma_addr + size > dma_dom->aperture_size))
  		return;
  
-@@ -1733,7 +1733,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
+@@ -1735,7 +1735,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
  	INC_STATS_COUNTER(cnt_map_single);
  
  	if (!check_device(dev))
@@ -1436,7 +1434,7 @@
  
  	dma_mask = *dev->dma_mask;
  
-@@ -1744,12 +1744,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
+@@ -1746,12 +1746,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
  		return (dma_addr_t)paddr;
  
  	if (!dma_ops_domain(domain))
@@ -1451,7 +1449,7 @@
  		goto out;
  
  	iommu_completion_wait(iommu);
-@@ -1958,7 +1958,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
+@@ -1960,7 +1960,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
  	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
  				 size, DMA_BIDIRECTIONAL, true, dma_mask);
  
@@ -1460,7 +1458,7 @@
  		spin_unlock_irqrestore(&domain->lock, flags);
  		goto out_free;
  	}
-@@ -2120,8 +2120,7 @@ int __init amd_iommu_init_dma_ops(void)
+@@ -2122,8 +2122,7 @@ int __init amd_iommu_init_dma_ops(void)
  		prealloc_protection_domains();
  
  	iommu_detected = 1;
@@ -1471,7 +1469,7 @@
  	gart_iommu_aperture_disabled = 1;
  	gart_iommu_aperture = 0;
 diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
-index 3925adf..642793e 100644
+index fc5d470..838e1d7 100644
 --- a/arch/x86/kernel/amd_iommu_init.c
 +++ b/arch/x86/kernel/amd_iommu_init.c
 @@ -29,6 +29,7 @@
@@ -1482,7 +1480,7 @@
  
  /*
   * definitions for the ACPI scanning code
-@@ -1183,19 +1184,10 @@ static struct sys_device device_amd_iommu = {
+@@ -1206,19 +1207,10 @@ static struct sys_device device_amd_iommu = {
   * functions. Finally it prints some information about AMD IOMMUs and
   * the driver state and enables the hardware.
   */
@@ -1503,7 +1501,7 @@
  	/*
  	 * First parse ACPI tables to find the largest Bus/Dev/Func
  	 * we need to handle. Upon this information the shared data
-@@ -1310,6 +1302,7 @@ int __init amd_iommu_init(void)
+@@ -1333,6 +1325,7 @@ int __init amd_iommu_init(void)
  	else
  		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
  
@@ -1511,7 +1509,7 @@
  out:
  	return ret;
  
-@@ -1338,11 +1331,6 @@ free:
+@@ -1361,11 +1354,6 @@ free:
  	goto out;
  }
  
@@ -1523,7 +1521,7 @@
  /****************************************************************************
   *
   * Early detect code. This code runs at IOMMU detection time in the DMA
-@@ -1357,16 +1345,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
+@@ -1380,16 +1368,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
  
  void __init amd_iommu_detect(void)
  {
@@ -1572,7 +1570,7 @@
  	} else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
  		   force_iommu ||
 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index 0da6495..42d1fe2 100644
+index 8928d97..4848d5d 100644
 --- a/arch/x86/kernel/apic/io_apic.c
 +++ b/arch/x86/kernel/apic/io_apic.c
 @@ -63,7 +63,12 @@
@@ -1588,7 +1586,7 @@
  
  #define __apicdebuginit(type) static type __init
  #define for_each_irq_pin(entry, head) \
-@@ -390,14 +395,18 @@ static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
+@@ -395,14 +400,18 @@ static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
  
  static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
  {
@@ -1609,7 +1607,7 @@
  	writel(reg, &io_apic->index);
  	writel(value, &io_apic->data);
  }
-@@ -410,7 +419,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
+@@ -415,7 +424,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned i
   */
  static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
  {
@@ -1620,7 +1618,7 @@
  
  	if (sis_apic_bug)
  		writel(reg, &io_apic->index);
-@@ -3487,6 +3498,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+@@ -3494,6 +3505,9 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
  	if (type == PCI_CAP_ID_MSI && nvec > 1)
  		return 1;
  
@@ -1630,7 +1628,7 @@
  	node = dev_to_node(&dev->dev);
  	irq_want = nr_irqs_gsi;
  	sub_handle = 0;
-@@ -3536,7 +3550,29 @@ error:
+@@ -3543,7 +3557,29 @@ error:
  
  void arch_teardown_msi_irq(unsigned int irq)
  {
@@ -1661,7 +1659,7 @@
  }
  
  #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
-@@ -3852,7 +3888,14 @@ void __init probe_nr_irqs_gsi(void)
+@@ -3860,7 +3896,14 @@ void __init probe_nr_irqs_gsi(void)
  	printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
  }
  
@@ -1676,7 +1674,7 @@
  int __init arch_probe_nr_irqs(void)
  {
  	int nr;
-@@ -3870,6 +3913,8 @@ int __init arch_probe_nr_irqs(void)
+@@ -3878,6 +3921,8 @@ int __init arch_probe_nr_irqs(void)
  	if (nr < nr_irqs)
  		nr_irqs = nr;
  
@@ -1796,7 +1794,7 @@
 +	.num_var_ranges	   	= generic_num_var_ranges,
  };
 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 84e83de..c8cb9ed 100644
+index fd60f09..aa21994 100644
 --- a/arch/x86/kernel/cpu/mtrr/main.c
 +++ b/arch/x86/kernel/cpu/mtrr/main.c
 @@ -110,21 +110,6 @@ static int have_wrcomb(void)
@@ -1821,7 +1819,7 @@
  static void __init init_table(void)
  {
  	int i, max;
-@@ -711,8 +696,11 @@ void __init mtrr_bp_init(void)
+@@ -721,8 +706,11 @@ void __init mtrr_bp_init(void)
  		}
  	}
  
@@ -2012,7 +2010,7 @@
  #endif	/* CONFIG_XEN */
  
 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index a626344..1bf0911 100644
+index cba6d5a..f7aee5e 100644
 --- a/arch/x86/kernel/entry_64.S
 +++ b/arch/x86/kernel/entry_64.S
 @@ -1365,7 +1365,7 @@ ENTRY(xen_failsafe_callback)
@@ -2025,7 +2023,7 @@
  #endif /* CONFIG_XEN */
  
 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
-index 19528ef..40e47cd 100644
+index 69cab24..e791bb2 100644
 --- a/arch/x86/kernel/hpet.c
 +++ b/arch/x86/kernel/hpet.c
 @@ -98,7 +98,7 @@ static int __init hpet_setup(char *str)
@@ -2854,7 +2852,7 @@
  	}
  }
 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 5fd5b07..11d8667 100644
+index fc6c84d..d079f92 100644
 --- a/arch/x86/kernel/process.c
 +++ b/arch/x86/kernel/process.c
 @@ -73,16 +73,12 @@ void exit_thread(void)
@@ -2902,7 +2900,7 @@
  
  int sys_fork(struct pt_regs *regs)
 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
-index 269c2a3..8e1aac8 100644
+index 200fcde..ff8cc40 100644
 --- a/arch/x86/kernel/reboot.c
 +++ b/arch/x86/kernel/reboot.c
 @@ -23,7 +23,7 @@
@@ -2924,7 +2922,7 @@
  }
  
 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index c852868..594e324 100644
+index 917e531..ee5da53 100644
 --- a/arch/x86/kernel/setup.c
 +++ b/arch/x86/kernel/setup.c
 @@ -70,6 +70,7 @@
@@ -2951,7 +2949,7 @@
  
  #include <asm/percpu.h>
  #include <asm/topology.h>
-@@ -967,6 +968,9 @@ void __init setup_arch(char **cmdline_p)
+@@ -968,6 +969,9 @@ void __init setup_arch(char **cmdline_p)
  
  	initmem_init(0, max_pfn);
  
@@ -3042,7 +3040,7 @@
  
  #ifdef CONFIG_STRICT_DEVMEM
 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index c9ba9de..103e324 100644
+index e0e6fad..ef2236a 100644
 --- a/arch/x86/mm/pgtable.c
 +++ b/arch/x86/mm/pgtable.c
 @@ -4,6 +4,9 @@
@@ -3072,7 +3070,7 @@
  pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
  {
  	return (pte_t *)__get_free_page(PGALLOC_GFP);
-@@ -288,6 +301,12 @@ out:
+@@ -287,6 +300,12 @@ out:
  
  void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  {
@@ -3529,7 +3527,7 @@
 +#endif
 +}
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 37d30e4..56b85d2 100644
+index d96d81f..ce8db7a 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
 @@ -29,6 +29,7 @@
@@ -3749,7 +3747,7 @@
  	.io_delay = xen_io_delay,
  
  	/* Xen takes care of %gs when switching to usermode for us */
-@@ -1024,15 +1068,40 @@ static void xen_machine_halt(void)
+@@ -1020,15 +1064,40 @@ static void xen_machine_halt(void)
  	xen_reboot(SHUTDOWN_poweroff);
  }
  
@@ -3791,7 +3789,7 @@
  	.shutdown = xen_machine_halt,
  	.crash_shutdown = xen_crash_shutdown,
  	.emergency_restart = xen_emergency_restart,
-@@ -1065,6 +1134,8 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1061,6 +1130,8 @@ asmlinkage void __init xen_start_kernel(void)
  
  	xen_domain_type = XEN_PV_DOMAIN;
  
@@ -3800,7 +3798,7 @@
  	/* Install Xen paravirt ops */
  	pv_info = xen_info;
  	pv_init_ops = xen_init_ops;
-@@ -1113,6 +1184,10 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1109,6 +1180,10 @@ asmlinkage void __init xen_start_kernel(void)
  	 */
  	xen_setup_stackprotector();
  
@@ -3811,7 +3809,7 @@
  	xen_init_irq_ops();
  	xen_init_cpuid_mask();
  
-@@ -1141,6 +1216,8 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1137,6 +1212,8 @@ asmlinkage void __init xen_start_kernel(void)
  
  	pgd = (pgd_t *)xen_start_info->pt_base;
  
@@ -3820,7 +3818,7 @@
  	/* Don't do the full vcpu_info placement stuff until we have a
  	   possible map and a non-dummy shared_info. */
  	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
-@@ -1150,6 +1227,7 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1146,6 +1223,7 @@ asmlinkage void __init xen_start_kernel(void)
  
  	xen_raw_console_write("mapping kernel into physical memory\n");
  	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
@@ -3828,7 +3826,7 @@
  
  	init_mm.pgd = pgd;
  
-@@ -1159,6 +1237,14 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1155,6 +1233,14 @@ asmlinkage void __init xen_start_kernel(void)
  	if (xen_feature(XENFEAT_supervisor_mode_kernel))
  		pv_info.kernel_rpl = 0;
  
@@ -3843,7 +3841,7 @@
  	/* set the limit of our address space */
  	xen_reserve_top();
  
-@@ -1181,6 +1267,16 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1177,6 +1263,16 @@ asmlinkage void __init xen_start_kernel(void)
  		add_preferred_console("xenboot", 0, NULL);
  		add_preferred_console("tty", 0, NULL);
  		add_preferred_console("hvc", 0, NULL);
@@ -3860,7 +3858,7 @@
  	}
  
  	xen_raw_console_write("about to get started...\n");
-@@ -1252,7 +1348,6 @@ void xen_hvm_init_shared_info(void)
+@@ -1248,7 +1344,6 @@ void xen_hvm_init_shared_info(void)
  
  	if (!shared_info_page)
  		shared_info_page = (struct shared_info *) alloc_bootmem_pages(PAGE_SIZE);
@@ -3868,7 +3866,7 @@
  	xatp.domid = DOMID_SELF;
  	xatp.idx = 0;
  	xatp.space = XENMAPSPACE_shared_info;
-@@ -1275,7 +1370,6 @@ void xen_hvm_init_shared_info(void)
+@@ -1271,7 +1366,6 @@ void xen_hvm_init_shared_info(void)
  	}
  }
  
@@ -3876,13 +3874,13 @@
  static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
  				    unsigned long action, void *hcpu)
  {
-@@ -1317,4 +1411,3 @@ void __init xen_hvm_guest_init(void)
+@@ -1313,4 +1407,3 @@ void __init xen_hvm_guest_init(void)
  	xen_hvm_init_time_ops();
  	xen_hvm_init_mmu_ops();
  }
 -#endif
 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 971a405..74e284f 100644
+index f23edcf..b33ace5 100644
 --- a/arch/x86/xen/mmu.c
 +++ b/arch/x86/xen/mmu.c
 @@ -42,6 +42,7 @@
@@ -4221,7 +4219,7 @@
  static void set_page_prot(void *addr, pgprot_t prot)
  {
  	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
-@@ -1676,6 +1849,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+@@ -1678,6 +1851,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
  	set_page_prot(pmd, PAGE_KERNEL_RO);
  }
  
@@ -4242,15 +4240,15 @@
  #ifdef CONFIG_X86_64
  static void convert_pfn_mfn(void *v)
  {
-@@ -1767,6 +1954,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+@@ -1775,6 +1962,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
  					 unsigned long max_pfn)
  {
  	pmd_t *kernel_pmd;
 +	int i;
  
- 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
- 
-@@ -1778,6 +1966,20 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+ 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
+ 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
+@@ -1786,6 +1974,20 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
  	xen_map_identity_early(level2_kernel_pgt, max_pfn);
  
  	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
@@ -4271,7 +4269,7 @@
  	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
  			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
  
-@@ -1800,6 +2002,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+@@ -1808,6 +2010,8 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
  }
  #endif	/* CONFIG_X86_64 */
  
@@ -4280,7 +4278,7 @@
  static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  {
  	pte_t pte;
-@@ -1829,9 +2033,26 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
+@@ -1837,9 +2041,26 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  		pte = pfn_pte(phys, prot);
  		break;
  
@@ -4308,7 +4306,7 @@
  	}
  
  	__native_set_fixmap(idx, pte);
-@@ -1846,6 +2067,29 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
+@@ -1854,6 +2075,29 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  #endif
  }
  
@@ -4338,7 +4336,7 @@
  static __init void xen_post_allocator_init(void)
  {
  	pv_mmu_ops.set_pte = xen_set_pte;
-@@ -1961,9 +2205,271 @@ void __init xen_init_mmu_ops(void)
+@@ -1969,9 +2213,271 @@ void __init xen_init_mmu_ops(void)
  	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
  	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
  	pv_mmu_ops = xen_mmu_ops;
@@ -4611,7 +4609,7 @@
  static void xen_hvm_exit_mmap(struct mm_struct *mm)
  {
  	struct xen_hvm_pagetable_dying a;
-@@ -1995,7 +2501,6 @@ void __init xen_hvm_init_mmu_ops(void)
+@@ -2003,7 +2509,6 @@ void __init xen_hvm_init_mmu_ops(void)
  	if (is_pagetable_dying_supported())
  		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
  }
@@ -5235,7 +5233,7 @@
  	fiddle_vdso();
  }
 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index 360f8d8..8a390dc 100644
+index ca5f56e..3e06a9e 100644
 --- a/arch/x86/xen/smp.c
 +++ b/arch/x86/xen/smp.c
 @@ -178,11 +178,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
@@ -5276,7 +5274,7 @@
  	BUG();
  }
 diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
-index 3b1ba77..68feb40 100644
+index 11da865..bd4d114 100644
 --- a/arch/x86/xen/time.c
 +++ b/arch/x86/xen/time.c
 @@ -201,8 +201,22 @@ static unsigned long xen_get_wallclock(void)
@@ -5312,7 +5310,7 @@
  }
  
  void xen_teardown_timer(int cpu)
-@@ -434,10 +450,6 @@ void xen_timer_resume(void)
+@@ -436,10 +452,6 @@ void xen_timer_resume(void)
  	}
  }
  
@@ -5323,7 +5321,7 @@
  static __init void xen_time_init(void)
  {
  	int cpu = smp_processor_id();
-@@ -463,6 +475,10 @@ static __init void xen_time_init(void)
+@@ -465,6 +477,10 @@ static __init void xen_time_init(void)
  	xen_setup_cpu_clockevents();
  }
  
@@ -5334,7 +5332,7 @@
  __init void xen_init_time_ops(void)
  {
  	pv_time_ops = xen_time_ops;
-@@ -476,7 +492,6 @@ __init void xen_init_time_ops(void)
+@@ -478,7 +494,6 @@ __init void xen_init_time_ops(void)
  	x86_platform.set_wallclock = xen_set_wallclock;
  }
  
@@ -5342,7 +5340,7 @@
  static void xen_hvm_setup_cpu_clockevents(void)
  {
  	int cpu = smp_processor_id();
-@@ -488,9 +503,8 @@ static void xen_hvm_setup_cpu_clockevents(void)
+@@ -490,9 +505,8 @@ static void xen_hvm_setup_cpu_clockevents(void)
  __init void xen_hvm_init_time_ops(void)
  {
  	/* vector callback is needed otherwise we cannot receive interrupts
@@ -5354,7 +5352,7 @@
  		return;
  	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
  		printk(KERN_INFO "Xen doesn't support pvclock on HVM,"
-@@ -506,4 +520,3 @@ __init void xen_hvm_init_time_ops(void)
+@@ -508,4 +522,3 @@ __init void xen_hvm_init_time_ops(void)
  	x86_platform.get_wallclock = xen_get_wallclock;
  	x86_platform.set_wallclock = xen_set_wallclock;
  }
@@ -5478,10 +5476,18 @@
 +
  #endif /* XEN_OPS_H */
 diff --git a/block/blk-core.c b/block/blk-core.c
-index 71da511..32d305c 100644
+index cffd737..3efdae0 100644
 --- a/block/blk-core.c
 +++ b/block/blk-core.c
-@@ -439,6 +439,7 @@ void blk_put_queue(struct request_queue *q)
+@@ -310,7 +310,6 @@ void blk_unplug_timeout(unsigned long data)
+ 	trace_block_unplug_timer(q);
+ 	kblockd_schedule_work(q, &q->unplug_work);
+ }
+-EXPORT_SYMBOL(blk_put_queue);
+ 
+ void blk_unplug(struct request_queue *q)
+ {
+@@ -440,6 +439,7 @@ void blk_put_queue(struct request_queue *q)
  {
  	kobject_put(&q->kobj);
  }
@@ -5489,14 +5495,6 @@
  
  void blk_cleanup_queue(struct request_queue *q)
  {
-@@ -612,6 +613,7 @@ int blk_get_queue(struct request_queue *q)
- 
- 	return 1;
- }
-+EXPORT_SYMBOL_GPL(blk_get_queue);
- 
- static inline void blk_free_request(struct request_queue *q, struct request *rq)
- {
 diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
 index 7702118..1be123c 100644
 --- a/drivers/acpi/Makefile
@@ -5588,7 +5586,7 @@
  	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
  	if (ACPI_FAILURE(status)) {
 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
-index ec742a4..492a899 100644
+index 7102474..2428cc0 100644
 --- a/drivers/acpi/processor_core.c
 +++ b/drivers/acpi/processor_core.c
 @@ -58,6 +58,7 @@
@@ -5611,7 +5609,7 @@
  static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
  static int acpi_processor_handle_eject(struct acpi_processor *pr);
  
-@@ -253,7 +252,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
+@@ -247,7 +246,7 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
  	return 0;
  }
  
@@ -5620,7 +5618,7 @@
  {
  	int result = 0;
  	struct pci_dev *dev = NULL;
-@@ -284,7 +283,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
+@@ -278,7 +277,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
   * _PDC is required for a BIOS-OS handshake for most of the newer
   * ACPI processor features.
   */
@@ -5629,7 +5627,7 @@
  {
  	struct acpi_object_list *pdc_in = pr->pdc;
  	acpi_status status = AE_OK;
-@@ -353,7 +352,7 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
+@@ -347,7 +346,7 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
  			   PDE(inode)->data);
  }
  
@@ -5638,7 +5636,7 @@
  {
  	struct proc_dir_entry *entry = NULL;
  
-@@ -392,7 +391,7 @@ static int acpi_processor_add_fs(struct acpi_device *device)
+@@ -386,7 +385,7 @@ static int acpi_processor_add_fs(struct acpi_device *device)
  		return -EIO;
  	return 0;
  }
@@ -5647,7 +5645,7 @@
  {
  
  	if (acpi_device_dir(device)) {
-@@ -408,15 +407,6 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
+@@ -402,15 +401,6 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
  
  	return 0;
  }
@@ -5663,7 +5661,7 @@
  #endif
  
  /* Use the acpiid in MADT to map cpus in case of SMP */
-@@ -711,7 +701,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
+@@ -705,7 +695,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
  
  static DEFINE_PER_CPU(void *, processor_device_array);
  
@@ -5672,7 +5670,7 @@
  {
  	struct acpi_processor *pr = acpi_driver_data(device);
  	int saved;
-@@ -879,7 +869,7 @@ err_free_cpumask:
+@@ -873,7 +863,7 @@ err_free_cpumask:
  	return result;
  }
  
@@ -5681,7 +5679,7 @@
  {
  	struct acpi_processor *pr = NULL;
  
-@@ -1154,7 +1144,11 @@ static int __init acpi_processor_init(void)
+@@ -1148,7 +1138,11 @@ static int __init acpi_processor_init(void)
  	if (result < 0)
  		goto out_proc;
  
@@ -5694,7 +5692,7 @@
  	if (result < 0)
  		goto out_cpuidle;
  
-@@ -1190,7 +1184,10 @@ static void __exit acpi_processor_exit(void)
+@@ -1184,7 +1178,10 @@ static void __exit acpi_processor_exit(void)
  
  	acpi_processor_uninstall_hotplug_notify();
  
@@ -7444,7 +7442,7 @@
  }
  EXPORT_SYMBOL(ttm_fbdev_mmap);
 diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
-index 8ec2201..80a072e 100644
+index ac42691..546838f 100644
 --- a/drivers/input/xen-kbdfront.c
 +++ b/drivers/input/xen-kbdfront.c
 @@ -21,7 +21,10 @@
@@ -7458,7 +7456,7 @@
  #include <xen/events.h>
  #include <xen/page.h>
  #include <xen/interface/io/fbif.h>
-@@ -272,6 +275,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
+@@ -286,6 +289,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
  	switch (backend_state) {
  	case XenbusStateInitialising:
  	case XenbusStateInitialised:
@@ -7467,7 +7465,7 @@
  	case XenbusStateUnknown:
  	case XenbusStateClosed:
  		break;
-@@ -335,7 +340,7 @@ static struct xenbus_driver xenkbd_driver = {
+@@ -348,7 +353,7 @@ static struct xenbus_driver xenkbd_driver = {
  
  static int __init xenkbd_init(void)
  {
@@ -7814,7 +7812,7 @@
  EXPORT_SYMBOL_GPL(pci_bus_add_device);
  EXPORT_SYMBOL(pci_bus_add_devices);
 diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
-index 5753036..8e6e6d1 100644
+index 91d0390..24f6f28 100644
 --- a/drivers/pci/dmar.c
 +++ b/drivers/pci/dmar.c
 @@ -673,10 +673,13 @@ void __init detect_intel_iommu(void)
@@ -7834,10 +7832,10 @@
  	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
  	dmar_tbl = NULL;
 diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
-index ba83495..1506d4a 100644
+index 5b680df..02bf548 100644
 --- a/drivers/pci/intel-iommu.c
 +++ b/drivers/pci/intel-iommu.c
-@@ -3278,7 +3278,7 @@ int __init intel_iommu_init(void)
+@@ -3284,7 +3284,7 @@ int __init intel_iommu_init(void)
  	 * Check the need for DMA-remapping initialization now.
  	 * Above initialization will also be used by Interrupt-remapping.
  	 */
@@ -7846,7 +7844,7 @@
  		return -ENODEV;
  
  	iommu_init_mempool();
-@@ -3299,7 +3299,9 @@ int __init intel_iommu_init(void)
+@@ -3305,7 +3305,9 @@ int __init intel_iommu_init(void)
  	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
  
  	init_timer(&unmap_timer);
@@ -12975,7 +12973,7 @@
 +MODULE_LICENSE("Dual BSD/GPL");
 diff --git a/drivers/xen/blktap/device.c b/drivers/xen/blktap/device.c
 new file mode 100644
-index 0000000..6091780
+index 0000000..6091780b
 --- /dev/null
 +++ b/drivers/xen/blktap/device.c
 @@ -0,0 +1,943 @@
@@ -15049,7 +15047,7 @@
  
  #include <asm/xen/hypervisor.h>
 diff --git a/drivers/xen/events.c b/drivers/xen/events.c
-index 373905b..937bad7 100644
+index e287ddd..904b402 100644
 --- a/drivers/xen/events.c
 +++ b/drivers/xen/events.c
 @@ -16,7 +16,7 @@
@@ -15623,7 +15621,7 @@
  }
  EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
  
-@@ -886,7 +1292,7 @@ void xen_clear_irq_pending(int irq)
+@@ -879,7 +1285,7 @@ void xen_clear_irq_pending(int irq)
  	if (VALID_EVTCHN(evtchn))
  		clear_evtchn(evtchn);
  }
@@ -15632,7 +15630,7 @@
  void xen_set_irq_pending(int irq)
  {
  	int evtchn = evtchn_from_irq(irq);
-@@ -906,9 +1312,9 @@ bool xen_test_irq_pending(int irq)
+@@ -899,9 +1305,9 @@ bool xen_test_irq_pending(int irq)
  	return ret;
  }
  
@@ -15644,7 +15642,7 @@
  {
  	evtchn_port_t evtchn = evtchn_from_irq(irq);
  
-@@ -916,13 +1322,33 @@ void xen_poll_irq(int irq)
+@@ -909,13 +1315,33 @@ void xen_poll_irq(int irq)
  		struct sched_poll poll;
  
  		poll.nr_ports = 1;
@@ -15679,7 +15677,7 @@
  
  void xen_irq_resume(void)
  {
-@@ -969,6 +1395,26 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
+@@ -962,6 +1388,26 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
  	.ack		= ack_dynirq,
  };
  
@@ -15706,7 +15704,7 @@
  int xen_set_callback_via(uint64_t via)
  {
  	struct xen_hvm_param a;
-@@ -979,7 +1425,21 @@ int xen_set_callback_via(uint64_t via)
+@@ -972,7 +1418,21 @@ int xen_set_callback_via(uint64_t via)
  }
  EXPORT_SYMBOL_GPL(xen_set_callback_via);
  
@@ -15729,7 +15727,7 @@
  /* Vector callbacks are better than PCI interrupts to receive event
   * channel notifications because we can receive vector callbacks on any
   * vcpu and we don't need PCI support or APIC interactions. */
-@@ -998,14 +1458,9 @@ void xen_callback_vector(void)
+@@ -991,14 +1451,9 @@ void xen_callback_vector(void)
  		}
  		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
  				"enabled\n");
@@ -15745,7 +15743,7 @@
  
  void __init xen_init_IRQ(void)
  {
-@@ -1013,7 +1468,12 @@ void __init xen_init_IRQ(void)
+@@ -1006,7 +1461,12 @@ void __init xen_init_IRQ(void)
  
  	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
  				    GFP_KERNEL);
@@ -15759,7 +15757,7 @@
  
  	init_evtchn_cpu_bindings();
  
-@@ -1026,5 +1486,6 @@ void __init xen_init_IRQ(void)
+@@ -1019,5 +1479,6 @@ void __init xen_init_IRQ(void)
  		native_init_IRQ();
  	} else {
  		irq_ctx_init(smp_processor_id());
@@ -22379,7 +22377,7 @@
 +
 diff --git a/drivers/xen/pciback/passthrough.c b/drivers/xen/pciback/passthrough.c
 new file mode 100644
-index 0000000..5386beb
+index 0000000..5386bebf
 --- /dev/null
 +++ b/drivers/xen/pciback/passthrough.c
 @@ -0,0 +1,178 @@
@@ -28476,7 +28474,7 @@
  
  /**
 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
-index 7ad3faa..cf9ddce 100644
+index 66713c6..29c4ec1 100644
 --- a/include/drm/drmP.h
 +++ b/include/drm/drmP.h
 @@ -1388,7 +1388,7 @@ extern int drm_vma_info(struct seq_file *m, void *data);
@@ -28599,7 +28597,7 @@
 +};
  #endif /* _LINUX_IF_LINK_H */
 diff --git a/include/linux/mm.h b/include/linux/mm.h
-index a8d25e4..1bc4927 100644
+index 11e5be6..4c98621 100644
 --- a/include/linux/mm.h
 +++ b/include/linux/mm.h
 @@ -109,6 +109,12 @@ extern unsigned int kobjsize(const void *objp);
@@ -28628,7 +28626,7 @@
  	/*
  	 * set_policy() op must add a reference to any non-NULL @new mempolicy
 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
-index ec12f8c..3f4991c 100644
+index c27a182..04a08e75f 100644
 --- a/include/linux/netdevice.h
 +++ b/include/linux/netdevice.h
 @@ -28,6 +28,7 @@
@@ -32699,6 +32697,15 @@
  	if (!map) {
  		swiotlb_full(dev, size, dir, 1);
  		map = io_tlb_overflow_buffer;
+@@ -632,7 +123,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+ 	 * Ensure that the address returned is DMA'ble
+ 	 */
+ 	if (!dma_capable(dev, dev_addr, size))
+-		panic("map_single: bounce buffer is not DMA'ble");
++		panic("DMA: swiotlb_map_single: bounce buffer is not DMA'ble");
+ 
+ 	return dev_addr;
+ }
 @@ -697,7 +188,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
  	BUG_ON(dir == DMA_NONE);
  
@@ -32779,7 +32786,7 @@
  {
  	int aligned;
 diff --git a/mm/memory.c b/mm/memory.c
-index 53c1da0..c8741df 100644
+index 6c836d3..faa40d9 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -553,6 +553,13 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -32883,7 +32890,7 @@
  }
  EXPORT_SYMBOL_GPL(apply_to_page_range);
 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
-index 902e5fc..101715c 100644
+index 3ecab7e..8ab5033 100644
 --- a/mm/page_alloc.c
 +++ b/mm/page_alloc.c
 @@ -594,6 +594,13 @@ static void __free_pages_ok(struct page *page, unsigned int order)
@@ -32915,7 +32922,7 @@
  		page->mapping = NULL;
  	if (free_pages_check(page))
 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index c228731..cb459fb 100644
+index 680dcbb..4f701c2 100644
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
 @@ -31,6 +31,7 @@
@@ -32936,7 +32943,7 @@
  	log = fls(num_online_cpus());
  
  	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
-@@ -561,8 +565,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
+@@ -570,8 +574,9 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  	}
  	rcu_read_unlock();
  

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze2 (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/35squeeze2)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze2	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/35squeeze2)
@@ -0,0 +1 @@
++ bugfix/all/proc-fix-oops-on-invalid-proc-pid-maps-access.patch

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/36 (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/36)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/36	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/36)
@@ -0,0 +1,697 @@
++ bugfix/all/drm-radeon-kms-fix-bad-shift-atom-iio-parser.patch
+
+- bugfix/all/nl80211-fix-overflow-in-ssid_len.patch
+- bugfix/all/nl80211-fix-check-for-valid-SSID-size-in-scan-operations.patch
+- bugfix/all/fix-for-buffer-overflow-in-ldm_frag_add-not-sufficient.patch
+- bugfix/x86/x86-amd-do-not-enable-arat-feature-on-amd-processors-below.patch
++ bugfix/all/stable/2.6.32.42.patch
++ bugfix/all/nl80211-fix-overflow-in-ssid_len.patch
+
++ bugfix/all/drm_mm-extract-check_free_mm_node.patch
++ bugfix/all/drm-i915-Add-a-no-lvds-quirk-for-the-Asus-EeeBox-PC-.patch
++ bugfix/all/drm-radeon-kms-fix-for-radeon-on-systems-4GB-without.patch
+- bugfix/all/taskstats-don-t-allow-duplicate-entries-in-listener-mode.patch
++ bugfix/all/stable/2.6.32.43.patch
++ debian/if_packet-avoid-ABI-change-in-2.6.32.43.patch
++ debian/mm-avoid-ABI-change-in-2.6.32.43.patch
++ debian/revert-net-ipv4-Check-for-mistakenly-passed-in-non-I.patch
+
+# Networking
++ features/all/net-Add-netdev_alloc_skb_ip_align-helper.patch
++ features/all/PCI-Add-dummy-implementation-of-pci_dev_run_wake.patch
++ features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch
++ features/all/dma-mapping-dma-mapping.h-add-dma_set_coherent_mask.patch
++ features/all/dma-mapping.h-add-the-dma_unmap-state-API.patch
++ features/all/etherdevice-Dummy-implementation-of-dev_hw_addr_rand.patch
++ features/all/err.h-add-helper-function-to-simplify-pointer-error-.patch
++ features/all/e1000e/0001-e1000e-Use-the-instance-of-net_device_stats-from-net.patch
++ features/all/e1000e/0002-net-Use-netdev_alloc_skb_ip_align.patch
++ features/all/e1000e/0003-e1000e-Fix-erroneous-display-of-stats-by-ethtool-S.patch
++ features/all/e1000e/0004-drivers-net-request_irq-Remove-unnecessary-leading-f.patch
++ features/all/e1000e/0005-e1000e-check-WoL-mode-is-among-set-of-supported-mode.patch
++ features/all/e1000e/0006-e1000e-add-missing-tests-for-82583-in-ethtool-functi.patch
++ features/all/e1000e/0007-e1000e-clearing-interrupt-timers-causes-descriptors-.patch
++ features/all/e1000e/0008-e1000e-function-pointers-for-ethtool-set-get-offload.patch
++ features/all/e1000e/0009-e1000e-don-t-clean-Rx-ring-while-resetting.patch
++ features/all/e1000e/0010-e1000e-link-reporting-problems.patch
++ features/all/e1000e/0011-e1000e-improper-return-code-signage.patch
++ features/all/e1000e/0012-e1000e-disable-K1-on-PCH-LOM-when-in-PHY-loopback-mo.patch
++ features/all/e1000e/0013-e1000e-Incorrect-MII-Link-beat-reporting.patch
++ features/all/e1000e/0014-e1000e-cleanup-redundant-include-s.patch
++ features/all/e1000e/0015-e1000e-consolidate-two-dbug-macros-into-one-simpler-.patch
++ features/all/e1000e/0016-e1000e-cleanup-ops-function-pointers.patch
++ features/all/e1000e/0017-e1000e-update-copyright-information.patch
++ features/all/e1000e/0018-e1000e-remove-comments-regarding-a-non-existent-api-.patch
++ features/all/e1000e/0019-e1000e-provide-comment-for-82571-workaround.patch
++ features/all/e1000e/0020-e1000e-set-bools-to-true-false-instead-of-1-0.patch
++ features/all/e1000e/0021-e1000e-cleanup-shift-indentation-left-by-exiting-ear.patch
++ features/all/e1000e/0022-e1000e-cleanup-functions-that-clear-hardware-statist.patch
++ features/all/e1000e/0023-e1000e-set-pm_qos-DMA-latency-requirement-per-interf.patch
++ features/all/e1000e/0024-e1000e-do-not-error-out-on-identification-LED-init-f.patch
++ features/all/e1000e/0025-e1000e-remove-redundant-might_sleep.patch
++ features/all/e1000e/0026-e1000e-cosmetic-group-local-variables-of-the-same-ty.patch
++ features/all/e1000e/0027-e1000e-update-Tx-Unit-hang-detection-message.patch
++ features/all/e1000e/0028-e1000e-provide-family-specific-functions-to-manage-V.patch
++ features/all/e1000e/0029-e1000e-guard-against-buffer-overflow-in-cable-length.patch
++ features/all/e1000e/0030-e1000e-provide-family-specific-PHY-power-up-down-ope.patch
++ features/all/e1000e/0031-e1000e-ensure-the-link-state-is-correct-for-serdes-l.patch
++ features/all/e1000e/0032-e1000e-comment-corrections.patch
++ features/all/e1000e/0033-e1000e-add-debug-messages.patch
++ features/all/e1000e/0034-e1000e-PHY-type-cleanups-in-e1000e_check_downshift.patch
++ features/all/e1000e/0035-e1000e-Use-sizeof-struct-rather-than-pointer-in-e100.patch
++ features/all/e1000e/0036-e1000e-remove-use-of-skb_dma_map-from-e1000e-driver.patch
++ features/all/e1000e/0037-e1000e-check_polarity-function-pointers-not-set-for-.patch
++ features/all/e1000e/0038-e1000e-refactor-PHY-ID-detection-workaround.patch
++ features/all/e1000e/0039-e1000e-correct-ICH-PCH-PHY-operations-function-point.patch
++ features/all/e1000e/0040-drivers-net-Move-and-to-end-of-previous-line.patch
++ features/all/e1000e/0041-e1000e-minor-correction-to-name-of-bit-in-CTRL_EXT-r.patch
++ features/all/e1000e/0042-e1000e-replace-incorrect-use-of-GG82563_REG-macro.patch
++ features/all/e1000e/0043-e1000e-only-perform-ESB2-MDIC-workaround-on-certain-.patch
++ features/all/e1000e/0044-e1000e-LED-settings-in-EEPROM-ignored-on-82571-and-8.patch
++ features/all/e1000e/0045-drivers-net-use-DEFINE_PCI_DEVICE_TABLE.patch
++ features/all/e1000e/0046-e1000e-call-pci_save_state-after-pci_restore_state.patch
++ features/all/e1000e/0047-e1000e-don-t-accumulate-PHY-statistics-on-PHY-read-f.patch
++ features/all/e1000e/0048-e1000e-perform-10-100-adaptive-IFS-only-on-parts-tha.patch
++ features/all/e1000e/0049-e1000e-e1000e_enable_tx_pkt_filtering-returns-wrong-.patch
++ features/all/e1000e/0050-e1000e-fix-and-commonize-code-for-setting-the-receiv.patch
++ features/all/e1000e/0051-e1000e-MDIO-slow-mode-should-always-be-done-for-8257.patch
++ features/all/e1000e/0052-e1000e-workaround-link-issues-on-busy-hub-in-half-du.patch
++ features/all/e1000e/0053-e1000e-use-alternate-MAC-address-on-ESB2-if-availabl.patch
++ features/all/e1000e/0054-e1000e-provide-MAC-family-specific-function-to-set-L.patch
++ features/all/e1000e/0055-e1000e-genericize-the-update-multicast-address-list.patch
++ features/all/e1000e/0056-e1000-e1000e-igb-igbvf-ixgb-ixgbe-Fix-tests-of-unsig.patch
++ features/all/e1000e/0058-e1000e-Fix-namespace-conflicts-wrt.-e1000_has_link.patch
++ features/all/e1000e/0059-net-e1000e-convert-to-use-mc-helpers.patch
++ features/all/e1000e/0060-tree-wide-Assorted-spelling-fixes.patch
++ features/all/e1000e/0061-e1000e-fix-packet-corruption-and-tx-hang-during-NFSv.patch
++ features/all/e1000e/0062-e1000e-enable-disable-ASPM-L0s-and-L1-and-ERT-accord.patch
++ features/all/e1000e/0063-e1000e-PCI-PM-Add-basic-runtime-PM-support-rev.-4.patch
++ features/all/e1000e/0064-e1000e-Fix-build-with-CONFIG_PM-disabled.patch
++ features/all/e1000e/0065-Net-e1000e-Fix-build-issue-introduced-by-runtime-PM-.patch
++ features/all/e1000e/0066-e1000e-Disable-run-time-PM-support.patch
++ features/all/e1000e/0067-e1000e-do-not-modify-tx_queue_len-on-link-speed-chan.patch
++ features/all/e1000e/0068-e1000e-Use-pr_-level-and-netdev_-level.patch
++ features/all/e1000e/0069-include-cleanup-Update-gfp.h-and-slab.h-includes-to-.patch
++ features/all/e1000e/0070-e1000e-typo-corrections.patch
++ features/all/e1000e/0071-e1000e-use-static-params-to-save-stack-space.patch
++ features/all/e1000e/0072-drivers-net-use-skb_headlen.patch
++ features/all/e1000e/0073-e1000e-use-DMA-API-instead-of-PCI-DMA-functions.patch
++ features/all/e1000e/0074-e1000e-add-registers-etc.-printout-code-just-before-.patch
++ features/all/e1000e/0075-e1000e-Fix-oops-caused-by-ASPM-patch.patch
++ features/all/e1000e/0076-e1000e-save-skb-counts-in-TX-to-avoid-cache-misses.patch
++ features/all/e1000e/0077-e1000e-reduce-writes-of-RX-producer-ptr.patch
++ features/all/e1000e/0078-e1000e-Remove-unnessary-log-message.patch
++ features/all/e1000e/0079-e1000e-Save-irq-into-netdev-structure.patch
++ features/all/e1000e/0080-e1000e-increase-rx-fifo-size-to-36K-on-82574-and-825.patch
++ features/all/e1000e/0081-e1000-e1000e-implement-a-simple-interrupt-moderation.patch
++ features/all/e1000e/0082-e1000e-reset-MAC-PHY-interconnect-on-82577-82578-dur.patch
++ features/all/e1000e/0083-e1000e-use-static-params-to-save-stack-space-part-2.patch
++ features/all/e1000e/0084-e1000e-bad-state-after-running-ethtool-diagnostics-w.patch
++ features/all/e1000e/0085-e1000e-initialize-manageability-IPMI-pass-through-in.patch
++ features/all/e1000e/0086-e1000e-s-w-initiated-LSC-MSI-X-interrupts-not-genera.patch
++ features/all/e1000e/0087-e1000e-cleanup-multiple-common-exit-points.patch
++ features/all/e1000e/0088-e1000e-Remove-EN_MAC_ADDR_FILTER-check-from-enable_m.patch
++ features/all/e1000e/0089-e1000e-Cleanup-e1000_sw_lcd_config_ich8lan.patch
++ features/all/e1000e/0090-e1000e-Incorrect-function-pointer-set-for-force_spee.patch
++ features/all/e1000e/0091-e1000e-fix-checks-for-manageability-enabled-and-mana.patch
++ features/all/e1000e/0092-e1000e-move-settting-of-flow-control-refresh-timer-t.patch
++ features/all/e1000e/0093-e1000e-Fix-cleanup-PHY-reset-code-for-ICHx-PCHx.patch
++ features/all/e1000e/0094-e1000e-add-PCI-device-id-to-enable-support-for-82567.patch
++ features/all/e1000e/0095-drivers-net-Remove-unnecessary-returns-from-void-fun.patch
++ features/all/e1000e/0096-e1000e-change-logical-negate-to-bitwise.patch
++ features/all/e1000e/0097-e1000e-cleanup-ethtool-loopback-setup-code.patch
++ features/all/e1000e/0098-e1000e-cleanup-e1000_sw_lcd_config_ich8lan.patch
++ features/all/e1000e/0099-e1000e-separate-out-PHY-statistics-register-updates.patch
++ features/all/e1000e/0100-e1000e-fix-check-for-manageability-on-ICHx-PCH.patch
++ features/all/e1000e/0101-e1000e-initial-support-for-82579-LOMs.patch
++ features/all/e1000e/0102-e1000e-enable-support-for-EEE-on-82579.patch
++ features/all/e1000e/0103-e1000e-update-copyright-information.patch
++ features/all/e1000e/0104-e1000e-update-driver-version-number.patch
++ features/all/e1000e/0105-e1000e-avoid-polling-h-w-registers-during-link-negot.patch
++ features/all/e1000e/0106-e1000e-do-not-touch-PHY-page-800-registers-when-link.patch
++ features/all/e1000e/0107-e1000e-packet-split-should-not-be-used-with-early-re.patch
++ features/all/e1000e/0108-e1000e-disable-gig-speed-when-in-S0-Sx-transition.patch
++ features/all/e1000e/0109-e1000e-fail-when-try-to-setup-unsupported-features.patch
++ features/all/e1000e/0110-e1000e-suppress-compile-warnings-on-certain-archs.patch
++ features/all/e1000e/0111-e1000e-remove-EEE-module-parameter.patch
++ features/all/e1000e/0112-e1000e-disable-EEE-support-by-default.patch
++ features/all/e1000e/0113-e1000e-Drop-a-useless-statement.patch
++ features/all/e1000e/0114-e1000e-Fix-irq_synchronize-in-MSI-X-case.patch
++ features/all/e1000e/0115-e1000e-correct-MAC-PHY-interconnect-register-offset-.patch
++ features/all/e1000e/0116-e100-e1000-igb-ixgb-Add-missing-read-memory-barrier.patch
++ features/all/e1000e/0117-e1000e-disable-ASPM-L1-on-82573.patch
++ features/all/e1000e/0118-e1000e-don-t-check-for-alternate-MAC-addr-on-parts-t.patch
++ features/all/e1000e/0119-drivers-net-Remove-address-use-from-assignments-of-f.patch
++ features/all/e1000e/0120-e1000e-Simplify-MSI-interrupt-testing.patch
++ features/all/e1000e/0121-e1000e-82577-8-9-issues-with-device-in-Sx.patch
++ features/all/e1000e/0122-e1000e-82579-SMBus-address-and-LEDs-incorrect-after-.patch
++ features/all/e1000e/0123-e1000e-82566DC-fails-to-get-link.patch
++ features/all/e1000e/0124-e1000e-82579-unaccounted-missed-packets.patch
++ features/all/e1000e/0125-e1000e-82579-jumbo-frame-workaround-causing-CRC-erro.patch
++ features/all/e1000e/0126-e1000e-82579-do-not-gate-auto-config-of-PHY-by-hardw.patch
++ features/all/e1000e/0127-e1000-e1000e-igb-ixgb-ixgbe-set-NETIF_F_HIGHDMA-for-.patch
++ features/all/e1000e/0128-drivers-net-return-operator-cleanup.patch
++ features/all/e1000e/0129-e1000e-use-hardware-writeback-batching.patch
++ features/all/e1000e/0130-e1000e-Fix-for-offline-diag-test-failure-at-first-ca.patch
++ features/all/e1000e/0131-e1000e-82579-performance-improvements.patch
++ features/all/e1000e/0132-e1000e-reset-PHY-after-errors-detected.patch
++ features/all/e1000e/0133-e1000e-Add-check-for-reset-flags-before-displaying-r.patch
++ features/all/e1000e/0134-tree-wide-fix-comment-printk-typos.patch
++ features/all/e1000e/0135-drivers-net-e1000e-Remove-unnecessary-semicolons.patch
++ features/all/e1000e/0136-e1000e-82571-SerDes-link-handle-null-code-word-from-.patch
++ features/all/e1000e/0137-e1000e-82574-intermittently-fails-to-initialize-with.patch
++ features/all/e1000e/0138-e1000e-add-netpoll-support-for-MSI-MSI-X-IRQ-modes.patch
++ features/all/e1000e/0139-Intel-Wired-LAN-drivers-Use-static-const.patch
++ features/all/e1000e/0140-e1000e-fix-double-initialization-in-blink-path.patch
++ features/all/e1000e/0141-e1000e-82571-based-mezzanine-card-can-fail-ethtool-l.patch
++ features/all/e1000e/0142-e1000e-82574-82583-performance-improvement.patch
++ features/all/e1000e/0143-e1000e-82577-8-must-acquire-h-w-semaphore-before-wor.patch
++ features/all/e1000e/0144-e1000e-82571-Serdes-can-fail-to-get-link.patch
++ features/all/e1000e/0145-e1000e-82577-8-9-mis-configured-OEM-bits-during-S0-S.patch
++ features/all/e1000e/0146-e1000e-82579-PHY-incorrectly-identified-during-init.patch
++ features/all/e1000e/0147-e1000e-support-new-PBA-format-from-EEPROM.patch
++ features/all/e1000e/0148-e1000e-prevent-null-ptr-dereference-in-e1000_tx_queu.patch
++ features/all/e1000e/0149-e1000e-minor-error-message-corrections.patch
++ features/all/e1000e/0150-e1000e-static-analysis-tools-complain-of-a-possible-.patch
++ features/all/e1000e/0151-e1000e-increment-the-driver-version.patch
++ features/all/e1000e/0152-e1000e-checkpatch-error-macro-panethesis.patch
++ features/all/e1000e/0153-e1000e-checkpatch-error-trailing-statements.patch
++ features/all/e1000e/0154-e1000e-checkpatch-error-open-braces.patch
++ features/all/e1000e/0155-e1000e-checkpatch-warnings-braces.patch
++ features/all/e1000e/0156-e1000e-cleanup-variables-set-but-not-used.patch
++ features/all/e1000e/0157-e1000e-convert-calls-of-ops.-read-write-_reg-to-e1e_.patch
++ features/all/e1000e/0158-e1000e-properly-bounds-check-string-functions.patch
++ features/all/e1000e/0159-e1000e-use-either_crc_le-rather-than-re-write-it.patch
++ features/all/e1000e/0160-e1000e-power-off-PHY-after-reset-when-interface-is-d.patch
++ features/all/e1000e/0161-e1000e-add-custom-set_d-0-3-_lplu_state-function-poi.patch
++ features/all/e1000e/0162-e1000e-update-Copyright-for-2011.patch
++ features/all/e1000e/0163-e1000e-consistent-use-of-Rx-Tx-vs.-RX-TX-rx-tx-in-co.patch
++ features/all/e1000e/0164-e1000e-tx_timeout-should-not-increment-for-non-hang-.patch
++ features/all/e1000e/0165-e1000e-check-down-flag-in-tasks.patch
++ features/all/e1000e/0166-e1000e-flush-all-writebacks-before-unload.patch
++ features/all/e1000e/0167-e1000e-alternate-mac-address-does-not-work-on-device-id-0x1060.patch
++ features/all/igb/0001-net-Convert-ethtool-get_stats-self_test-_count-ops-t.patch
++ features/all/igb/0002-igb-remove-unused-temp-variable-from-stats-clearing-.patch
++ features/all/igb/0003-igb-update-comments-for-serdes-config-and-update-to-.patch
++ features/all/igb/0004-igb-update-the-approach-taken-to-acquiring-and-relea.patch
++ features/all/igb/0005-igb-add-locking-to-reads-of-the-i2c-interface.patch
++ features/all/igb/0006-igb-add-combined-function-for-setting-rar-and-pool-b.patch
++ features/all/igb/0007-igb-make-use-of-the-uta-to-allow-for-promiscous-mode.patch
++ features/all/igb/0008-igb-add-function-to-handle-mailbox-lock.patch
++ features/all/igb/0009-igb-fix-a-few-items-where-weren-t-correctly-setup-fo.patch
++ features/all/igb/0010-igb-remove-microwire-support-from-igb.patch
++ features/all/igb/0011-igb-move-the-generic-copper-link-setup-code-into-e10.patch
++ features/all/igb/0012-igb-add-code-to-retry-a-phy-read-in-the-event-of-fai.patch
++ features/all/igb/0013-igb-add-additional-error-handling-to-the-phy-code.patch
++ features/all/igb/0014-igb-add-flushes-between-RAR-writes-when-setting-mac-.patch
++ features/all/igb/0015-igb-Use-the-instance-of-net_device_stats-from-net_de.patch
++ features/all/igb/0016-net-Use-netdev_alloc_skb_ip_align.patch
++ features/all/igb/0017-igb-Fix-erroneous-display-of-stats-by-ethtool-S.patch
++ features/all/igb/0018-igb-add-new-data-structure-for-handling-interrupts-a.patch
++ features/all/igb/0019-igb-remove-rx-checksum-good-counter.patch
++ features/all/igb/0020-igb-increase-minimum-rx-buffer-size-to-1K.patch
++ features/all/igb/0021-igb-move-the-tx-and-rx-ring-specific-config-into-sep.patch
++ features/all/igb/0022-igb-remove-rx_ps_hdr_len.patch
++ features/all/igb/0023-igb-move-SRRCTL-register-configuration-into-ring-spe.patch
++ features/all/igb/0024-igb-change-the-head-and-tail-offsets-into-pointers.patch
++ features/all/igb/0025-igb-add-pci-device-pointer-to-ring-structure.patch
++ features/all/igb/0026-igb-move-rx_buffer_len-into-the-ring-structure.patch
++ features/all/igb/0027-igb-move-alloc_failed-and-csum_err-stats-into-per-rx.patch
++ features/all/igb/0028-igb-add-a-flags-value-to-the-ring.patch
++ features/all/igb/0029-igb-place-a-pointer-to-the-netdev-struct-in-the-ring.patch
++ features/all/igb/0030-igb-move-the-multiple-receive-queue-configuration-in.patch
++ features/all/igb/0031-igb-delay-VF-reset-notification-until-after-interrup.patch
++ features/all/igb/0032-igb-setup-vlan-tag-replication-stripping-in-igb_vmm_.patch
++ features/all/igb/0033-igb-re-use-ring-configuration-code-in-ethtool-testin.patch
++ features/all/igb/0034-igb-make-tx-ring-map-and-free-functionality-non-stat.patch
++ features/all/igb/0035-igb-make-ethtool-use-core-xmit-map-and-free-function.patch
++ features/all/igb/0036-igb-add-single-vector-msi-x-testing-to-interrupt-tes.patch
++ features/all/igb/0037-igb-cleanup-todo-code-found-in-igb_ethtool.c.patch
++ features/all/igb/0038-igb-add-support-for-seperate-tx-usecs-setting-in-eth.patch
++ features/all/igb/0039-igb-cleanup-some-of-the-code-related-to-hw-timestamp.patch
++ features/all/igb/0040-igb-misc-cleanups-within-igb_ethtool.c.patch
++ features/all/igb/0041-igb-use-packet-buffer-sizes-from-RXPBS-register.patch
++ features/all/igb/0042-igb-replace-the-VF-clear_to_send-with-a-flags-value.patch
++ features/all/igb/0043-igb-rework-use-of-VMOLR-in-regards-to-PF-and-VFs.patch
++ features/all/igb/0044-igb-rework-handling-of-the-vfta-and-vlvf-registers-i.patch
++ features/all/igb/0045-igb-move-vf-init-into-a-seperate-function.patch
++ features/all/igb/0046-igb-only-process-global-stats-in-igb_update_stats.patch
++ features/all/igb/0047-igb-move-global_quad_port_a-from-global-into-local-s.patch
++ features/all/igb/0048-igb-make-tx-hang-check-multiqueue-check-eop-descript.patch
++ features/all/igb/0049-igb-cleanup-code-related-to-ring-resource-allocation.patch
++ features/all/igb/0050-igb-change-queue-ordering-for-82576-based-adapters.patch
++ features/all/igb/0051-igb-cleanup-interrupt-enablement-in-regards-to-msix_.patch
++ features/all/igb/0052-igb-Remove-invalid-stats-counters.patch
++ features/all/igb/0053-igb-cleanup-igb.h-header-whitespace-and-some-structu.patch
++ features/all/igb/0054-igb-cleanup-igb-xmit-frame-path.patch
++ features/all/igb/0055-igb-cleanup-clean_rx_irq_adv-and-alloc_rx_buffers_ad.patch
++ features/all/igb/0056-igb-replace-unecessary-adapter-hw-with-just-hw-where.patch
++ features/all/igb/0057-igb-add-pci_dev-in-few-spots-to-clean-up-use-of-dev_.patch
++ features/all/igb/0058-igb-limit-minimum-mtu-to-68-to-keep-ip-bound-to-inte.patch
++ features/all/igb/0059-igb-open-up-SCTP-checksum-offloads-to-all-MACs-82576.patch
++ features/all/igb/0060-igb-cleanup-whitespace-issues-in-igb_main.c.patch
++ features/all/igb/0061-igb-Fix-warnings-in-igb_set_ringparam.patch
++ features/all/igb/0062-igb-change-type-for-ring-sizes-to-u16-in-igb_set_rin.patch
++ features/all/igb/0063-igb-move-timesync-init-into-a-seperate-function.patch
++ features/all/igb/0064-igb-when-number-of-CPUs-4-combine-tx-rx-queues-to-al.patch
++ features/all/igb/0065-igb-Rework-how-netdev-stats-is-handled.patch
++ features/all/igb/0066-igb-removed-unused-tx-rx-total-bytes-packets-from-ad.patch
++ features/all/igb/0067-igb-check-for-packets-on-all-tx-rings-when-link-is-d.patch
++ features/all/igb/0068-igb-only-recycle-page-if-it-is-on-our-numa-node.patch
++ features/all/igb/0069-drivers-net-request_irq-Remove-unnecessary-leading-f.patch
++ features/all/igb/0070-igb-add-support-for-the-82580-phy.patch
++ features/all/igb/0071-igb-add-support-for-82580-MAC.patch
++ features/all/igb/0072-igb-Add-full-support-for-82580-devices.patch
++ features/all/igb/0073-igb-remove-use-of-skb_dma_map-from-driver.patch
++ features/all/igb/0074-igbvf-remove-skb_dma_map-unmap-call-from-drivers.patch
++ features/all/igb/0075-drivers-net-Move-and-to-end-of-previous-line.patch
++ features/all/igb/0076-igb-fix-handling-of-mailbox-collisions-between-PF-VF.patch
++ features/all/igb/0077-igbvf-avoid-reset-storms-due-to-mailbox-issues.patch
++ features/all/igb/0078-igb-do-not-force-pcs-link-when-in-KX-mode.patch
++ features/all/igb/0079-igb-do-not-force-retry-count-to-1-on-82580-phy.patch
++ features/all/igb/0080-igb-correctly-offset-82575-flow-control-watermarks-b.patch
++ features/all/igb/0081-igb-check-both-function-bits-in-status-register-in-w.patch
++ features/all/igb/0082-igbvf-Make-igbvf-error-message-more-informative.patch
++ features/all/igb/0083-drivers-net-igbvf-netdev.c-use-pM-to-shown-MAC-addre.patch
++ features/all/igb/0084-drivers-net-use-DEFINE_PCI_DEVICE_TABLE.patch
++ features/all/igb/0085-igb-add-support-for-device-reset-interrupt.patch
++ features/all/igb/0086-e1000-e1000e-igb-igbvf-ixgb-ixgbe-Fix-tests-of-unsig.patch
++ features/all/igb/0087-igb-igbvf-cleanup-exception-handling-in-tx_map_adv.patch
++ features/all/igb/0089-igbvf-fix-issue-w-mapped_as_page-being-left-set-afte.patch
++ features/all/igb/0090-igb-make-certain-to-reassign-legacy-interrupt-vector.patch
++ features/all/igb/0091-tree-wide-Assorted-spelling-fixes.patch
++ features/all/igb/0092-net-use-netdev_mc_count-and-netdev_mc_empty-when-app.patch
++ features/all/igb/0093-igb-remove-unecessary-q_vector-declarations-and-remo.patch
++ features/all/igb/0094-igb-add-support-for-wake-on-link.patch
++ features/all/igb/0095-igb-Report-link-status-in-ethtool-when-interface-is-.patch
++ features/all/igb/0096-igb-ignore-EEPROM-APME-check-when-shutting-down-serd.patch
++ features/all/igb/0097-igb-Power-down-link-when-interface-is-down.patch
++ features/all/igb/0098-igb-call-pci_save_state-after-pci_restore_state.patch
++ features/all/igb/0099-igb-Allocate-rings-seperately-instead-of-as-a-block.patch
++ features/all/igb/0100-igb-remove-adaptive-IFS-from-driver.patch
++ features/all/igb/0101-igb-cap-interrupts-at-20K-per-queue-when-in-itr-mode.patch
++ features/all/igb/0102-igb-only-support-SRRCTL_DROP_EN-when-using-multiple-.patch
++ features/all/igb/0103-igb-only-read-phy-specific-stats-if-in-internal-phy-.patch
++ features/all/igb/0104-igb-inline-igb_maybe_stop_tx.patch
++ features/all/igb/0105-igb-move-gso_segs-into-buffer_info-structure.patch
++ features/all/igb/0106-igb-minor-type-cleanups.patch
++ features/all/igb/0107-igb-remove-unused-vmolr-value.patch
++ features/all/igb/0108-igb-use-igb_free_q_vectors-to-cleanup-failure-in-igb.patch
++ features/all/igb/0109-igb-change-descriptor-control-thresholds.patch
++ features/all/igb/0110-igb-update-tx-DMA-mapping-error-handling.patch
++ features/all/igb/0111-net-convert-multiple-drivers-to-use-netdev_for_each_.patch
++ features/all/igb/0112-igb-Add-support-for-82576-ET2-Quad-Port-Server-Adapt.patch
++ features/all/igb/0113-igb-Do-not-overwrite-mdicnfg-register-when-accessing.patch
++ features/all/igb/0114-igb-cleanup-usage-of-virtualization-registers.patch
++ features/all/igb/0115-igb-add-support-for-Intel-I350-Gigabit-Network-Conne.patch
++ features/all/igb/0116-igb-count-Rx-FIFO-errors-correctly.patch
++ features/all/igb/0117-igb-do-not-modify-tx_queue_len-on-link-speed-change.patch
++ features/all/igb/0118-igbvf-do-not-modify-tx_queue_len-on-link-speed-chang.patch
++ features/all/igb/0119-igb-use-correct-bits-to-identify-if-managability-is-.patch
++ features/all/igb/0120-include-cleanup-Update-gfp.h-and-slab.h-includes-to-.patch
++ features/all/igb/0121-igb-update-hw_debug-macro-to-make-use-of-netdev_dbg-.patch
++ features/all/igb/0122-igb-add-per-packet-timestamping.patch
++ features/all/igb/0123-igb-modify-register-test-for-i350-to-reflect-read-on.patch
++ features/all/igb/0124-igb-restrict-WoL-for-82576-ET2-Quad-Port-Server-Adap.patch
++ features/all/igb/0125-igb-double-increment-nr_frags.patch
++ features/all/igb/0126-igbvf-double-increment-nr_frags.patch
++ features/all/igb/0127-igb-add-support-for-reporting-5GT-s-during-probe-on-.patch
++ features/all/igb/0128-igb-convert-igb-from-using-PCI-DMA-functions-to-usin.patch
++ features/all/igb/0129-igbvf-use-DMA-API-instead-of-PCI-DMA-functions.patch
++ features/all/igb/0130-igb-add-registers-etc.-printout-code-just-before-res.patch
++ features/all/igb/0131-igb-Clean-up-left-over-prototype-of-igb_get_hw_dev_n.patch
++ features/all/igb/0132-igb-reduce-cache-misses-on-tx-cleanup.patch
++ features/all/igb/0133-drivers-net-Remove-unnecessary-returns-from-void-fun.patch
++ features/all/igb/0134-igb-fix-PHY-config-access-on-82580.patch
++ features/all/igb/0135-igb-Use-only-a-single-Tx-queue-in-SR-IOV-mode.patch
++ features/all/igb/0136-igb-Fix-Tx-hangs-seen-when-loading-igb-with-max_vfs-.patch
++ features/all/igb/0137-igb-correct-link-test-not-being-run-when-link-is-dow.patch
++ features/all/igb/0138-igb-Add-comment.patch
++ features/all/igb/0139-igb-drop-support-for-UDP-hashing-w-RSS.patch
++ features/all/igb/0140-ixgbe-igb-catch-invalid-VF-settings.patch
++ features/all/igb/0141-igb-add-support-for-SGMII-based-MDIO-PHYs.patch
++ features/all/igb/0142-igb-restore-EEPROM-values-of-MDICNFG-on-reset-with-8.patch
++ features/all/igb/0143-igbvf-ixgbevf-use-dev_hw_addr_random.patch
++ features/all/igb/0144-igb-Use-irq_synchronize-per-vector-when-using-MSI-X.patch
++ features/all/igb/0145-igb-Program-MDICNFG-register-prior-to-PHY-init.patch
++ features/all/igb/0146-e100-e1000-igb-ixgb-Add-missing-read-memory-barrier.patch
++ features/all/igb/0147-Fix-spelling-fuction-function-in-comments.patch
++ features/all/igb/0148-drivers-net-Remove-address-use-from-assignments-of-f.patch
++ features/all/igb/0149-igb-clear-VF_PROMISC-bits-instead-of-setting-all-oth.patch
++ features/all/igb/0150-igb-Add-support-for-DH89xxCC.patch
++ features/all/igb/0151-e1000-e1000e-igb-ixgb-ixgbe-set-NETIF_F_HIGHDMA-for-.patch
++ features/all/igb/0152-drivers-net-return-operator-cleanup.patch
++ features/all/igb/0153-igb-add-check-for-fiber-serdes-devices-to-igb_set_sp.patch
++ features/all/igb/0154-igbvf-Remove-unneeded-pm_qos-calls.patch
++ features/all/igb/0155-igbvf-fix-panic-on-load.patch
++ features/all/igb/0156-igbvf-Update-version-and-Copyright.patch
++ features/all/igb/0157-igbvf-Remove-some-dead-code-in-igbvf.patch
++ features/all/igb/0158-Intel-Wired-LAN-drivers-Use-static-const.patch
++ features/all/igb/0159-igb-Add-new-function-to-read-part-number-from-EEPROM.patch
++ features/all/igb/0160-igb-Some-fine-tuning.patch
++ features/all/igb/0161-igb-Add-Anti-spoofing-feature-support.patch
++ features/all/igb/0162-igbvf-force-link-checking-when-mailbox-timeout-has-o.patch
++ features/all/igb/0163-igbvf-add-support-for-i350-VF-device.patch
++ features/all/igb/0164-igb-fix-sparse-warning.patch
++ features/all/igb/0165-Revert-r8169-Fix-up-backport-of-r8169-keep-firmware-.patch
++ features/all/igb/0166-igb-Add-support-for-i340-Quad-Port-Fiber-Adapter.patch
++ features/all/igb/0167-igb-Enable-PF-side-of-SR-IOV-support-for-i350-device.patch
++ features/all/igb/0168-igbvf-remove-Tx-hang-detection.patch
++ features/all/igb/0169-igb-Update-Intel-copyright-notice-for-driver-source.patch
++ features/all/igb/0170-igb-update-version-string.patch
++ features/all/igb/0171-net-use-pci_dev-revision-again.patch
++ features/all/igb/0172-igb-warn-if-max_vfs-limit-is-exceeded.patch
++ features/all/igb/0173-igb-Fix-reg-pattern-test-in-ethtool-for-i350-devices.patch
++ features/all/igb/0174-igb-Fix-strncpy-calls-to-be-safe-per-source-code-rev.patch
++ features/all/igb/0175-igb-Add-stats-output-for-OS2BMC-feature-on-i350-devi.patch
++ features/all/igb/0176-igb-Add-Energy-Efficient-Ethernet-EEE-for-i350-devic.patch
++ features/all/igb/0177-igb-Update-NVM-functions-to-work-with-i350-devices.patch
++ features/all/igb/0178-igb-Add-DMA-Coalescing-feature-to-driver.patch
++ features/all/igb/0179-igb-Bump-version-to-3.0.6.patch
++ features/all/igb/0180-igb-fix-hw-timestamping.patch
++ features/all/igb/0181-igb-Add-messaging-for-thermal-sensor-events-on-i350-.patch
++ features/all/igb/0182-Fix-common-misspellings.patch
++ features/all/igb/0183-igb-fix-typo-in-igb_validate_nvm_checksum_82580.patch
++ features/all/igb/0184-igb-introduce-igb_thermal_sensor_event-for-sensor-ch.patch
++ features/all/igb/0185-ethtool-Use-full-32-bit-speed-range-in-ethtool-s-set.patch
++ features/all/igb/0186-ethtool-cosmetic-Use-ethtool-ethtool_cmd_speed-API.patch
++ features/all/igb/0187-net-igb-e1000-e1000e-more-robust-ethtool-duplex-spee.patch
++ features/all/igb/0188-igb-Add-check-for-invalid-size-to-igb_get_invariants.patch
++ features/all/igb/0189-igbvf-remove-bogus-phys_id.patch
++ features/all/igb/0190-Add-appropriate-linux-prefetch.h-include-for-prefetc.patch
++ features/all/igb/0191-igb-fix-i350-SR-IOV-failture.patch
+- bugfix/all/r8169-keep-firmware-in-memory.patch
+- features/all/r8169-remove-the-firmware-of-RTL8111D-2.patch
++ features/all/r8169/0001-net-Use-netdev_alloc_skb_ip_align.patch
++ features/all/r8169/0002-r8169-move-PHY-regs-tables-to-.rodata.patch
++ features/all/r8169/0003-drivers-net-Move-and-to-end-of-previous-line.patch
++ features/all/r8169/0004-const-constify-remaining-dev_pm_ops.patch
++ features/all/r8169/0005-drivers-net-r8169.c-use-pM-to-shown-MAC-address.patch
++ features/all/r8169/0006-drivers-net-use-DEFINE_PCI_DEVICE_TABLE.patch
++ features/all/r8169/0007-net-use-netdev_mc_count-and-netdev_mc_empty-when-app.patch
++ features/all/r8169/0008-drivers-net-r8196.c-Use-netif_printk-macros.patch
++ features/all/r8169/0009-net-convert-multiple-drivers-to-use-netdev_for_each_.patch
++ features/all/r8169/0010-r8169-PCI-PM-Add-simplified-runtime-PM-support-rev.-.patch
++ features/all/r8169/0011-r8169-Fix-rtl8169_rx_interrupt.patch
++ features/all/r8169/0012-r8169-failure-to-enable-mwi-should-not-be-fatal.patch
++ features/all/r8169/0013-r8169-remove-unnecessary-cast-of-readl-s-return-valu.patch
++ features/all/r8169/0014-r8169-incorrect-identifier-for-a-8168dp.patch
++ features/all/r8169/0015-r8169-fix-rx-checksum-offload.patch
++ features/all/r8169/0016-r8169-add-gro-support.patch
++ features/all/r8169/0017-r8169-use-device-model-DMA-API.patch
++ features/all/r8169/0018-r8169-use-50-less-ram-for-RX-ring.patch
++ features/all/r8169/0019-r8169-check-dma-mapping-failures.patch
++ features/all/r8169/0020-r8169-init-rx-ring-cleanup.patch
++ features/all/r8169/0021-r8169-replace-PCI_DMA_-TO-FROM-DEVICE-to-DMA_-TO-FRO.patch
++ features/all/r8169/0022-r8169-use-pointer-to-struct-device-as-local-variable.patch
++ features/all/r8169/0023-r8169-do-not-account-fragments-as-packets.patch
++ features/all/r8169/0024-r8169-changing-mtu-clean-up.patch
++ features/all/r8169/0025-r8169-re-init-phy-on-resume.patch
++ features/all/r8169/0026-r8169-print-errors-when-dma-mapping-fail.patch
++ features/all/r8169/0027-r8169-fix-sleeping-while-holding-spinlock.patch
++ features/all/r8169/0028-r8169-fix-checksum-broken.patch
++ features/all/r8169/0029-r8169-Fix-runtime-power-management.patch
++ features/all/r8169/0030-drivers-net-.c-Use-static-const.patch
++ features/all/r8169/0031-r8169-remove-the-firmware-of-RTL8111D.patch
++ features/all/r8169/0032-r8169-identify-different-registers.patch
++ features/all/r8169/0033-r8169-use-device-dependent-methods-to-access-the-MII.patch
++ features/all/r8169/0034-r8169-8168DP-specific-MII-registers-access-methods.patch
++ features/all/r8169/0035-r8169-phy-power-ops.patch
++ features/all/r8169/0036-r8169-magic.patch
++ features/all/r8169/0037-r8169-rtl_csi_access_enable-rename.patch
++ features/all/r8169/0038-r8169-more-8168dp-support.patch
++ features/all/r8169/0039-r8169-delay-phy-init-until-device-opens.patch
++ features/all/r8169/0040-net-r8169-Update-the-function-of-parsing-firmware.patch
++ features/all/r8169/0041-r8169-keep-firmware-in-memory.patch
++ features/all/r8169/0042-r8169-RxFIFO-overflow-oddities-with-8168-chipsets.patch
++ features/all/r8169/0043-r8169-prevent-RxFIFO-induced-loops-in-the-irq-handle.patch
++ features/all/r8169/0044-drivers-net-Call-netif_carrier_off-at-the-end-of-the.patch
++ features/all/r8169/0045-r8169-fix-incorrect-args-to-oob-notify.patch
++ features/all/r8169/0046-r8169-correct-settings-of-rtl8102e.patch
++ features/all/r8169/0047-r8169-fix-RTL8168DP-power-off-issue.patch
++ features/all/r8169/0048-r8169-adjust-rtl8169_set_speed_xmii-function.patch
++ features/all/r8169/0049-r8169-support-the-new-chips-for-RTL8105E.patch
++ features/all/r8169/0050-r8169-support-control-of-advertising.patch
++ features/all/r8169/0051-r8169-fix-a-bug-in-rtl8169_init_phy.patch
++ features/all/r8169/0052-net-r8169-add-a-new-chip-for-RTL8105.patch
++ features/all/r8169/0053-net-r8169-add-a-new-chip-for-RTL8168DP.patch
++ features/all/r8169/0054-net-r8169-support-RTL8168E.patch
++ features/all/r8169/0055-r8169-TSO-fixes.patch
++ features/all/r8169/0056-r8169-don-t-request-firmware-when-there-s-no-userspa.patch
++ features/all/r8169/0057-r8169-fix-merge-conflict-fix.patch
++ features/all/r8169/0058-ethtool-Use-full-32-bit-speed-range-in-ethtool-s-set.patch
++ features/all/r8169/0059-ethtool-cosmetic-Use-ethtool-ethtool_cmd_speed-API.patch
++ features/all/r8169/0060-r8169-style-cleanups.patch
++ features/all/r8169/0061-r8169-remove-some-code-duplication.patch
++ features/all/r8169/0062-r8169-rtl8169_set_speed_xmii-cleanup.patch
++ features/all/r8169/0063-r8169-link-speed-selection-timer-rework.patch
++ features/all/r8169/0064-r8169-remove-non-NAPI-context-invocation-of-rtl8169_.patch
++ features/all/r8169/0065-r8169-provide-some-firmware-information-via-ethtool.patch
++ features/all/r8169/0066-r8169-merge-firmware-information-into-the-chipset-de.patch
++ features/all/r8169/0067-r8169-avoid-late-chip-identifier-initialisation.patch
++ features/all/r8169/0068-Add-appropriate-linux-prefetch.h-include-for-prefetc.patch
++ features/all/r8169/0069-r8169-fix-static-initializers.patch
++ features/all/r8169/0070-r8169-fix-wrong-register-use.patch
++ features/all/r8169/0071-r8169-Add-support-for-D-Link-530T-rev-C1-Kernel-Bug-.patch
++ features/all/tg3/0001-Revert-tg3-Fix-5906-transmit-hangs.patch
++ features/all/tg3/0002-tg3-Assign-flags-to-fixes-in-start_xmit_dma_bug.patch
++ features/all/tg3/0003-tg3-Fix-5906-transmit-hangs.patch
++ features/all/tg3/0004-tg3-Fix-disappearing-57780-devices.patch
++ features/all/tg3/0005-tg3-Convert-PHY_ADDR-TG3_PHY_MII_ADDR.patch
++ features/all/tg3/0006-tg3-Prevent-a-PCIe-tx-glitch.patch
++ features/all/tg3/0007-tg3-Add-more-PCI-DMA-map-error-checking.patch
++ features/all/tg3/0008-tg3-Improve-5785-PCIe-performance.patch
++ features/all/tg3/0009-tg3-Add-AC131-power-down-support.patch
++ features/all/tg3/0010-tg3-5785-Set-port-mode-to-MII-when-link-down.patch
++ features/all/tg3/0011-tg3-Extend-loopback-test-timeout.patch
++ features/all/tg3/0012-broadcom-Isolate-phy-dsp-accesses.patch
++ features/all/tg3/0013-broadcom-Fix-slow-link-problem.patch
++ features/all/tg3/0014-tg3-Add-50610M-phy-ID-for-5785.patch
++ features/all/tg3/0015-broadcom-Consolidate-dev_flags-definitions.patch
++ features/all/tg3/0016-tg3-broadcom-Add-PHY_BRCM_CLEAR_RGMII_MODE-flag.patch
++ features/all/tg3/0017-tg3-broadcom-Refine-AC131-APD-support.patch
++ features/all/tg3/0018-tg3-broadcom-Add-code-to-disable-rxc-refclk.patch
++ features/all/tg3/0019-tg3-broadcom-Add-APD-support-for-GPHYs.patch
++ features/all/tg3/0020-tg3-broadcom-Optionally-disable-TXC-if-no-link.patch
++ features/all/tg3/0021-tg3-Update-version-to-3.103.patch
++ features/all/tg3/0022-tg3-Add-5717-phy-ID.patch
++ features/all/tg3/0023-tg3-Don-t-touch-RCB-nic-addresses.patch
++ features/all/tg3/0024-tg3-Napify-tg3_start_xmit_dma_bug.patch
++ features/all/tg3/0025-tg3-Move-TG3_FLG2_PROTECTED_NVRAM-to-tg3_flags3.patch
++ features/all/tg3/0026-tg3-Refine-TSO-and-MSI-discovery.patch
++ features/all/tg3/0027-tg3-Add-new-HW_TSO_3-flag-for-5717.patch
++ features/all/tg3/0028-tg3-Use-tg3_start_xmit_dma_bug-for-5717-A0.patch
++ features/all/tg3/0029-tg3-Allow-DMAs-to-cross-cacheline-boundaries.patch
++ features/all/tg3/0030-tg3-Create-tg3_poll_msix-for-non-zero-MSIX-vecs.patch
++ features/all/tg3/0031-tg3-Move-napi_add-calls-below-tg3_get_invariants.patch
++ features/all/tg3/0032-tg3-Make-tg3_alloc_rx_skb-a-dst-only-operation.patch
++ features/all/tg3/0033-tg3-Add-prodring-parameter-to-tg3_alloc_rx_skb.patch
++ features/all/tg3/0034-tg3-tg3_alloc_rx_skb-tnapi-tp.patch
++ features/all/tg3/0035-tg3-rename-rx_-std-jmb-_ptr.patch
++ features/all/tg3/0036-tg3-Consider-rx_std_prod_idx-a-hw-mailbox.patch
++ features/all/tg3/0037-tg3-Lay-proucer-ring-handling-groundwork.patch
++ features/all/tg3/0038-tg3-Create-aliases-for-rx-producer-mailbox-regs.patch
++ features/all/tg3/0039-tg3-Add-rx-prod-ring-consolidation.patch
++ features/all/tg3/0040-tg3-Fix-DIDs-Enable-5717-support.patch
++ features/all/tg3/0041-tg3-Update-version-to-3.104.patch
++ features/all/tg3/0042-tg3-remove-use-of-skb_dma_map-unmap.patch
++ features/all/tg3/0043-drivers-net-Move-and-to-end-of-previous-line.patch
++ features/all/tg3/0044-tg3-Make-TSS-enable-independent-of-MSI-X-enable.patch
++ features/all/tg3/0045-tg3-Add-57765-asic-rev.patch
++ features/all/tg3/0046-tg3-Add-some-VPD-preprocessor-constants.patch
++ features/all/tg3/0047-tg3-Use-pci_read_vpd-instead-of-private-method.patch
++ features/all/tg3/0048-tg3-Clean-tg3_init_one.patch
++ features/all/tg3/0049-tg3-Update-version-to-3.105.patch
++ features/all/tg3/0050-broadcom-bcm54xx_shadow_read-errors-ignored-in-bcm54.patch
++ features/all/tg3/0051-broadcom-Fix-confusion-in-bcm54xx_adjust_rxrefclk.patch
++ features/all/tg3/0052-drivers-net-use-DEFINE_PCI_DEVICE_TABLE.patch
++ features/all/tg3/0053-tg3-Fix-std-prod-ring-nicaddr-for-5787-and-57765.patch
++ features/all/tg3/0054-tg3-Fix-std-rx-prod-ring-handling.patch
++ features/all/tg3/0055-tg3-Add-reliable-serdes-detection-for-5717-A0.patch
++ features/all/tg3/0056-tg3-Disable-5717-serdes-and-B0-support.patch
++ features/all/tg3/0057-tg3-Update-copyright-and-driver-version.patch
++ features/all/tg3/0058-tg3-Enable-PLL-PD-when-CLKREQ-disabled-for-5717A0.patch
++ features/all/tg3/0059-tg3-Improve-internal-resource-allocations.patch
++ features/all/tg3/0060-tg3-Add-5717-serdes-phy-ID.patch
++ features/all/tg3/0061-tg3-Abort-phy-init-for-5717-serdes-devices.patch
++ features/all/tg3/0062-tg3-Fix-5717-and-57765-memory-selftests.patch
++ features/all/tg3/0063-tg3-Supply-a-nicaddr-for-57765-jumbo-RCB.patch
++ features/all/tg3/0064-tg3-Fix-tx-mailbox-initialization.patch
++ features/all/tg3/0065-tg3-Turn-off-the-debug-UART-for-57765.patch
++ features/all/tg3/0066-tg3-Bypass-power-source-switching-for-57765.patch
++ features/all/tg3/0067-tg3-Add-57765-phy-ID-and-enable-devices.patch
++ features/all/tg3/0068-net-use-netdev_mc_count-and-netdev_mc_empty-when-app.patch
++ features/all/tg3/0069-tg3-Give-MSI-X-vec-1-rx-backlog-space.patch
++ features/all/tg3/0070-tg3-Prevent-rx-producer-ring-overruns.patch
++ features/all/tg3/0071-tg3-Unwedge-stuck-MSI-X-vectors.patch
++ features/all/tg3/0072-tg3-Fix-57765-A0-bootcode-race-condition.patch
++ features/all/tg3/0073-tg3-Turn-off-multiple-DMA-reads-for-5717.patch
++ features/all/tg3/0074-tg3-Reset-phy-during-bringup-when-using-phylib.patch
++ features/all/tg3/0075-tg3-Fix-napi-assignments-in-loopback-test.patch
++ features/all/tg3/0076-tg3-Fix-AC131-loopback-test-errors-for-5785.patch
++ features/all/tg3/0077-tg3-Update-driver-version-to-3.107.patch
++ features/all/tg3/0078-tg3-Enforce-DMA-mapping-skb-assignment-ordering.patch
++ features/all/tg3/0079-tg3-Make-57791-and-57795-10-100-only.patch
++ features/all/tg3/0080-tg3-Add-support-for-2-new-selfboot-formats.patch
++ features/all/tg3/0081-tg3-Allow-phylib-flowctrl-changes-anytime.patch
++ features/all/tg3/0082-tg3-Add-more-partno-entries-for-fallback-path.patch
++ features/all/tg3/0083-tg3-Rename-TG3_FLG3_RGMII_STD_IBND_DISABLE.patch
++ features/all/tg3/0084-tg3-Reduce-indent-level-of-tg3_rx_prodring_alloc.patch
++ features/all/tg3/0085-tg3-Discover-phy-address-once.patch
++ features/all/tg3/0086-tg3-Reformat-SSID-to-phy-ID-table.patch
++ features/all/tg3/0087-tg3-Rename-tg3-phy-ID-preprocessor-definitions.patch
++ features/all/tg3/0088-tg3-Push-phylib-definitions-to-phylib.patch
++ features/all/tg3/0089-tg3-Update-version-to-3.108.patch
++ features/all/tg3/0090-drivers-net-tg3.c-Use-pr-netdev-_-level-macro-helper.patch
++ features/all/tg3/0091-net-convert-multiple-drivers-to-use-netdev_for_each_.patch
++ features/all/tg3/0092-pci-Add-PCI-LRDT-tag-size-and-section-size.patch
++ features/all/tg3/0093-pci-Add-large-and-small-resource-data-type-code.patch
++ features/all/tg3/0094-pci-Add-helper-to-find-a-VPD-resource-data-type.patch
++ features/all/tg3/0095-pci-Add-VPD-information-field-helper-functions.patch
++ features/all/tg3/0096-pci-Add-helper-to-search-for-VPD-keywords.patch
++ features/all/tg3/0097-tg3-Remove-now-useless-VPD-code.patch
++ features/all/tg3/0098-drivers-net-tg3.c-change-the-field-used-with-the-TG3.patch
++ features/all/tg3/0099-tg3-Fix-MII_TG3_DSP_EXP8-offset.patch
++ features/all/tg3/0100-tg3-Restore-likely-check-in-tg3_poll_msix.patch
++ features/all/tg3/0101-tg3-Replace-pr_err-with-sensible-alternatives.patch
++ features/all/tg3/0102-tg3-netdev_err-dev_err.patch
++ features/all/tg3/0103-tg3-Fix-message-80-char-violations.patch
++ features/all/tg3/0104-tg3-Prepare-FW-version-code-for-VPD-versioning.patch
++ features/all/tg3/0105-tg3-Use-VPD-fw-version-when-present.patch
++ features/all/tg3/0106-tg3-Whitespace-constant-and-comment-updates.patch
++ features/all/tg3/0107-tg3-The-case-of-switches.patch
++ features/all/tg3/0108-tg3-Cleanup-if-codestyle.patch
++ features/all/tg3/0109-tg3-Remove-tg3_dump_state.patch
++ features/all/tg3/0110-tg3-Update-version-to-3.109.patch
++ features/all/tg3/0111-tg3-Disable-CLKREQ-in-L2.patch
++ features/all/tg3/0112-tg3-Set-card-57765-card-reader-MRRS-to-1024B.patch
++ features/all/tg3/0113-tg3-Reduce-57765-core-clock-when-link-at-10Mbps.patch
++ features/all/tg3/0114-tg3-Optimize-rx-double-copy-test.patch
++ features/all/tg3/0115-tg3-Re-inline-VLAN-tags-when-appropriate.patch
++ features/all/tg3/0116-tg3-Unify-max-pkt-size-preprocessor-constants.patch
++ features/all/tg3/0117-tg3-Remove-function-errors-flagged-by-checkpatch.patch
++ features/all/tg3/0118-tg3-Update-version-to-3.110.patch
++ features/all/tg3/0119-tg3-use-the-DMA-state-API-instead-of-the-pci-equival.patch
++ features/all/tg3/0120-tg3-Enable-GRO-by-default.patch
++ features/all/tg3/0121-tg3-Relocate-APE-mutex-regs-for-5717.patch
++ features/all/tg3/0122-tg3-Avoid-tx-lockups-on-5755-devices.patch
++ features/all/tg3/0123-tg3-Fix-a-memory-leak-on-5717-devices.patch
++ features/all/tg3/0124-tg3-Allow-single-MSI-X-vector-allocations.patch
++ features/all/tg3/0125-tg3-5717-Allow-serdes-link-via-parallel-detect.patch
++ features/all/tg3/0126-tg3-Use-devfn-to-determine-function-number.patch
++ features/all/tg3/0127-tg3-Add-5719-ASIC-rev.patch
++ features/all/tg3/0128-tg3-Add-5719-PCI-device-and-phy-IDs.patch
++ features/all/tg3/0129-tg3-Update-version-to-3.111.patch
++ features/all/tg3/0130-broadcom-move-all-PHY_ID-s-to-header.patch
++ features/all/tg3/0131-broadcom-Add-5241-support.patch
++ features/all/tg3/0132-tg3-allow-TSO-on-vlan-devices.patch
++ features/all/tg3/0133-tg3-Fix-single-MSI-X-vector-coalescing.patch
++ features/all/tg3/0134-tg3-Fix-IPv6-TSO-code-in-tg3_start_xmit_dma_bug.patch
++ features/all/tg3/0135-tg3-Relax-5717-serdes-restriction.patch
++ features/all/tg3/0136-tg3-Report-driver-version-to-firmware.patch
++ features/all/tg3/0137-tg3-Revert-PCIe-tx-glitch-fix.patch
++ features/all/tg3/0138-tg3-Fix-some-checkpatch-errors.patch
++ features/all/tg3/0139-tg3-Update-version-to-3.112.patch
++ features/all/tg3/0140-tg3-Add-5784-ASIC-rev-to-earlier-PCIe-MPS-fix.patch
++ features/all/tg3/0141-tg3-Disable-TSS-also-during-tg3_close.patch
++ features/all/tg3/0142-tg3-Create-TG3_FLG3_5717_PLUS-flag.patch
++ features/all/tg3/0143-tg3-Don-t-access-phy-test-ctrl-reg-for-5717.patch
++ features/all/tg3/0144-tg3-Manage-gphy-power-for-CPMU-less-devs-only.patch
++ features/all/tg3/0145-tg3-Restrict-ASPM-workaround-devlist.patch
++ features/all/tg3/0146-tg3-Detect-APE-firmware-types.patch
++ features/all/tg3/0147-tg3-Remove-5720-5750-and-5750M.patch
++ features/all/tg3/0148-tg3-Improve-small-packet-performance.patch
++ features/all/tg3/0149-tg3-Add-error-reporting-to-tg3_phydsp_write.patch
++ features/all/tg3/0150-tg3-Add-phy-related-preprocessor-constants.patch
++ features/all/tg3/0151-tg3-Create-phy_flags-and-migrate-phy_is_low_power.patch
++ features/all/tg3/0152-tg3-Migrate-tg3_flags-to-phy_flags.patch
++ features/all/tg3/0153-tg3-Update-version-to-3.113.patch
++ features/all/tg3/0154-tg3-Fix-read-DMA-FIFO-overruns-on-recent-devices.patch
++ features/all/tg3/0155-tg3-Disable-TSS.patch
++ features/all/tg3/0156-tg3-Don-t-send-APE-events-for-NCSI-firmware.patch
++ features/all/tg3/0157-tg3-Unlock-5717-B0-support.patch
++ features/all/tg3/0158-tg3-Clarify-semantics-of-TG3_IRQ_MAX_VECS.patch
++ features/all/tg3/0159-tg3-Move-producer-ring-struct-to-tg3_napi.patch
++ features/all/tg3/0160-tg3-Use-skb_is_gso_v6.patch
++ features/all/tg3/0161-tg3-Dynamically-allocate-VPD-data-memory.patch
++ features/all/tg3/0162-tg3-phy-tmp-variable-roundup.patch
++ features/all/tg3/0163-drivers-net-return-operator-cleanup.patch
++ features/all/tg3/0164-tg3-Fix-potential-netpoll-crash.patch
++ features/all/tg3/0165-tg3-5719-Prevent-tx-data-corruption.patch
++ features/all/tg3/0166-tg3-Remove-5724-device-ID.patch
++ features/all/tg3/0167-tg3-Cleanup-missing-VPD-partno-section.patch
++ features/all/tg3/0168-tg3-Futureproof-the-loopback-test.patch
++ features/all/tg3/0169-tg3-Prepare-for-larger-rx-ring-sizes.patch
++ features/all/tg3/0170-tg3-Add-extend-rx-ring-sizes-for-5717-and-5719.patch
++ features/all/tg3/0171-tg3-Update-version-to-3.114.patch
++ features/all/tg3/0172-net-tg3-simplify-conditional.patch
++ features/all/tg3/0173-tg3-Add-support-for-selfboot-format-1-v6.patch
++ features/all/tg3/0174-tg3-Disable-unused-transmit-rings.patch
++ features/all/tg3/0175-tg3-Add-clause-45-register-accessor-methods.patch
++ features/all/tg3/0176-tg3-Add-EEE-support.patch
++ features/all/tg3/0177-tg3-Cleanup-tg3_alloc_rx_skb.patch
++ features/all/tg3/0178-tg3-Don-t-allocate-jumbo-ring-for-5780-class-devs.patch
++ features/all/tg3/0179-tg3-Report-invalid-link-from-tg3_get_settings.patch
++ features/all/tg3/0180-tg3-Update-version-to-3.115.patch
++ features/all/tg3/0181-tg3-Do-not-call-device_set_wakeup_enable-under-spin_.patch
++ features/all/tg3/0182-tg3-Apply-10Mbps-fix-to-all-57765-revisions.patch
++ features/all/tg3/0183-tg3-Assign-correct-tx-margin-for-5719.patch
++ features/all/tg3/0184-tg3-Always-turn-on-APE-features-in-mac_mode-reg.patch
++ features/all/tg3/0185-tg3-Fix-5719-internal-FIFO-overflow-problem.patch
++ features/all/tg3/0186-tg3-Reorg-tg3_napi-members.patch
++ features/all/tg3/0187-tg3-Enable-mult-rd-DMA-engine-on-5719.patch
++ features/all/tg3/0188-tg3-Reenable-TSS-for-5719.patch
++ features/all/tg3/0189-tg3-use-dma_alloc_coherent-instead-of-pci_alloc_cons.patch
++ features/all/tg3/0190-tg3-Enable-phy-APD-for-5717-and-later-asic-revs.patch
++ features/all/tg3/0191-tg3-Remove-tg3_config_info-definition.patch
++ features/all/tg3/0192-tg3-Raise-the-jumbo-frame-BD-flag-threshold.patch
++ features/all/tg3/0193-tg3-Move-EEE-definitions-into-mdio.h.patch
++ features/all/tg3/0194-tg3-Fix-57765-EEE-support.patch
++ features/all/tg3/0195-tg3-Minor-EEE-code-tweaks.patch
++ features/all/tg3/0196-tg3-Relax-EEE-thresholds.patch
++ features/all/tg3/0197-tg3-Update-version-to-3.116.patch
++ features/all/tg3/0198-tg3-Use-DEFINE_PCI_DEVICE_TABLE.patch
++ features/all/tg3/0199-tg3-Do-not-use-legacy-PCI-power-management.patch
++ features/all/tg3/0200-tg3-fix-return-value-check-in-tg3_read_vpd.patch
++ features/all/tg3/0201-tg3-fix-warnings.patch
++ features/all/tg3/0202-tg3-Restrict-phy-ioctl-access.patch
++ features/all/tg3/0203-tg3-Fix-loopback-tests.patch
++ features/all/tg3/0204-tg3-Disable-MAC-loopback-test-for-CPMU-devices.patch
++ features/all/tg3/0205-tg3-Fix-NVRAM-selftest.patch
++ features/all/tg3/0206-tg3-Only-allow-phy-ioctls-while-netif_running.patch
++ features/all/tg3/0207-tg3-Set-tx-bug-flags-for-more-devices.patch
+
+- bugfix/all/perf-remove-the-nmi-parameter-from-the-swevent-and-overflow-interface.patch
+- bugfix/all/comedi-fix-infoleak-to-userspace.patch
+- bugfix/all/gro-only-reset-frag0-when-skb-can-be-pulled.patch
+- bugfix/all/proc-fix-a-race-in-do_io_accounting.patch
+- bugfix/all/proc-restrict-access-to-proc-pid-io.patch
+- bugfix/all/si4713-i2c-avoid-potential-buffer-overflow-on-si4713.patch
+- bugfix/all/tunnels-fix-netns-vs-proto-registration-ordering-regression-fix.patch
+- bugfix/all/tunnels-fix-netns-vs-proto-registration-ordering.patch
+- bugfix/all/netns-xfrm-fixup-xfrm6_tunnel-error-propagation.patch
+- bugfix/all/gre-fix-netns-vs-proto-registration-ordering.patch
+- bugfix/all/cifs-check-for-NULL-session-password.patch
+- bugfix/all/cifs-fix-NULL-pointer-dereference-in-cifs_find_smb_ses.patch
+- bugfix/all/cifs-clean-up-cifs_find_smb_ses.patch
++ bugfix/all/stable/2.6.32.44.patch
++ bugfix/all/tunnels-fix-netns-vs-proto-registration-ordering-regression-fix.patch
++ bugfix/all/proc-fix-a-race-in-do_io_accounting.patch
++ debian/bridge-avoid-ABI-change-in-2.6.32.44.patch
+
+- bugfix/all/CVE-2011-3188.patch
++ bugfix/all/stable/2.6.32.45.patch
+
++ bugfix/all/sched-work-around-sched_group-cpu_power-0.patch
++ bugfix/x86/revert-x86-hotplug-Use-mwait-to-offline-a-processor-.patch
++ bugfix/all/fs-devpts-inode.c-correctly-check-d_alloc_name-retur.patch
++ bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch
+
+- bugfix/all/perf-do-not-look-at-.-config-for-configuration.patch 
+- bugfix/all/befs-validate-length-of-long-symbolic-links.patch
+- bugfix/all/efi-corrupted-GUID-partition-tables-can-cause-kernel-oops.patch
++ bugfix/all/stable/2.6.32.46.patch
+
++ bugfix/all/drm-ttm-fix-ttm_bo_add_ttm-user-failure-path.patch
++ bugfix/all/igb-Fix-lack-of-flush-after-register-write-and-befor.patch
++ bugfix/all/3c503-fix-broken-IRQ-autoprobing.patch

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/37 (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/37)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/37	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/37)
@@ -0,0 +1,4 @@
+- features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS.patch
++ features/all/pm-Define-SET_SYSTEM_SLEEP_PM_OPS-2.patch
++ bugfix/all/splice-direct_splice_actor-should-not-use-pos-in-sd.patch
++ features/all/PCI-introduce-pci_pcie_cap.patch

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/38 (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/38)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/38	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/38)
@@ -0,0 +1 @@
+- bugfix/all/ipv6-make-fragment-identifications-less-predictable.patch

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/39 (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/39	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39)
@@ -0,0 +1,3 @@
++ bugfix/all/revert-xen-use-IRQF_FORCE_RESUME.patch
++ bugfix/all/net-fix-ipv6-gso-type-checks-in-intel-ethernet-drivers.patch
++ bugfix/all/ipv6-add-gso-support-on-forwarding-path.patch

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/39-extra (from r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39-extra)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/39-extra	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, releases/linux-2.6/2.6.32-39/debian/patches/series/39-extra)
@@ -0,0 +1,41 @@
+- bugfix/all/sched-work-around-sched_group-cpu_power-0.patch featureset=openvz
++ debian/revert-sched-changes-in-2.6.32.29.patch featureset=openvz
++ features/all/openvz/openvz.patch featureset=openvz
++ features/all/openvz/0001-sunrpc-ve-semaphore-deadlock-fixed.patch featureset=openvz
++ features/all/openvz/0002-venfs-Backport-some-patches-from-rhel6-branch.patch featureset=openvz
++ features/all/openvz/0003-VE-shutdown-environment-only-if-VE-pid-ns-is-destroy.patch featureset=openvz
++ features/all/openvz/0004-net-decriment-unix_nr_socks-if-ub_other_sock_charge-.patch featureset=openvz
++ features/all/openvz/0005-ve-Fix-d_path-return-code-when-no-buffer-given.patch featureset=openvz
++ features/all/openvz/ptrace_dont_allow_process_without_memory_map_v2.patch featureset=openvz
++ features/all/openvz/cpt-Allow-ext4-mount.patch featureset=openvz
+
++ features/all/vserver/vs2.3.0.36.29.7.patch featureset=vserver
++ features/all/vserver/vserver-complete-fix-for-CVE-2010-4243.patch featureset=vserver
++ features/all/vserver/vserver-Wire-up-syscall-on-powerpc.patch featureset=vserver
+
++ features/all/xen/pvops.patch featureset=xen
++ features/all/xen/xen-netfront-make-smartpoll-optional-and-default-off.patch featureset=xen
++ features/all/xen/xen-grant-table-do-not-truncate-machine-address-on-g.patch featureset=xen
++ features/all/xen/Fix-one-race-condition-for-netfront-smartpoll-logic.patch featureset=xen
++ features/all/xen/xen-netfront-Fix-another-potential-race-condition.patch featureset=xen
++ features/all/xen/xen-netfront-unconditionally-initialize-smartpoll-hr.patch featureset=xen
++ features/all/xen/xen-allocate-irq-descs-on-any-NUMA-node.patch featureset=xen
++ features/all/xen/xen-disable-ACPI-NUMA-for-PV-guests.patch featureset=xen
++ features/all/xen/xen-acpi-Add-cpu-hotplug-support.patch featureset=xen
++ features/all/xen/fbmem-VM_IO-set-but-not-propagated.patch featureset=xen
++ features/all/xen/ttm-Set-VM_IO-only-on-pages-with-TTM_MEMTYPE_FLAG_N.patch featureset=xen
++ features/all/xen/ttm-Change-VMA-flags-if-they-to-the-TTM-flags.patch featureset=xen
++ features/all/xen/drm-ttm-Add-ttm_tt_free_page.patch featureset=xen
++ features/all/xen/ttm-Introduce-a-placeholder-for-DMA-bus-addresses.patch featureset=xen
++ features/all/xen/ttm-Utilize-the-dma_addr_t-array-for-pages-that-are.patch featureset=xen
++ features/all/xen/ttm-Expand-populate-to-support-an-array-of-DMA-a.patch featureset=xen
++ features/all/xen/radeon-ttm-PCIe-Use-dma_addr-if-TTM-has-set-it.patch featureset=xen
++ features/all/xen/nouveau-ttm-PCIe-Use-dma_addr-if-TTM-has-set-it.patch featureset=xen
++ features/all/xen/radeon-PCIe-Use-the-correct-index-field.patch featureset=xen
++ features/all/xen/xen-netback-Drop-GSO-SKBs-which-do-not-have-csum_b.patch featureset=xen
++ features/all/xen/xen-blkback-CVE-2010-3699.patch featureset=xen
++ features/all/xen/xen-do-not-release-any-memory-under-1M-in-domain-0.patch featureset=xen
++ features/all/xen/x86-mm-Hold-mm-page_table_lock-while-doing-vmalloc_s.patch featureset=xen
++ features/all/xen/x86-mm-Fix-incorrect-data-type-in-vmalloc_sync_all.patch featureset=xen
++ features/all/xen/vmalloc-eagerly-clear-ptes-on-vunmap.patch featureset=xen
++ features/all/xen/x86-mm-Fix-pgd_lock-deadlock.patch featureset=xen

Copied: dists/squeeze-security/linux-2.6/debian/patches/series/39squeeze1 (from r18473, dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze3)
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/39squeeze1	Sun Jan  8 10:47:12 2012	(r18474, copy of r18473, dists/squeeze-security/linux-2.6/debian/patches/series/35squeeze3)
@@ -0,0 +1,7 @@
++ bugfix/all/add-scsi_cmd_blk_ioctl-wrapper.patch
++ bugfix/all/limit-ioctls-forwarded-to-non-scsi-devices.patch
++ bugfix/all/treat-lvs-on-one-pv-like-a-partition.patch
++ bugfix/all/xfs-fix-possible-memory-corruption-in-xfs_readlink.patch
++ bugfix/all/KEYS-Fix-a-NULL-pointer-deref-in-the-user-defined-key-type.patch
++ bugfix/x86/kvm-prevent-starting-pit-timers-in-the-absence-of-irqchip-support.patch
++ bugfix/all/rose-add-length-checks-to-CALL_REQUEST-parsing.patch

Modified: dists/squeeze-security/linux-2.6/debian/templates/image.plain.bug/include-network
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/templates/image.plain.bug/include-network	Sun Jan  8 10:41:18 2012	(r18473)
+++ dists/squeeze-security/linux-2.6/debian/templates/image.plain.bug/include-network	Sun Jan  8 10:47:12 2012	(r18474)
@@ -27,11 +27,6 @@
     netstat -s >&3 || true
     echo >&3
   fi
-  echo '*** Device features:' >&3
-  for dir in /sys/class/net/*; do
-    echo -n "${dir##*/}: " >&3
-    cat "$dir"/features >&3
-  done
   echo >&3
 }
 



More information about the Kernel-svn-changes mailing list