[kernel] r21758 - in dists/trunk/linux/debian: . patches patches/features/powerpc

Aurelien Jarno aurel32 at moszumanska.debian.org
Mon Sep 1 19:34:44 UTC 2014


Author: aurel32
Date: Mon Sep  1 19:34:44 2014
New Revision: 21758

Log:
[powerpc,ppc64el] Backport KVM little endian support from 3.17.

Added:
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Allow-KVM_CHECK_EXTENSION-on-the-vm-fd.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Don-t-keep-reference-to-irq-routing-table-in-irq.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Give-IRQFD-its-own-separate-enabling-Kconfig-opt.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-all-accesses-to-kvm-irq_routing-into-irqchi.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-irq-notifier-implementation-into-eventfd.c.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-Controls-for-in-kernel-sPAPR-hypercal.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-XICS-in-BE.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-guest-VPA-in-BE.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-host-lppaca-and-shadow-slb-.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Enable-for-little-endian-hosts.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-indirect-branch-issue.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-on-LE.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Make-HTAB-code-LE-host-aware.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Fix-sparse-endian-checks.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Handle-hyp-doorbell-exits.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-HV-Fix-tlbie-compile-error.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-PR-Disable-AIL-mode-with-OPAL.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Enable-IRQFD-support-for-the-XICS-interrupt-.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-Rename-and-add-argument-to-check_extension.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-irqchip-Provide-and-use-accessors-for-irq-routin.patch
   dists/trunk/linux/debian/patches/features/powerpc/KVM-prepare-for-KVM_-S-G-ET_MP_STATE-on-other-archit.patch
   dists/trunk/linux/debian/patches/features/powerpc/PPC-Add-asm-helpers-for-BE-32bit-load-store.patch
Modified:
   dists/trunk/linux/debian/changelog
   dists/trunk/linux/debian/patches/series

Modified: dists/trunk/linux/debian/changelog
==============================================================================
--- dists/trunk/linux/debian/changelog	Sat Aug 30 20:28:09 2014	(r21757)
+++ dists/trunk/linux/debian/changelog	Mon Sep  1 19:34:44 2014	(r21758)
@@ -27,6 +27,7 @@
   * Update Spanish debconf template translations (Matias A. Bellone)
     (Closes: #758591).
   * [mips*/loongson3] Backport Loongson 3B support from 3.17.
+  * [powerpc,ppc64el] Backport KVM little endian support from 3.17.
 
   [ maximilian attems ]
   * Redisable UAS due to trouble with Seagate expansion drives

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Allow-KVM_CHECK_EXTENSION-on-the-vm-fd.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Allow-KVM_CHECK_EXTENSION-on-the-vm-fd.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,144 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 14 Jul 2014 18:33:08 +0200
+Subject: KVM: Allow KVM_CHECK_EXTENSION on the vm fd
+Origin: https://git.kernel.org/linus/92b591a4c46b103ebd3fc0d03a084e1efd331253
+
+The KVM_CHECK_EXTENSION ioctl is only available on the kvm fd today. Unfortunately,
+on PPC some of the capabilities change depending on the way a VM was created.
+
+So instead we need a way to expose capabilities as a VM ioctl, so that we can
+see which VM type we're using (HV or PR). To enable this, add the
+KVM_CHECK_EXTENSION ioctl to our vm ioctl portfolio.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+Acked-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ Documentation/virtual/kvm/api.txt |    7 +++--
+ include/uapi/linux/kvm.h          |    1 +
+ virt/kvm/kvm_main.c               |   58 ++++++++++++++++++++-----------------
+ 3 files changed, 37 insertions(+), 29 deletions(-)
+
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 884f819..8898caf 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -148,9 +148,9 @@ of banks, as set via the KVM_X86_SETUP_MCE ioctl.
+ 
+ 4.4 KVM_CHECK_EXTENSION
+ 
+-Capability: basic
++Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl
+ Architectures: all
+-Type: system ioctl
++Type: system ioctl, vm ioctl
+ Parameters: extension identifier (KVM_CAP_*)
+ Returns: 0 if unsupported; 1 (or some other positive integer) if supported
+ 
+@@ -160,6 +160,9 @@ receives an integer that describes the extension availability.
+ Generally 0 means no and 1 means yes, but some extensions may report
+ additional information in the integer return value.
+ 
++Based on their initialization different VMs may have different capabilities.
++It is thus encouraged to use the vm ioctl to query for capabilities (available
++with KVM_CAP_CHECK_EXTENSION_VM on the vm fd)
+ 
+ 4.5 KVM_GET_VCPU_MMAP_SIZE
+ 
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 0418b74..51776ca 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -759,6 +759,7 @@ struct kvm_ppc_smmu_info {
+ #define KVM_CAP_ARM_PSCI_0_2 102
+ #define KVM_CAP_PPC_FIXUP_HCALL 103
+ #define KVM_CAP_PPC_ENABLE_HCALL 104
++#define KVM_CAP_CHECK_EXTENSION_VM 105
+ 
+ #ifdef KVM_CAP_IRQ_ROUTING
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e28f3ca..1b95cc9 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2324,6 +2324,34 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
+ 	return 0;
+ }
+ 
++static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
++{
++	switch (arg) {
++	case KVM_CAP_USER_MEMORY:
++	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
++	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
++#ifdef CONFIG_KVM_APIC_ARCHITECTURE
++	case KVM_CAP_SET_BOOT_CPU_ID:
++#endif
++	case KVM_CAP_INTERNAL_ERROR_DATA:
++#ifdef CONFIG_HAVE_KVM_MSI
++	case KVM_CAP_SIGNAL_MSI:
++#endif
++#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++	case KVM_CAP_IRQFD_RESAMPLE:
++#endif
++	case KVM_CAP_CHECK_EXTENSION_VM:
++		return 1;
++#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++	case KVM_CAP_IRQ_ROUTING:
++		return KVM_MAX_IRQ_ROUTES;
++#endif
++	default:
++		break;
++	}
++	return kvm_vm_ioctl_check_extension(kvm, arg);
++}
++
+ static long kvm_vm_ioctl(struct file *filp,
+ 			   unsigned int ioctl, unsigned long arg)
+ {
+@@ -2487,6 +2515,9 @@ static long kvm_vm_ioctl(struct file *filp,
+ 		r = 0;
+ 		break;
+ 	}
++	case KVM_CHECK_EXTENSION:
++		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
++		break;
+ 	default:
+ 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
+ 		if (r == -ENOTTY)
+@@ -2571,33 +2602,6 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
+ 	return r;
+ }
+ 
+-static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+-{
+-	switch (arg) {
+-	case KVM_CAP_USER_MEMORY:
+-	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+-	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
+-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+-	case KVM_CAP_SET_BOOT_CPU_ID:
+-#endif
+-	case KVM_CAP_INTERNAL_ERROR_DATA:
+-#ifdef CONFIG_HAVE_KVM_MSI
+-	case KVM_CAP_SIGNAL_MSI:
+-#endif
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+-	case KVM_CAP_IRQFD_RESAMPLE:
+-#endif
+-		return 1;
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+-	case KVM_CAP_IRQ_ROUTING:
+-		return KVM_MAX_IRQ_ROUTES;
+-#endif
+-	default:
+-		break;
+-	}
+-	return kvm_vm_ioctl_check_extension(kvm, arg);
+-}
+-
+ static long kvm_dev_ioctl(struct file *filp,
+ 			  unsigned int ioctl, unsigned long arg)
+ {
+-- 
+1.7.10.4
+

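For illustration, here is how userspace would use the new vm ioctl -- a
minimal sketch assuming only the standard KVM uapi headers, with error
handling omitted and KVM_CAP_PPC_ENABLE_HCALL used purely as an example
capability to query:

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  int main(void)
  {
      int kvm = open("/dev/kvm", O_RDWR);
      int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
      int cap = KVM_CAP_PPC_ENABLE_HCALL;  /* example capability */
      int r;

      /* Old style: ask the system fd; on PPC the answer cannot depend
       * on whether this particular VM is HV or PR. */
      r = ioctl(kvm, KVM_CHECK_EXTENSION, cap);

      /* New style: if KVM_CAP_CHECK_EXTENSION_VM is advertised, ask the
       * vm fd so the answer is specific to this VM. */
      if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_CHECK_EXTENSION_VM) > 0)
          r = ioctl(vm, KVM_CHECK_EXTENSION, cap);

      return r > 0 ? 0 : 1;
  }
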
Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Don-t-keep-reference-to-irq-routing-table-in-irq.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Don-t-keep-reference-to-irq-routing-table-in-irq.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,134 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:09 +1000
+Subject: KVM: Don't keep reference to irq routing table in
+ irqfd struct
+Origin: https://git.kernel.org/linus/56f89f3629ffd1a21d38c3d0bea23deac0e284ce
+
+This makes the irqfd code keep a copy of the irq routing table entry
+for each irqfd, rather than a reference to the copy in the actual
+irq routing table maintained in virt/kvm/irqchip.c.  This will enable
+us to change the routing table structure in future, or even not have a
+routing table at all on some platforms.
+
+The synchronization that was previously achieved using srcu_dereference
+on the read side is now achieved using a seqcount_t structure.  That
+ensures that we don't get a halfway-updated copy of the structure if
+we read it while another thread is updating it.
+
+We still use srcu_read_lock/unlock around the read side so that when
+changing the routing table we can be sure that after calling
+synchronize_srcu, nothing will be using the old routing.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ virt/kvm/eventfd.c |   41 +++++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 16 deletions(-)
+
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 20c3af7..bae593a 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -33,6 +33,7 @@
+ #include <linux/kernel.h>
+ #include <linux/srcu.h>
+ #include <linux/slab.h>
++#include <linux/seqlock.h>
+ 
+ #include "iodev.h"
+ 
+@@ -75,7 +76,8 @@ struct _irqfd {
+ 	struct kvm *kvm;
+ 	wait_queue_t wait;
+ 	/* Update side is protected by irqfds.lock */
+-	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
++	struct kvm_kernel_irq_routing_entry irq_entry;
++	seqcount_t irq_entry_sc;
+ 	/* Used for level IRQ fast-path */
+ 	int gsi;
+ 	struct work_struct inject;
+@@ -223,16 +225,20 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+ {
+ 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
+ 	unsigned long flags = (unsigned long)key;
+-	struct kvm_kernel_irq_routing_entry *irq;
++	struct kvm_kernel_irq_routing_entry irq;
+ 	struct kvm *kvm = irqfd->kvm;
++	unsigned seq;
+ 	int idx;
+ 
+ 	if (flags & POLLIN) {
+ 		idx = srcu_read_lock(&kvm->irq_srcu);
+-		irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu);
++		do {
++			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
++			irq = irqfd->irq_entry;
++		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
+ 		/* An event has been signaled, inject an interrupt */
+-		if (irq)
+-			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
++		if (irq.type == KVM_IRQ_ROUTING_MSI)
++			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
+ 					false);
+ 		else
+ 			schedule_work(&irqfd->inject);
+@@ -277,18 +283,20 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+ {
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 
+-	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+-		rcu_assign_pointer(irqfd->irq_entry, NULL);
+-		return;
+-	}
++	write_seqcount_begin(&irqfd->irq_entry_sc);
++
++	irqfd->irq_entry.type = 0;
++	if (irqfd->gsi >= irq_rt->nr_rt_entries)
++		goto out;
+ 
+ 	hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
+ 		/* Only fast-path MSI. */
+ 		if (e->type == KVM_IRQ_ROUTING_MSI)
+-			rcu_assign_pointer(irqfd->irq_entry, e);
+-		else
+-			rcu_assign_pointer(irqfd->irq_entry, NULL);
++			irqfd->irq_entry = *e;
+ 	}
++
++ out:
++	write_seqcount_end(&irqfd->irq_entry_sc);
+ }
+ 
+ static int
+@@ -310,6 +318,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ 	INIT_LIST_HEAD(&irqfd->list);
+ 	INIT_WORK(&irqfd->inject, irqfd_inject);
+ 	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
++	seqcount_init(&irqfd->irq_entry_sc);
+ 
+ 	f = fdget(args->fd);
+ 	if (!f.file) {
+@@ -466,14 +475,14 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
+ 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
+ 		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
+ 			/*
+-			 * This rcu_assign_pointer is needed for when
++			 * This clearing of irq_entry.type is needed for when
+ 			 * another thread calls kvm_irq_routing_update before
+ 			 * we flush workqueue below (we synchronize with
+ 			 * kvm_irq_routing_update using irqfds.lock).
+-			 * It is paired with synchronize_srcu done by caller
+-			 * of that function.
+ 			 */
+-			rcu_assign_pointer(irqfd->irq_entry, NULL);
++			write_seqcount_begin(&irqfd->irq_entry_sc);
++			irqfd->irq_entry.type = 0;
++			write_seqcount_end(&irqfd->irq_entry_sc);
+ 			irqfd_deactivate(irqfd);
+ 		}
+ 	}
+-- 
+1.7.10.4
+

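The seqcount pattern introduced above, reduced to its essentials -- a
minimal sketch in kernel style; the wrapper struct and function names are
illustrative, only the <linux/seqlock.h> calls are the ones the patch uses:

  #include <linux/seqlock.h>

  struct entry_copy {
      struct kvm_kernel_irq_routing_entry entry;  /* a copy, not a pointer */
      seqcount_t sc;
  };

  /* Writer side: callers must serialize against each other
   * (irqfds.lock in the patch). */
  static void entry_update(struct entry_copy *c,
                           const struct kvm_kernel_irq_routing_entry *e)
  {
      write_seqcount_begin(&c->sc);
      c->entry = *e;
      write_seqcount_end(&c->sc);
  }

  /* Reader side: retry until a snapshot is read that no writer raced with. */
  static struct kvm_kernel_irq_routing_entry entry_read(struct entry_copy *c)
  {
      struct kvm_kernel_irq_routing_entry irq;
      unsigned int seq;

      do {
          seq = read_seqcount_begin(&c->sc);
          irq = c->entry;
      } while (read_seqcount_retry(&c->sc, seq));
      return irq;
  }
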
Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Give-IRQFD-its-own-separate-enabling-Kconfig-opt.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Give-IRQFD-its-own-separate-enabling-Kconfig-opt.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,174 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:13 +1000
+Subject: KVM: Give IRQFD its own separate enabling Kconfig
+ option
+Origin: https://git.kernel.org/linus/297e21053a52f060944e9f0de4c64fad9bcd72fc
+
+Currently, the IRQFD code is conditional on CONFIG_HAVE_KVM_IRQ_ROUTING.
+So that we can have the IRQFD code compiled in without having the
+IRQ routing code, this creates a new CONFIG_HAVE_KVM_IRQFD, makes
+the IRQFD code conditional on it instead of CONFIG_HAVE_KVM_IRQ_ROUTING,
+and makes all the platforms that currently select HAVE_KVM_IRQ_ROUTING
+also select HAVE_KVM_IRQFD.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/ia64/kvm/Kconfig    |    1 +
+ arch/powerpc/kvm/Kconfig |    1 +
+ arch/s390/kvm/Kconfig    |    1 +
+ arch/x86/kvm/Kconfig     |    1 +
+ include/linux/kvm_host.h |    8 ++++----
+ virt/kvm/Kconfig         |    3 +++
+ virt/kvm/eventfd.c       |    6 +++---
+ virt/kvm/kvm_main.c      |    2 +-
+ 8 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
+index 990b864..3d50ea9 100644
+--- a/arch/ia64/kvm/Kconfig
++++ b/arch/ia64/kvm/Kconfig
+@@ -25,6 +25,7 @@ config KVM
+ 	select PREEMPT_NOTIFIERS
+ 	select ANON_INODES
+ 	select HAVE_KVM_IRQCHIP
++	select HAVE_KVM_IRQFD
+ 	select HAVE_KVM_IRQ_ROUTING
+ 	select KVM_APIC_ARCHITECTURE
+ 	select KVM_MMIO
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index 8f104a6..d4741db 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -158,6 +158,7 @@ config KVM_MPIC
+ 	bool "KVM in-kernel MPIC emulation"
+ 	depends on KVM && E500
+ 	select HAVE_KVM_IRQCHIP
++	select HAVE_KVM_IRQFD
+ 	select HAVE_KVM_IRQ_ROUTING
+ 	select HAVE_KVM_MSI
+ 	help
+diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
+index 10d529a..646db9c 100644
+--- a/arch/s390/kvm/Kconfig
++++ b/arch/s390/kvm/Kconfig
+@@ -26,6 +26,7 @@ config KVM
+ 	select KVM_ASYNC_PF
+ 	select KVM_ASYNC_PF_SYNC
+ 	select HAVE_KVM_IRQCHIP
++	select HAVE_KVM_IRQFD
+ 	select HAVE_KVM_IRQ_ROUTING
+ 	---help---
+ 	  Support hosting paravirtualized guest machines using the SIE
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
+index 287e4c8..f9d16ff 100644
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -27,6 +27,7 @@ config KVM
+ 	select MMU_NOTIFIER
+ 	select ANON_INODES
+ 	select HAVE_KVM_IRQCHIP
++	select HAVE_KVM_IRQFD
+ 	select HAVE_KVM_IRQ_ROUTING
+ 	select HAVE_KVM_EVENTFD
+ 	select KVM_APIC_ARCHITECTURE
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index ddd33e1..8593d2e 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -437,7 +437,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+ int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+ void vcpu_put(struct kvm_vcpu *vcpu);
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ int kvm_irqfd_init(void);
+ void kvm_irqfd_exit(void);
+ #else
+@@ -932,20 +932,20 @@ int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ 			  const struct kvm_irq_routing_entry *ue);
+ void kvm_free_irq_routing(struct kvm *kvm);
+ 
+-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+-
+ #else
+ 
+ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+ 
+ #endif
+ 
++int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
++
+ #ifdef CONFIG_HAVE_KVM_EVENTFD
+ 
+ void kvm_eventfd_init(struct kvm *kvm);
+ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQCHIP
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
+ void kvm_irqfd_release(struct kvm *kvm);
+ void kvm_irq_routing_update(struct kvm *);
+diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
+index 13f2d19..fc0c5e6 100644
+--- a/virt/kvm/Kconfig
++++ b/virt/kvm/Kconfig
+@@ -6,6 +6,9 @@ config HAVE_KVM
+ config HAVE_KVM_IRQCHIP
+        bool
+ 
++config HAVE_KVM_IRQFD
++       bool
++
+ config HAVE_KVM_IRQ_ROUTING
+        bool
+ 
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 99957df..f5f6154 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -39,7 +39,7 @@
+ #include "irq.h"
+ #include "iodev.h"
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ /*
+  * --------------------------------------------------------------------
+  * irqfd: Allows an fd to be used to inject an interrupt to the guest
+@@ -450,7 +450,7 @@ out:
+ void
+ kvm_eventfd_init(struct kvm *kvm)
+ {
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ 	spin_lock_init(&kvm->irqfds.lock);
+ 	INIT_LIST_HEAD(&kvm->irqfds.items);
+ 	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
+@@ -459,7 +459,7 @@ kvm_eventfd_init(struct kvm *kvm)
+ 	INIT_LIST_HEAD(&kvm->ioeventfds);
+ }
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ /*
+  * shutdown any irqfd's that match fd+gsi
+  */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 1b95cc9..a69a623 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2337,7 +2337,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+ #ifdef CONFIG_HAVE_KVM_MSI
+ 	case KVM_CAP_SIGNAL_MSI:
+ #endif
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
++#ifdef CONFIG_HAVE_KVM_IRQFD
+ 	case KVM_CAP_IRQFD_RESAMPLE:
+ #endif
+ 	case KVM_CAP_CHECK_EXTENSION_VM:
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-all-accesses-to-kvm-irq_routing-into-irqchi.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-all-accesses-to-kvm-irq_routing-into-irqchi.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,317 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:11 +1000
+Subject: KVM: Move all accesses to kvm::irq_routing into
+ irqchip.c
+Origin: https://git.kernel.org/linus/9957c86d659a4d5a2bed25ccbd3bfc9c3f25e658
+
+Now that struct _irqfd does not keep a reference to storage pointed
+to by the irq_routing field of struct kvm, we can move the statement
+that updates it out from under the irqfds.lock and put it in
+kvm_set_irq_routing() instead.  That means we then have to take a
+srcu_read_lock on kvm->irq_srcu around the irqfd_update call in
+kvm_irqfd_assign(), since holding the kvm->irqfds.lock no longer
+ensures that the routing can't change.
+
+Combined with changing kvm_irq_map_gsi() and kvm_irq_map_chip_pin()
+to take a struct kvm * argument instead of the pointer to the routing
+table, this allows us to move all references to kvm->irq_routing
+into irqchip.c.  That in turn allows us to move the definition of the
+kvm_irq_routing_table struct into irqchip.c as well.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ include/linux/kvm_host.h |   35 +++++++----------------------------
+ virt/kvm/eventfd.c       |   22 +++++++++-------------
+ virt/kvm/irq_comm.c      |    6 ++----
+ virt/kvm/irqchip.c       |   39 +++++++++++++++++++++++++--------------
+ 4 files changed, 43 insertions(+), 59 deletions(-)
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 4956149..ddd33e1 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -325,24 +325,7 @@ struct kvm_kernel_irq_routing_entry {
+ 	struct hlist_node link;
+ };
+ 
+-#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+-
+-struct kvm_irq_routing_table {
+-	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
+-	struct kvm_kernel_irq_routing_entry *rt_entries;
+-	u32 nr_rt_entries;
+-	/*
+-	 * Array indexed by gsi. Each entry contains list of irq chips
+-	 * the gsi is connected to.
+-	 */
+-	struct hlist_head map[0];
+-};
+-
+-#else
+-
+-struct kvm_irq_routing_table {};
+-
+-#endif
++struct kvm_irq_routing_table;
+ 
+ #ifndef KVM_PRIVATE_MEM_SLOTS
+ #define KVM_PRIVATE_MEM_SLOTS 0
+@@ -401,8 +384,7 @@ struct kvm {
+ 	struct mutex irq_lock;
+ #ifdef CONFIG_HAVE_KVM_IRQCHIP
+ 	/*
+-	 * Update side is protected by irq_lock and,
+-	 * if configured, irqfds.lock.
++	 * Update side is protected by irq_lock.
+ 	 */
+ 	struct kvm_irq_routing_table __rcu *irq_routing;
+ 	struct hlist_head mask_notifier_list;
+@@ -752,10 +734,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ 			     bool mask);
+ 
+-int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
+-		    struct kvm_irq_routing_table *irq_rt, int gsi);
+-int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
+-			 unsigned irqchip, unsigned pin);
++int kvm_irq_map_gsi(struct kvm *kvm,
++		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
++int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
+ 
+ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ 		bool line_status);
+@@ -967,7 +948,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+ #ifdef CONFIG_HAVE_KVM_IRQCHIP
+ int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
+ void kvm_irqfd_release(struct kvm *kvm);
+-void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
++void kvm_irq_routing_update(struct kvm *);
+ #else
+ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+ {
+@@ -989,10 +970,8 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+ static inline void kvm_irqfd_release(struct kvm *kvm) {}
+ 
+ #ifdef CONFIG_HAVE_KVM_IRQCHIP
+-static inline void kvm_irq_routing_update(struct kvm *kvm,
+-					  struct kvm_irq_routing_table *irq_rt)
++static inline void kvm_irq_routing_update(struct kvm *kvm)
+ {
+-	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+ }
+ #endif
+ 
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 15fa948..f0075ff 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -278,14 +278,13 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
+ }
+ 
+ /* Must be called under irqfds.lock */
+-static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+-			 struct kvm_irq_routing_table *irq_rt)
++static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
+ {
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
+ 	int i, n_entries;
+ 
+-	n_entries = kvm_irq_map_gsi(entries, irq_rt, irqfd->gsi);
++	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
+ 
+ 	write_seqcount_begin(&irqfd->irq_entry_sc);
+ 
+@@ -304,12 +303,12 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+ static int
+ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ {
+-	struct kvm_irq_routing_table *irq_rt;
+ 	struct _irqfd *irqfd, *tmp;
+ 	struct fd f;
+ 	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ 	int ret;
+ 	unsigned int events;
++	int idx;
+ 
+ 	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ 	if (!irqfd)
+@@ -403,9 +402,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ 		goto fail;
+ 	}
+ 
+-	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+-					   lockdep_is_held(&kvm->irqfds.lock));
+-	irqfd_update(kvm, irqfd, irq_rt);
++	idx = srcu_read_lock(&kvm->irq_srcu);
++	irqfd_update(kvm, irqfd);
++	srcu_read_unlock(&kvm->irq_srcu, idx);
+ 
+ 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
+ 
+@@ -539,20 +538,17 @@ kvm_irqfd_release(struct kvm *kvm)
+ }
+ 
+ /*
+- * Change irq_routing and irqfd.
++ * Take note of a change in irq routing.
+  * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
+  */
+-void kvm_irq_routing_update(struct kvm *kvm,
+-			    struct kvm_irq_routing_table *irq_rt)
++void kvm_irq_routing_update(struct kvm *kvm)
+ {
+ 	struct _irqfd *irqfd;
+ 
+ 	spin_lock_irq(&kvm->irqfds.lock);
+ 
+-	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+-
+ 	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+-		irqfd_update(kvm, irqfd, irq_rt);
++		irqfd_update(kvm, irqfd);
+ 
+ 	spin_unlock_irq(&kvm->irqfds.lock);
+ }
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index 1758445..963b899 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -163,7 +163,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+ 	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	int ret = -EINVAL;
+-	struct kvm_irq_routing_table *irq_rt;
+ 	int idx;
+ 
+ 	trace_kvm_set_irq(irq, level, irq_source_id);
+@@ -177,8 +176,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+ 	 * which is limited to 1:1 GSI mapping.
+ 	 */
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	if (kvm_irq_map_gsi(entries, irq_rt, irq) > 0) {
++	if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
+ 		e = &entries[0];
+ 		if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+ 			ret = kvm_set_msi_inatomic(e, kvm);
+@@ -264,7 +262,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ 	int idx, gsi;
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
++	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ 	if (gsi != -1)
+ 		hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
+ 			if (kimn->irq == gsi)
+diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
+index f4648dd..04faac5 100644
+--- a/virt/kvm/irqchip.c
++++ b/virt/kvm/irqchip.c
+@@ -31,12 +31,26 @@
+ #include <trace/events/kvm.h>
+ #include "irq.h"
+ 
+-int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
+-		    struct kvm_irq_routing_table *irq_rt, int gsi)
++struct kvm_irq_routing_table {
++	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
++	struct kvm_kernel_irq_routing_entry *rt_entries;
++	u32 nr_rt_entries;
++	/*
++	 * Array indexed by gsi. Each entry contains list of irq chips
++	 * the gsi is connected to.
++	 */
++	struct hlist_head map[0];
++};
++
++int kvm_irq_map_gsi(struct kvm *kvm,
++		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
+ {
++	struct kvm_irq_routing_table *irq_rt;
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	int n = 0;
+ 
++	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
++					lockdep_is_held(&kvm->irq_lock));
+ 	if (gsi < irq_rt->nr_rt_entries) {
+ 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
+ 			entries[n] = *e;
+@@ -47,21 +61,21 @@ int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
+ 	return n;
+ }
+ 
+-int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
+-			 unsigned irqchip, unsigned pin)
++int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ {
++	struct kvm_irq_routing_table *irq_rt;
++
++	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ 	return irq_rt->chip[irqchip][pin];
+ }
+ 
+ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ {
+-	struct kvm_irq_routing_table *irq_rt;
+ 	struct kvm_irq_ack_notifier *kian;
+ 	int gsi, idx;
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
++	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ 	if (gsi != -1)
+ 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ 					 link)
+@@ -78,15 +92,13 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+ 
+ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ {
+-	struct kvm_irq_routing_table *irq_rt;
+ 	struct kvm_irq_ack_notifier *kian;
+ 	int gsi, idx;
+ 
+ 	trace_kvm_ack_irq(irqchip, pin);
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
++	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+ 	if (gsi != -1)
+ 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ 					 link)
+@@ -143,7 +155,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ {
+ 	struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
+ 	int ret = -1, i, idx;
+-	struct kvm_irq_routing_table *irq_rt;
+ 
+ 	trace_kvm_set_irq(irq, level, irq_source_id);
+ 
+@@ -152,8 +163,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ 	 * writes to the unused one.
+ 	 */
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	i = kvm_irq_map_gsi(irq_set, irq_rt, irq);
++	i = kvm_irq_map_gsi(kvm, irq_set, irq);
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+ 
+ 	while(i--) {
+@@ -250,7 +260,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
+ 
+ 	mutex_lock(&kvm->irq_lock);
+ 	old = kvm->irq_routing;
+-	kvm_irq_routing_update(kvm, new);
++	rcu_assign_pointer(kvm->irq_routing, new);
++	kvm_irq_routing_update(kvm);
+ 	mutex_unlock(&kvm->irq_lock);
+ 
+ 	synchronize_srcu_expedited(&kvm->irq_srcu);
+-- 
+1.7.10.4
+

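The update side that stays in kvm_set_irq_routing() follows the classic
publish-then-synchronize RCU shape. A condensed sketch of that sequence --
the helper name is illustrative, and table construction, error paths and
any details beyond what the hunk above shows are elided:

  #include <linux/srcu.h>
  #include <linux/slab.h>

  /* Publish a new routing table; readers run under
   * srcu_read_lock(&kvm->irq_srcu) and see either the old or the new one. */
  static void publish_irq_routing(struct kvm *kvm,
                                  struct kvm_irq_routing_table *new)
  {
      struct kvm_irq_routing_table *old;

      mutex_lock(&kvm->irq_lock);
      old = kvm->irq_routing;
      rcu_assign_pointer(kvm->irq_routing, new);
      kvm_irq_routing_update(kvm);  /* re-snapshot every irqfd's copy */
      mutex_unlock(&kvm->irq_lock);

      synchronize_srcu_expedited(&kvm->irq_srcu);  /* wait out old readers */
      kfree(old);  /* no reader can still reference it */
  }
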
Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-irq-notifier-implementation-into-eventfd.c.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Move-irq-notifier-implementation-into-eventfd.c.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,177 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:12 +1000
+Subject: KVM: Move irq notifier implementation into eventfd.c
+Origin: https://git.kernel.org/linus/e4d57e1ee1ab59f0cef0272800ac6c52e0ec814a
+
+This moves the functions kvm_irq_has_notifier(), kvm_notify_acked_irq(),
+kvm_register_irq_ack_notifier() and kvm_unregister_irq_ack_notifier()
+from irqchip.c to eventfd.c.  The reason for doing this is that those
+functions are used in connection with IRQFDs, which are implemented in
+eventfd.c.  In future we will want to use IRQFDs on platforms that
+don't implement the GSI routing implemented in irqchip.c, so we won't
+be compiling in irqchip.c, but we still need the irq notifiers.  The
+implementation is unchanged.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ virt/kvm/eventfd.c |   63 ++++++++++++++++++++++++++++++++++++++++++++++++++++
+ virt/kvm/irqchip.c |   61 --------------------------------------------------
+ 2 files changed, 63 insertions(+), 61 deletions(-)
+
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index f0075ff..99957df 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -34,7 +34,9 @@
+ #include <linux/srcu.h>
+ #include <linux/slab.h>
+ #include <linux/seqlock.h>
++#include <trace/events/kvm.h>
+ 
++#include "irq.h"
+ #include "iodev.h"
+ 
+ #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+@@ -865,3 +867,64 @@ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 
+ 	return kvm_assign_ioeventfd(kvm, args);
+ }
++
++bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
++{
++	struct kvm_irq_ack_notifier *kian;
++	int gsi, idx;
++
++	idx = srcu_read_lock(&kvm->irq_srcu);
++	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
++	if (gsi != -1)
++		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
++					 link)
++			if (kian->gsi == gsi) {
++				srcu_read_unlock(&kvm->irq_srcu, idx);
++				return true;
++			}
++
++	srcu_read_unlock(&kvm->irq_srcu, idx);
++
++	return false;
++}
++EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
++
++void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
++{
++	struct kvm_irq_ack_notifier *kian;
++	int gsi, idx;
++
++	trace_kvm_ack_irq(irqchip, pin);
++
++	idx = srcu_read_lock(&kvm->irq_srcu);
++	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
++	if (gsi != -1)
++		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
++					 link)
++			if (kian->gsi == gsi)
++				kian->irq_acked(kian);
++	srcu_read_unlock(&kvm->irq_srcu, idx);
++}
++
++void kvm_register_irq_ack_notifier(struct kvm *kvm,
++				   struct kvm_irq_ack_notifier *kian)
++{
++	mutex_lock(&kvm->irq_lock);
++	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
++	mutex_unlock(&kvm->irq_lock);
++#ifdef __KVM_HAVE_IOAPIC
++	kvm_vcpu_request_scan_ioapic(kvm);
++#endif
++}
++
++void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
++				    struct kvm_irq_ack_notifier *kian)
++{
++	mutex_lock(&kvm->irq_lock);
++	hlist_del_init_rcu(&kian->link);
++	mutex_unlock(&kvm->irq_lock);
++	synchronize_srcu(&kvm->irq_srcu);
++#ifdef __KVM_HAVE_IOAPIC
++	kvm_vcpu_request_scan_ioapic(kvm);
++#endif
++}
+diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
+index 04faac5..7f256f3 100644
+--- a/virt/kvm/irqchip.c
++++ b/virt/kvm/irqchip.c
+@@ -69,67 +69,6 @@ int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ 	return irq_rt->chip[irqchip][pin];
+ }
+ 
+-bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+-{
+-	struct kvm_irq_ack_notifier *kian;
+-	int gsi, idx;
+-
+-	idx = srcu_read_lock(&kvm->irq_srcu);
+-	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+-	if (gsi != -1)
+-		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+-					 link)
+-			if (kian->gsi == gsi) {
+-				srcu_read_unlock(&kvm->irq_srcu, idx);
+-				return true;
+-			}
+-
+-	srcu_read_unlock(&kvm->irq_srcu, idx);
+-
+-	return false;
+-}
+-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+-
+-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+-{
+-	struct kvm_irq_ack_notifier *kian;
+-	int gsi, idx;
+-
+-	trace_kvm_ack_irq(irqchip, pin);
+-
+-	idx = srcu_read_lock(&kvm->irq_srcu);
+-	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
+-	if (gsi != -1)
+-		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+-					 link)
+-			if (kian->gsi == gsi)
+-				kian->irq_acked(kian);
+-	srcu_read_unlock(&kvm->irq_srcu, idx);
+-}
+-
+-void kvm_register_irq_ack_notifier(struct kvm *kvm,
+-				   struct kvm_irq_ack_notifier *kian)
+-{
+-	mutex_lock(&kvm->irq_lock);
+-	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
+-	mutex_unlock(&kvm->irq_lock);
+-#ifdef __KVM_HAVE_IOAPIC
+-	kvm_vcpu_request_scan_ioapic(kvm);
+-#endif
+-}
+-
+-void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+-				    struct kvm_irq_ack_notifier *kian)
+-{
+-	mutex_lock(&kvm->irq_lock);
+-	hlist_del_init_rcu(&kian->link);
+-	mutex_unlock(&kvm->irq_lock);
+-	synchronize_srcu(&kvm->irq_srcu);
+-#ifdef __KVM_HAVE_IOAPIC
+-	kvm_vcpu_request_scan_ioapic(kvm);
+-#endif
+-}
+-
+ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+ {
+ 	struct kvm_kernel_irq_routing_entry route;
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-Controls-for-in-kernel-sPAPR-hypercal.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-Controls-for-in-kernel-sPAPR-hypercal.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,422 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 2 Jun 2014 11:02:59 +1000
+Subject: KVM: PPC: Book3S: Controls for in-kernel sPAPR
+ hypercall handling
+Origin: https://git.kernel.org/linus/699a0ea0823d32030b0666b28ff8633960f7ffa7
+
+This provides a way for userspace to control which sPAPR hcalls get
+handled in the kernel.  Each hcall can be individually enabled or
+disabled for in-kernel handling, except for H_RTAS.  The exception
+for H_RTAS is because userspace can already control whether
+individual RTAS functions are handled in-kernel or not via the
+KVM_PPC_RTAS_DEFINE_TOKEN ioctl, and because the numeric value for
+H_RTAS is out of the normal sequence of hcall numbers.
+
+Hcalls are enabled or disabled using the KVM_ENABLE_CAP ioctl for the
+KVM_CAP_PPC_ENABLE_HCALL capability on the file descriptor for the VM.
+The args field of the struct kvm_enable_cap specifies the hcall number
+in args[0] and the enable/disable flag in args[1]; 0 means disable
+in-kernel handling (so that the hcall will always cause an exit to
+userspace) and 1 means enable.  Enabling or disabling in-kernel
+handling of an hcall is effective across the whole VM.
+
+The ability for KVM_ENABLE_CAP to be used on a VM file descriptor
+on PowerPC is new, added by this commit.  The KVM_CAP_ENABLE_CAP_VM
+capability advertises that this ability exists.
+
+When a VM is created, an initial set of hcalls are enabled for
+in-kernel handling.  The set that is enabled is the set of hcalls that
+have an in-kernel implementation at this point.  Any new hcall
+implementations from this point onwards should not be added to the
+default set without a good reason.
+
+No distinction is made between real-mode and virtual-mode hcall
+implementations; the one setting controls them both.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ Documentation/virtual/kvm/api.txt       |   41 +++++++++++++++++++++++--
+ arch/powerpc/include/asm/kvm_book3s.h   |    1 +
+ arch/powerpc/include/asm/kvm_host.h     |    2 ++
+ arch/powerpc/kernel/asm-offsets.c       |    1 +
+ arch/powerpc/kvm/book3s_hv.c            |   51 +++++++++++++++++++++++++++++++
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |   11 +++++++
+ arch/powerpc/kvm/book3s_pr.c            |    5 +++
+ arch/powerpc/kvm/book3s_pr_papr.c       |   37 ++++++++++++++++++++++
+ arch/powerpc/kvm/powerpc.c              |   45 +++++++++++++++++++++++++++
+ include/uapi/linux/kvm.h                |    1 +
+ 10 files changed, 193 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 0fe3649..5c54d19 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -2863,8 +2863,8 @@ The fields in each entry are defined as follows:
+          this function/index combination
+ 
+ 
+-6. Capabilities that can be enabled
+------------------------------------
++6. Capabilities that can be enabled on vCPUs
++--------------------------------------------
+ 
+ There are certain capabilities that change the behavior of the virtual CPU when
+ enabled. To enable them, please see section 4.37. Below you can find a list of
+@@ -3002,3 +3002,40 @@ Parameters: args[0] is the XICS device fd
+             args[1] is the XICS CPU number (server ID) for this vcpu
+ 
+ This capability connects the vcpu to an in-kernel XICS device.
++
++
++7. Capabilities that can be enabled on VMs
++------------------------------------------
++
++There are certain capabilities that change the behavior of the virtual
++machine when enabled. To enable them, please see section 4.37. Below
++you can find a list of capabilities and what their effect on the VM
++is when enabling them.
++
++The following information is provided along with the description:
++
++  Architectures: which instruction set architectures provide this ioctl.
++      x86 includes both i386 and x86_64.
++
++  Parameters: what parameters are accepted by the capability.
++
++  Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
++      are not detailed, but errors with specific meanings are.
++
++
++7.1 KVM_CAP_PPC_ENABLE_HCALL
++
++Architectures: ppc
++Parameters: args[0] is the sPAPR hcall number
++	    args[1] is 0 to disable, 1 to enable in-kernel handling
++
++This capability controls whether individual sPAPR hypercalls (hcalls)
++get handled by the kernel or not.  Enabling or disabling in-kernel
++handling of an hcall is effective across the VM.  On creation, an
++initial set of hcalls are enabled for in-kernel handling, which
++consists of those hcalls for which in-kernel handlers were implemented
++before this capability was implemented.  If disabled, the kernel will
++not to attempt to handle the hcall, but will always exit to userspace
++to handle it.  Note that it may not make sense to enable some and
++disable others of a group of related hcalls, but KVM does not prevent
++userspace from doing that.
+diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
+index a20cc0b..052ab2a 100644
+--- a/arch/powerpc/include/asm/kvm_book3s.h
++++ b/arch/powerpc/include/asm/kvm_book3s.h
+@@ -187,6 +187,7 @@ extern void kvmppc_hv_entry_trampoline(void);
+ extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
+ extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
+ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
++extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+ extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+ 				 struct kvm_vcpu *vcpu);
+ extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index f9ae696..62b2cee 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -34,6 +34,7 @@
+ #include <asm/processor.h>
+ #include <asm/page.h>
+ #include <asm/cacheflush.h>
++#include <asm/hvcall.h>
+ 
+ #define KVM_MAX_VCPUS		NR_CPUS
+ #define KVM_MAX_VCORES		NR_CPUS
+@@ -263,6 +264,7 @@ struct kvm_arch {
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	struct list_head spapr_tce_tables;
+ 	struct list_head rtas_tokens;
++	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+ #endif
+ #ifdef CONFIG_KVM_MPIC
+ 	struct openpic *mpic;
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index f5995a9..17ffcb4 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -493,6 +493,7 @@ int main(void)
+ 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+ 	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+ 	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
++	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
+ 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
+ 	DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
+ 	DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 1562acf..cf445d2 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -67,6 +67,8 @@
+ /* Used as a "null" value for timebase values */
+ #define TB_NIL	(~(u64)0)
+ 
++static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
++
+ static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
+ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
+ 
+@@ -562,6 +564,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
+ 	struct kvm_vcpu *tvcpu;
+ 	int idx, rc;
+ 
++	if (req <= MAX_HCALL_OPCODE &&
++	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
++		return RESUME_HOST;
++
+ 	switch (req) {
+ 	case H_ENTER:
+ 		idx = srcu_read_lock(&vcpu->kvm->srcu);
+@@ -2269,6 +2275,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
+ 	 */
+ 	cpumask_setall(&kvm->arch.need_tlb_flush);
+ 
++	/* Start out with the default set of hcalls enabled */
++	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
++	       sizeof(kvm->arch.enabled_hcalls));
++
+ 	kvm->arch.rma = NULL;
+ 
+ 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+@@ -2407,6 +2417,45 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
+ 	return r;
+ }
+ 
++/*
++ * List of hcall numbers to enable by default.
++ * For compatibility with old userspace, we enable by default
++ * all hcalls that were implemented before the hcall-enabling
++ * facility was added.  Note this list should not include H_RTAS.
++ */
++static unsigned int default_hcall_list[] = {
++	H_REMOVE,
++	H_ENTER,
++	H_READ,
++	H_PROTECT,
++	H_BULK_REMOVE,
++	H_GET_TCE,
++	H_PUT_TCE,
++	H_SET_DABR,
++	H_SET_XDABR,
++	H_CEDE,
++	H_PROD,
++	H_CONFER,
++	H_REGISTER_VPA,
++#ifdef CONFIG_KVM_XICS
++	H_EOI,
++	H_CPPR,
++	H_IPI,
++	H_IPOLL,
++	H_XIRR,
++	H_XIRR_X,
++#endif
++	0
++};
++
++static void init_default_hcalls(void)
++{
++	int i;
++
++	for (i = 0; default_hcall_list[i]; ++i)
++		__set_bit(default_hcall_list[i] / 4, default_enabled_hcalls);
++}
++
+ static struct kvmppc_ops kvm_ops_hv = {
+ 	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
+ 	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
+@@ -2454,6 +2503,8 @@ static int kvmppc_book3s_init_hv(void)
+ 	kvm_ops_hv.owner = THIS_MODULE;
+ 	kvmppc_hv_ops = &kvm_ops_hv;
+ 
++	init_default_hcalls();
++
+ 	r = kvmppc_mmu_hv_init();
+ 	return r;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 64ac56f..33aaade 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1909,6 +1909,17 @@ hcall_try_real_mode:
+ 	clrrdi	r3,r3,2
+ 	cmpldi	r3,hcall_real_table_end - hcall_real_table
+ 	bge	guest_exit_cont
++	/* See if this hcall is enabled for in-kernel handling */
++	ld	r4, VCPU_KVM(r9)
++	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
++	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
++	add	r4, r4, r0
++	ld	r0, KVM_ENABLED_HCALLS(r4)
++	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
++	srd	r0, r0, r4
++	andi.	r0, r0, 1
++	beq	guest_exit_cont
++	/* Get pointer to handler, if any, and call it */
+ 	LOAD_REG_ADDR(r4, hcall_real_table)
+ 	lwax	r3,r3,r4
+ 	cmpwi	r3,0
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index 3b82e86..123ac7d 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -1597,6 +1597,11 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
+ {
+ 	mutex_init(&kvm->arch.hpt_mutex);
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
++	/* Start out with the default set of hcalls enabled */
++	kvmppc_pr_init_default_hcalls(kvm);
++#endif
++
+ 	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ 		spin_lock(&kvm_global_user_count_lock);
+ 		if (++kvm_global_user_count == 1)
+diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
+index f7c25c6..eacaa6e 100644
+--- a/arch/powerpc/kvm/book3s_pr_papr.c
++++ b/arch/powerpc/kvm/book3s_pr_papr.c
+@@ -267,6 +267,10 @@ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
+ 
+ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
+ {
++	if (cmd <= MAX_HCALL_OPCODE &&
++	    !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
++		return EMULATE_FAIL;
++
+ 	switch (cmd) {
+ 	case H_ENTER:
+ 		return kvmppc_h_pr_enter(vcpu);
+@@ -304,3 +308,36 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
+ 
+ 	return EMULATE_FAIL;
+ }
++
++
++/*
++ * List of hcall numbers to enable by default.
++ * For compatibility with old userspace, we enable by default
++ * all hcalls that were implemented before the hcall-enabling
++ * facility was added.  Note this list should not include H_RTAS.
++ */
++static unsigned int default_hcall_list[] = {
++	H_ENTER,
++	H_REMOVE,
++	H_PROTECT,
++	H_BULK_REMOVE,
++	H_PUT_TCE,
++	H_CEDE,
++#ifdef CONFIG_KVM_XICS
++	H_XIRR,
++	H_CPPR,
++	H_EOI,
++	H_IPI,
++	H_IPOLL,
++	H_XIRR_X,
++#endif
++	0
++};
++
++void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
++{
++	int i;
++
++	for (i = 0; default_hcall_list[i]; ++i)
++		__set_bit(default_hcall_list[i] / 4, kvm->arch.enabled_hcalls);
++}
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 61c738a..3222a4d 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -387,6 +387,7 @@ int kvm_dev_ioctl_check_extension(long ext)
+ 	case KVM_CAP_PPC_UNSET_IRQ:
+ 	case KVM_CAP_PPC_IRQ_LEVEL:
+ 	case KVM_CAP_ENABLE_CAP:
++	case KVM_CAP_ENABLE_CAP_VM:
+ 	case KVM_CAP_ONE_REG:
+ 	case KVM_CAP_IOEVENTFD:
+ 	case KVM_CAP_DEVICE_CTRL:
+@@ -417,6 +418,7 @@ int kvm_dev_ioctl_check_extension(long ext)
+ 	case KVM_CAP_PPC_ALLOC_HTAB:
+ 	case KVM_CAP_PPC_RTAS:
+ 	case KVM_CAP_PPC_FIXUP_HCALL:
++	case KVM_CAP_PPC_ENABLE_HCALL:
+ #ifdef CONFIG_KVM_XICS
+ 	case KVM_CAP_IRQ_XICS:
+ #endif
+@@ -1099,6 +1101,40 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+ 	return 0;
+ }
+ 
++
++static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
++				   struct kvm_enable_cap *cap)
++{
++	int r;
++
++	if (cap->flags)
++		return -EINVAL;
++
++	switch (cap->cap) {
++#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
++	case KVM_CAP_PPC_ENABLE_HCALL: {
++		unsigned long hcall = cap->args[0];
++
++		r = -EINVAL;
++		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
++		    cap->args[1] > 1)
++			break;
++		if (cap->args[1])
++			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
++		else
++			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
++		r = 0;
++		break;
++	}
++#endif
++	default:
++		r = -EINVAL;
++		break;
++	}
++
++	return r;
++}
++
+ long kvm_arch_vm_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+ {
+@@ -1118,6 +1154,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ 
+ 		break;
+ 	}
++	case KVM_ENABLE_CAP:
++	{
++		struct kvm_enable_cap cap;
++		r = -EFAULT;
++		if (copy_from_user(&cap, argp, sizeof(cap)))
++			goto out;
++		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
++		break;
++	}
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	case KVM_CREATE_SPAPR_TCE: {
+ 		struct kvm_create_spapr_tce create_tce;
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index e11d8f1..0418b74 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -758,6 +758,7 @@ struct kvm_ppc_smmu_info {
+ #define KVM_CAP_VM_ATTRIBUTES 101
+ #define KVM_CAP_ARM_PSCI_0_2 102
+ #define KVM_CAP_PPC_FIXUP_HCALL 103
++#define KVM_CAP_PPC_ENABLE_HCALL 104
+ 
+ #ifdef KVM_CAP_IRQ_ROUTING
+ 
+-- 
+1.7.10.4
+

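From userspace, enabling or disabling a single hcall goes through
KVM_ENABLE_CAP on the VM fd, as the api.txt hunk above documents. A minimal
sketch -- the function name is illustrative, and the caller supplies the
hcall opcode since the H_* constants live in kernel-internal headers:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* enable = 1: handle the hcall in the kernel; enable = 0: always exit to
   * userspace for it.  Returns 0 on success, -1 with errno set on error. */
  static int set_hcall_handling(int vm_fd, unsigned long hcall, int enable)
  {
      struct kvm_enable_cap cap;

      memset(&cap, 0, sizeof(cap));
      cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
      cap.args[0] = hcall;   /* sPAPR hcall number, a multiple of 4 */
      cap.args[1] = enable;  /* values other than 0 and 1 get -EINVAL */
      return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }
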
Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-XICS-in-BE.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-XICS-in-BE.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,59 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:37:52 +0200
+Subject: KVM: PPC: Book3S HV: Access XICS in BE
+Origin: https://git.kernel.org/linus/76d072fb05f646eb180f161bbe06ab185af52f38
+
+On the exit path from the guest we check what type of interrupt we received,
+if any. This means we're doing a hardware access to the XICS interrupt
+controller.
+
+However, when running on a little endian system, this access is byte reversed.
+
+So let's make sure to swizzle the bytes back again and virtually make XICS
+accesses big endian.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index bf5270e..364ca0c 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2350,7 +2350,18 @@ kvmppc_read_intr:
+ 	cmpdi	r6, 0
+ 	beq-	1f
+ 	lwzcix	r0, r6, r7
+-	rlwinm.	r3, r0, 0, 0xffffff
++	/*
++	 * Save XIRR for later. Since we get in in reverse endian on LE
++	 * systems, save it byte reversed and fetch it back in host endian.
++	 */
++	li	r3, HSTATE_SAVED_XIRR
++	STWX_BE	r0, r3, r13
++#ifdef __LITTLE_ENDIAN__
++	lwz	r3, HSTATE_SAVED_XIRR(r13)
++#else
++	mr	r3, r0
++#endif
++	rlwinm.	r3, r3, 0, 0xffffff
+ 	sync
+ 	beq	1f			/* if nothing pending in the ICP */
+ 
+@@ -2382,10 +2393,9 @@ kvmppc_read_intr:
+ 	li	r3, -1
+ 1:	blr
+ 
+-42:	/* It's not an IPI and it's for the host, stash it in the PACA
+-	 * before exit, it will be picked up by the host ICP driver
++42:	/* It's not an IPI and it's for the host. We saved a copy of XIRR in
++	 * the PACA earlier, it will be picked up by the host ICP driver
+ 	 */
+-	stw	r0, HSTATE_SAVED_XIRR(r13)
+ 	li	r3, 1
+ 	b	1b
+ 
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-guest-VPA-in-BE.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-guest-VPA-in-BE.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,103 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:34:19 +0200
+Subject: KVM: PPC: Book3S HV: Access guest VPA in BE
+Origin: https://git.kernel.org/linus/02407552256111479fbfd23a3e01218b399aaa35
+
+There are a few shared data structures between the host and the guest. Most
+of them get registered through the VPA interface.
+
+These data structures are defined to always be in big endian byte order, so
+let's make sure we always access them in big endian.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv.c     |   22 +++++++++++-----------
+ arch/powerpc/kvm/book3s_hv_ras.c |    6 +++---
+ 2 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 7db9df2..f1281c4 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -272,7 +272,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
+ {
+ 	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
+-	vpa->yield_count = 1;
++	vpa->yield_count = cpu_to_be32(1);
+ }
+ 
+ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
+@@ -295,8 +295,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
+ struct reg_vpa {
+ 	u32 dummy;
+ 	union {
+-		u16 hword;
+-		u32 word;
++		__be16 hword;
++		__be32 word;
+ 	} length;
+ };
+ 
+@@ -335,9 +335,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
+ 		if (va == NULL)
+ 			return H_PARAMETER;
+ 		if (subfunc == H_VPA_REG_VPA)
+-			len = ((struct reg_vpa *)va)->length.hword;
++			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
+ 		else
+-			len = ((struct reg_vpa *)va)->length.word;
++			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
+ 		kvmppc_unpin_guest_page(kvm, va, vpa, false);
+ 
+ 		/* Check length */
+@@ -542,18 +542,18 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ 		return;
+ 	memset(dt, 0, sizeof(struct dtl_entry));
+ 	dt->dispatch_reason = 7;
+-	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
+-	dt->timebase = now + vc->tb_offset;
+-	dt->enqueue_to_dispatch_time = stolen;
+-	dt->srr0 = kvmppc_get_pc(vcpu);
+-	dt->srr1 = vcpu->arch.shregs.msr;
++	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
++	dt->timebase = cpu_to_be64(now + vc->tb_offset);
++	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
++	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
++	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
+ 	++dt;
+ 	if (dt == vcpu->arch.dtl.pinned_end)
+ 		dt = vcpu->arch.dtl.pinned_addr;
+ 	vcpu->arch.dtl_ptr = dt;
+ 	/* order writing *dt vs. writing vpa->dtl_idx */
+ 	smp_wmb();
+-	vpa->dtl_idx = ++vcpu->arch.dtl_index;
++	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
+ 	vcpu->arch.dtl.dirty = true;
+ }
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
+index 3a5c568..d562c8e 100644
+--- a/arch/powerpc/kvm/book3s_hv_ras.c
++++ b/arch/powerpc/kvm/book3s_hv_ras.c
+@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	/* Sanity check */
+-	n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
++	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
+ 	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
+ 		return;
+ 
+ 	/* Load up the SLB from that */
+ 	for (i = 0; i < n; ++i) {
+-		unsigned long rb = slb->save_area[i].esid;
+-		unsigned long rs = slb->save_area[i].vsid;
++		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
++		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
+ 
+ 		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
+ 		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
+-- 
+1.7.10.4
+
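
A short sketch of the convention this patch adopts: declaring
hypervisor-shared fields with __be types lets sparse flag any access
that forgets a conversion. The struct below is illustrative, not the
real lppaca/dtl layout.

	#include <asm/byteorder.h>
	#include <linux/types.h>

	struct demo_vpa {
		__be32 yield_count;	/* big endian by architecture */
		__be64 dtl_idx;
	};

	/* Convert to big endian before storing into the shared area. */
	static inline void demo_set_dtl_idx(struct demo_vpa *vpa, u64 idx)
	{
		vpa->dtl_idx = cpu_to_be64(idx);
	}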

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-host-lppaca-and-shadow-slb-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Access-host-lppaca-and-shadow-slb-.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,74 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:36:17 +0200
+Subject: KVM: PPC: Book3S HV: Access host lppaca and shadow slb
+ in BE
+Origin: https://git.kernel.org/linus/0865a583a4881975cc4b621f4886c02f01600302
+
+Some data structures are always stored in big endian. Among those are the LPPACA
+fields as well as the shadow slb. These structures might be shared with a
+hypervisor.
+
+So whenever we access those fields, make sure we do so in big endian byte order.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |   20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index e66c1e38..bf5270e 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -32,10 +32,6 @@
+ 
+ #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
+ 
+-#ifdef __LITTLE_ENDIAN__
+-#error Need to fix lppaca and SLB shadow accesses in little endian mode
+-#endif
+-
+ /* Values in HSTATE_NAPPING(r13) */
+ #define NAPPING_CEDE	1
+ #define NAPPING_NOVCPU	2
+@@ -595,9 +591,10 @@ kvmppc_got_guest:
+ 	ld	r3, VCPU_VPA(r4)
+ 	cmpdi	r3, 0
+ 	beq	25f
+-	lwz	r5, LPPACA_YIELDCOUNT(r3)
++	li	r6, LPPACA_YIELDCOUNT
++	LWZX_BE	r5, r3, r6
+ 	addi	r5, r5, 1
+-	stw	r5, LPPACA_YIELDCOUNT(r3)
++	STWX_BE	r5, r3, r6
+ 	li	r6, 1
+ 	stb	r6, VCPU_VPA_DIRTY(r4)
+ 25:
+@@ -1442,9 +1439,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+ 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
+ 	cmpdi	r8, 0
+ 	beq	25f
+-	lwz	r3, LPPACA_YIELDCOUNT(r8)
++	li	r4, LPPACA_YIELDCOUNT
++	LWZX_BE	r3, r8, r4
+ 	addi	r3, r3, 1
+-	stw	r3, LPPACA_YIELDCOUNT(r8)
++	STWX_BE	r3, r8, r4
+ 	li	r3, 1
+ 	stb	r3, VCPU_VPA_DIRTY(r9)
+ 25:
+@@ -1757,8 +1755,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 33:	ld	r8,PACA_SLBSHADOWPTR(r13)
+ 
+ 	.rept	SLB_NUM_BOLTED
+-	ld	r5,SLBSHADOW_SAVEAREA(r8)
+-	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
++	li	r3, SLBSHADOW_SAVEAREA
++	LDX_BE	r5, r8, r3
++	addi	r3, r3, 8
++	LDX_BE	r6, r8, r3
+ 	andis.	r7,r5,SLB_ESID_V at h
+ 	beq	1f
+ 	slbmte	r6,r5
+-- 
+1.7.10.4
+
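
A hedged C analogue of the LWZX_BE/STWX_BE sequences above: the
lppaca yield count stays big endian in memory, so the increment goes
through an explicit load-convert/convert-store pair. The function
name is illustrative.

	#include <asm/byteorder.h>
	#include <linux/types.h>

	static inline void bump_yield_count(__be32 *yield_count)
	{
		u32 v = be32_to_cpu(*yield_count);	/* BE -> host endian */

		*yield_count = cpu_to_be32(v + 1);	/* host endian -> BE */
	}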

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Enable-for-little-endian-hosts.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Enable-for-little-endian-hosts.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,28 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:39:38 +0200
+Subject: KVM: PPC: Book3S HV: Enable for little endian hosts
+Origin: https://git.kernel.org/linus/6947f948f06128409b94306afaca5ece873ee5a2
+
+Now that we've fixed all the issues that HV KVM code had on little endian
+hosts, we can enable it in the kernel configuration for users to play with.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/Kconfig |    1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index d6a53b9..8aeeda1 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -75,7 +75,6 @@ config KVM_BOOK3S_64
+ config KVM_BOOK3S_64_HV
+ 	tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+ 	depends on KVM_BOOK3S_64
+-	depends on !CPU_LITTLE_ENDIAN
+ 	select KVM_BOOK3S_HV_POSSIBLE
+ 	select MMU_NOTIFIER
+ 	select CMA
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-indirect-branch-issue.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-indirect-branch-issue.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,32 @@
+From: Anton Blanchard <anton at samba.org>
+Date: Thu, 12 Jun 2014 18:16:10 +1000
+Subject: KVM: PPC: Book3S HV: Fix ABIv2 indirect branch issue
+Origin: https://git.kernel.org/linus/05a308c722822b0fbcc706b54be70f9bb9d52539
+
+To establish addressability quickly, ABIv2 requires the target
+address of the function being called to be in r12.
+
+Signed-off-by: Anton Blanchard <anton at samba.org>
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 868347e..da1cac5 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1913,8 +1913,8 @@ hcall_try_real_mode:
+ 	lwax	r3,r3,r4
+ 	cmpwi	r3,0
+ 	beq	guest_exit_cont
+-	add	r3,r3,r4
+-	mtctr	r3
++	add	r12,r3,r4
++	mtctr	r12
+ 	mr	r3,r9		/* get vcpu pointer */
+ 	ld	r4,VCPU_GPR(R4)(r9)
+ 	bctrl
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-on-LE.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-on-LE.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,74 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 16 Jun 2014 14:41:15 +0200
+Subject: KVM: PPC: Book3S HV: Fix ABIv2 on LE
+Origin: https://git.kernel.org/linus/9bf163f86d0dc2f9070d9b1b8c27cedcf8eec816
+
+For code that doesn't live in modules we can just branch to the real function
+names, giving us compatibility with ABIv1 and ABIv2.
+
+Do this for the compiled-in code of HV KVM.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 364ca0c..855521e 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -668,9 +668,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+ 
+ 	mr	r31, r4
+ 	addi	r3, r31, VCPU_FPRS_TM
+-	bl	.load_fp_state
++	bl	load_fp_state
+ 	addi	r3, r31, VCPU_VRS_TM
+-	bl	.load_vr_state
++	bl	load_vr_state
+ 	mr	r4, r31
+ 	lwz	r7, VCPU_VRSAVE_TM(r4)
+ 	mtspr	SPRN_VRSAVE, r7
+@@ -1414,9 +1414,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+ 
+ 	/* Save FP/VSX. */
+ 	addi	r3, r9, VCPU_FPRS_TM
+-	bl	.store_fp_state
++	bl	store_fp_state
+ 	addi	r3, r9, VCPU_VRS_TM
+-	bl	.store_vr_state
++	bl	store_vr_state
+ 	mfspr	r6, SPRN_VRSAVE
+ 	stw	r6, VCPU_VRSAVE_TM(r9)
+ 1:
+@@ -2430,11 +2430,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+ 	mtmsrd	r8
+ 	isync
+ 	addi	r3,r3,VCPU_FPRS
+-	bl	.store_fp_state
++	bl	store_fp_state
+ #ifdef CONFIG_ALTIVEC
+ BEGIN_FTR_SECTION
+ 	addi	r3,r31,VCPU_VRS
+-	bl	.store_vr_state
++	bl	store_vr_state
+ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif
+ 	mfspr	r6,SPRN_VRSAVE
+@@ -2466,11 +2466,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+ 	mtmsrd	r8
+ 	isync
+ 	addi	r3,r4,VCPU_FPRS
+-	bl	.load_fp_state
++	bl	load_fp_state
+ #ifdef CONFIG_ALTIVEC
+ BEGIN_FTR_SECTION
+ 	addi	r3,r31,VCPU_VRS
+-	bl	.load_vr_state
++	bl	load_vr_state
+ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #endif
+ 	lwz	r7,VCPU_VRSAVE(r31)
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Make-HTAB-code-LE-host-aware.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-HV-Make-HTAB-code-LE-host-aware.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,881 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:16:06 +0200
+Subject: KVM: PPC: Book3S HV: Make HTAB code LE host aware
+Origin: https://git.kernel.org/linus/6f22bd3265fb542acb2697026b953ec07298242d
+
+When running on an LE host all data structures are kept in little endian
+byte order. However, the HTAB still needs to be maintained in big endian.
+
+So every time we access any HTAB we need to make sure we do so in the right
+byte order. Fix up all accesses to manually byte swap.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/include/asm/kvm_book3s.h    |    4 
+ arch/powerpc/include/asm/kvm_book3s_64.h |   15 ++-
+ arch/powerpc/kvm/book3s_64_mmu_hv.c      |  128 ++++++++++++++-------------
+ arch/powerpc/kvm/book3s_hv_rm_mmu.c      |  146 +++++++++++++++++--------------
+ 4 files changed, 164 insertions(+), 129 deletions(-)
+
+--- a/arch/powerpc/include/asm/kvm_book3s.h
++++ b/arch/powerpc/include/asm/kvm_book3s.h
+@@ -163,9 +163,9 @@
+ 			bool *writable);
+ extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+ 			unsigned long *rmap, long pte_index, int realmode);
+-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
++extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
+ 			unsigned long pte_index);
+-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
++void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
+ 			unsigned long pte_index);
+ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
+ 			unsigned long *nb_ret);
+--- a/arch/powerpc/include/asm/kvm_book3s_64.h
++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
+@@ -59,20 +59,29 @@
+ /* These bits are reserved in the guest view of the HPTE */
+ #define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
+ 
+-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
++static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
+ {
+ 	unsigned long tmp, old;
++	__be64 be_lockbit, be_bits;
++
++	/*
++	 * We load/store in native endian, but the HTAB is in big endian. If
++	 * we byte swap all data we apply on the PTE we're implicitly correct
++	 * again.
++	 */
++	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
++	be_bits = cpu_to_be64(bits);
+ 
+ 	asm volatile("	ldarx	%0,0,%2\n"
+ 		     "	and.	%1,%0,%3\n"
+ 		     "	bne	2f\n"
+-		     "	ori	%0,%0,%4\n"
++		     "	or	%0,%0,%4\n"
+ 		     "  stdcx.	%0,0,%2\n"
+ 		     "	beq+	2f\n"
+ 		     "	mr	%1,%3\n"
+ 		     "2:	isync"
+ 		     : "=&r" (tmp), "=&r" (old)
+-		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
++		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
+ 		     : "cc", "memory");
+ 	return old == 0;
+ }
+--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
+@@ -450,7 +450,7 @@
+ 	unsigned long slb_v;
+ 	unsigned long pp, key;
+ 	unsigned long v, gr;
+-	unsigned long *hptep;
++	__be64 *hptep;
+ 	int index;
+ 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+ 
+@@ -473,13 +473,13 @@
+ 		preempt_enable();
+ 		return -ENOENT;
+ 	}
+-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+-	v = hptep[0] & ~HPTE_V_HVLOCK;
++	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
++	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ 	gr = kvm->arch.revmap[index].guest_rpte;
+ 
+ 	/* Unlock the HPTE */
+ 	asm volatile("lwsync" : : : "memory");
+-	hptep[0] = v;
++	hptep[0] = cpu_to_be64(v);
+ 	preempt_enable();
+ 
+ 	gpte->eaddr = eaddr;
+@@ -583,7 +583,8 @@
+ 				unsigned long ea, unsigned long dsisr)
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+-	unsigned long *hptep, hpte[3], r;
++	unsigned long hpte[3], r;
++	__be64 *hptep;
+ 	unsigned long mmu_seq, psize, pte_size;
+ 	unsigned long gpa_base, gfn_base;
+ 	unsigned long gpa, gfn, hva, pfn;
+@@ -606,16 +607,16 @@
+ 	if (ea != vcpu->arch.pgfault_addr)
+ 		return RESUME_GUEST;
+ 	index = vcpu->arch.pgfault_index;
+-	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
++	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+ 	rev = &kvm->arch.revmap[index];
+ 	preempt_disable();
+ 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ 		cpu_relax();
+-	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
+-	hpte[1] = hptep[1];
++	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
++	hpte[1] = be64_to_cpu(hptep[1]);
+ 	hpte[2] = r = rev->guest_rpte;
+ 	asm volatile("lwsync" : : : "memory");
+-	hptep[0] = hpte[0];
++	hptep[0] = cpu_to_be64(hpte[0]);
+ 	preempt_enable();
+ 
+ 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
+@@ -731,8 +732,9 @@
+ 	preempt_disable();
+ 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ 		cpu_relax();
+-	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
+-	    rev->guest_rpte != hpte[2])
++	if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
++		be64_to_cpu(hptep[1]) != hpte[1] ||
++		rev->guest_rpte != hpte[2])
+ 		/* HPTE has been changed under us; let the guest retry */
+ 		goto out_unlock;
+ 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+@@ -752,20 +754,20 @@
+ 	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
+ 	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
+ 
+-	if (hptep[0] & HPTE_V_VALID) {
++	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
+ 		/* HPTE was previously valid, so we need to invalidate it */
+ 		unlock_rmap(rmap);
+-		hptep[0] |= HPTE_V_ABSENT;
++		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ 		kvmppc_invalidate_hpte(kvm, hptep, index);
+ 		/* don't lose previous R and C bits */
+-		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
++		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
+ 	} else {
+ 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
+ 	}
+ 
+-	hptep[1] = r;
++	hptep[1] = cpu_to_be64(r);
+ 	eieio();
+-	hptep[0] = hpte[0];
++	hptep[0] = cpu_to_be64(hpte[0]);
+ 	asm volatile("ptesync" : : : "memory");
+ 	preempt_enable();
+ 	if (page && hpte_is_writable(r))
+@@ -784,7 +786,7 @@
+ 	return ret;
+ 
+  out_unlock:
+-	hptep[0] &= ~HPTE_V_HVLOCK;
++	hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 	preempt_enable();
+ 	goto out_put;
+ }
+@@ -860,7 +862,7 @@
+ {
+ 	struct revmap_entry *rev = kvm->arch.revmap;
+ 	unsigned long h, i, j;
+-	unsigned long *hptep;
++	__be64 *hptep;
+ 	unsigned long ptel, psize, rcbits;
+ 
+ 	for (;;) {
+@@ -876,11 +878,11 @@
+ 		 * rmap chain lock.
+ 		 */
+ 		i = *rmapp & KVMPPC_RMAP_INDEX;
+-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
++		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+ 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ 			/* unlock rmap before spinning on the HPTE lock */
+ 			unlock_rmap(rmapp);
+-			while (hptep[0] & HPTE_V_HVLOCK)
++			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
+ 				cpu_relax();
+ 			continue;
+ 		}
+@@ -899,14 +901,14 @@
+ 
+ 		/* Now check and modify the HPTE */
+ 		ptel = rev[i].guest_rpte;
+-		psize = hpte_page_size(hptep[0], ptel);
+-		if ((hptep[0] & HPTE_V_VALID) &&
++		psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
++		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
+ 		    hpte_rpn(ptel, psize) == gfn) {
+ 			if (kvm->arch.using_mmu_notifiers)
+-				hptep[0] |= HPTE_V_ABSENT;
++				hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ 			kvmppc_invalidate_hpte(kvm, hptep, i);
+ 			/* Harvest R and C */
+-			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
++			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
+ 			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+ 			if (rcbits & ~rev[i].guest_rpte) {
+ 				rev[i].guest_rpte = ptel | rcbits;
+@@ -914,7 +916,7 @@
+ 			}
+ 		}
+ 		unlock_rmap(rmapp);
+-		hptep[0] &= ~HPTE_V_HVLOCK;
++		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 	}
+ 	return 0;
+ }
+@@ -961,7 +963,7 @@
+ {
+ 	struct revmap_entry *rev = kvm->arch.revmap;
+ 	unsigned long head, i, j;
+-	unsigned long *hptep;
++	__be64 *hptep;
+ 	int ret = 0;
+ 
+  retry:
+@@ -977,23 +979,24 @@
+ 
+ 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
+ 	do {
+-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
++		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+ 		j = rev[i].forw;
+ 
+ 		/* If this HPTE isn't referenced, ignore it */
+-		if (!(hptep[1] & HPTE_R_R))
++		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
+ 			continue;
+ 
+ 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ 			/* unlock rmap before spinning on the HPTE lock */
+ 			unlock_rmap(rmapp);
+-			while (hptep[0] & HPTE_V_HVLOCK)
++			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
+ 				cpu_relax();
+ 			goto retry;
+ 		}
+ 
+ 		/* Now check and modify the HPTE */
+-		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
++		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
++		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
+ 			kvmppc_clear_ref_hpte(kvm, hptep, i);
+ 			if (!(rev[i].guest_rpte & HPTE_R_R)) {
+ 				rev[i].guest_rpte |= HPTE_R_R;
+@@ -1001,7 +1004,7 @@
+ 			}
+ 			ret = 1;
+ 		}
+-		hptep[0] &= ~HPTE_V_HVLOCK;
++		hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 	} while ((i = j) != head);
+ 
+ 	unlock_rmap(rmapp);
+@@ -1035,7 +1038,7 @@
+ 		do {
+ 			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+ 			j = rev[i].forw;
+-			if (hp[1] & HPTE_R_R)
++			if (be64_to_cpu(hp[1]) & HPTE_R_R)
+ 				goto out;
+ 		} while ((i = j) != head);
+ 	}
+@@ -1075,7 +1078,7 @@
+ 	unsigned long head, i, j;
+ 	unsigned long n;
+ 	unsigned long v, r;
+-	unsigned long *hptep;
++	__be64 *hptep;
+ 	int npages_dirty = 0;
+ 
+  retry:
+@@ -1091,7 +1094,8 @@
+ 
+ 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
+ 	do {
+-		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
++		unsigned long hptep1;
++		hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+ 		j = rev[i].forw;
+ 
+ 		/*
+@@ -1108,29 +1112,30 @@
+ 		 * Otherwise we need to do the tlbie even if C==0 in
+ 		 * order to pick up any delayed writeback of C.
+ 		 */
+-		if (!(hptep[1] & HPTE_R_C) &&
+-		    (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
++		hptep1 = be64_to_cpu(hptep[1]);
++		if (!(hptep1 & HPTE_R_C) &&
++		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
+ 			continue;
+ 
+ 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
+ 			/* unlock rmap before spinning on the HPTE lock */
+ 			unlock_rmap(rmapp);
+-			while (hptep[0] & HPTE_V_HVLOCK)
++			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
+ 				cpu_relax();
+ 			goto retry;
+ 		}
+ 
+ 		/* Now check and modify the HPTE */
+-		if (!(hptep[0] & HPTE_V_VALID))
++		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
+ 			continue;
+ 
+ 		/* need to make it temporarily absent so C is stable */
+-		hptep[0] |= HPTE_V_ABSENT;
++		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ 		kvmppc_invalidate_hpte(kvm, hptep, i);
+-		v = hptep[0];
+-		r = hptep[1];
++		v = be64_to_cpu(hptep[0]);
++		r = be64_to_cpu(hptep[1]);
+ 		if (r & HPTE_R_C) {
+-			hptep[1] = r & ~HPTE_R_C;
++			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
+ 			if (!(rev[i].guest_rpte & HPTE_R_C)) {
+ 				rev[i].guest_rpte |= HPTE_R_C;
+ 				note_hpte_modification(kvm, &rev[i]);
+@@ -1143,7 +1148,7 @@
+ 		}
+ 		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+ 		v |= HPTE_V_VALID;
+-		hptep[0] = v;
++		hptep[0] = cpu_to_be64(v);
+ 	} while ((i = j) != head);
+ 
+ 	unlock_rmap(rmapp);
+@@ -1307,7 +1312,7 @@
+  * Returns 1 if this HPT entry has been modified or has pending
+  * R/C bit changes.
+  */
+-static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
++static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
+ {
+ 	unsigned long rcbits_unset;
+ 
+@@ -1316,13 +1321,14 @@
+ 
+ 	/* Also need to consider changes in reference and changed bits */
+ 	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+-	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
++	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
++	    (be64_to_cpu(hptp[1]) & rcbits_unset))
+ 		return 1;
+ 
+ 	return 0;
+ }
+ 
+-static long record_hpte(unsigned long flags, unsigned long *hptp,
++static long record_hpte(unsigned long flags, __be64 *hptp,
+ 			unsigned long *hpte, struct revmap_entry *revp,
+ 			int want_valid, int first_pass)
+ {
+@@ -1337,10 +1343,10 @@
+ 		return 0;
+ 
+ 	valid = 0;
+-	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
++	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+ 		valid = 1;
+ 		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
+-		    !(hptp[0] & HPTE_V_BOLTED))
++		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
+ 			valid = 0;
+ 	}
+ 	if (valid != want_valid)
+@@ -1352,7 +1358,7 @@
+ 		preempt_disable();
+ 		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
+ 			cpu_relax();
+-		v = hptp[0];
++		v = be64_to_cpu(hptp[0]);
+ 
+ 		/* re-evaluate valid and dirty from synchronized HPTE value */
+ 		valid = !!(v & HPTE_V_VALID);
+@@ -1360,9 +1366,9 @@
+ 
+ 		/* Harvest R and C into guest view if necessary */
+ 		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
+-		if (valid && (rcbits_unset & hptp[1])) {
+-			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
+-				HPTE_GR_MODIFIED;
++		if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
++			revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
++				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
+ 			dirty = 1;
+ 		}
+ 
+@@ -1381,13 +1387,13 @@
+ 			revp->guest_rpte = r;
+ 		}
+ 		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+-		hptp[0] &= ~HPTE_V_HVLOCK;
++		hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 		preempt_enable();
+ 		if (!(valid == want_valid && (first_pass || dirty)))
+ 			ok = 0;
+ 	}
+-	hpte[0] = v;
+-	hpte[1] = r;
++	hpte[0] = cpu_to_be64(v);
++	hpte[1] = cpu_to_be64(r);
+ 	return ok;
+ }
+ 
+@@ -1397,7 +1403,7 @@
+ 	struct kvm_htab_ctx *ctx = file->private_data;
+ 	struct kvm *kvm = ctx->kvm;
+ 	struct kvm_get_htab_header hdr;
+-	unsigned long *hptp;
++	__be64 *hptp;
+ 	struct revmap_entry *revp;
+ 	unsigned long i, nb, nw;
+ 	unsigned long __user *lbuf;
+@@ -1413,7 +1419,7 @@
+ 	flags = ctx->flags;
+ 
+ 	i = ctx->index;
+-	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
++	hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ 	revp = kvm->arch.revmap + i;
+ 	lbuf = (unsigned long __user *)buf;
+ 
+@@ -1497,7 +1503,7 @@
+ 	unsigned long i, j;
+ 	unsigned long v, r;
+ 	unsigned long __user *lbuf;
+-	unsigned long *hptp;
++	__be64 *hptp;
+ 	unsigned long tmp[2];
+ 	ssize_t nb;
+ 	long int err, ret;
+@@ -1539,7 +1545,7 @@
+ 		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+ 			break;
+ 
+-		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
++		hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ 		lbuf = (unsigned long __user *)buf;
+ 		for (j = 0; j < hdr.n_valid; ++j) {
+ 			err = -EFAULT;
+@@ -1551,7 +1557,7 @@
+ 			lbuf += 2;
+ 			nb += HPTE_SIZE;
+ 
+-			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
++			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
+ 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+ 			err = -EIO;
+ 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
+@@ -1577,7 +1583,7 @@
+ 		}
+ 
+ 		for (j = 0; j < hdr.n_invalid; ++j) {
+-			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
++			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
+ 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
+ 			++i;
+ 			hptp += 2;
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -154,10 +154,10 @@
+ 	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
+ }
+ 
+-static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
++static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
+ {
+ 	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
+-	hpte[0] = hpte_v;
++	hpte[0] = cpu_to_be64(hpte_v);
+ }
+ 
+ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
+@@ -166,7 +166,7 @@
+ {
+ 	unsigned long i, pa, gpa, gfn, psize;
+ 	unsigned long slot_fn, hva;
+-	unsigned long *hpte;
++	__be64 *hpte;
+ 	struct revmap_entry *rev;
+ 	unsigned long g_ptel;
+ 	struct kvm_memory_slot *memslot;
+@@ -275,9 +275,9 @@
+ 		return H_PARAMETER;
+ 	if (likely((flags & H_EXACT) == 0)) {
+ 		pte_index &= ~7UL;
+-		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
++		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ 		for (i = 0; i < 8; ++i) {
+-			if ((*hpte & HPTE_V_VALID) == 0 &&
++			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
+ 			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
+ 					  HPTE_V_ABSENT))
+ 				break;
+@@ -292,11 +292,13 @@
+ 			 */
+ 			hpte -= 16;
+ 			for (i = 0; i < 8; ++i) {
++				u64 pte;
+ 				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ 					cpu_relax();
+-				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
++				pte = be64_to_cpu(*hpte);
++				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ 					break;
+-				*hpte &= ~HPTE_V_HVLOCK;
++				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 				hpte += 2;
+ 			}
+ 			if (i == 8)
+@@ -304,14 +306,17 @@
+ 		}
+ 		pte_index += i;
+ 	} else {
+-		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
++		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ 		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
+ 				   HPTE_V_ABSENT)) {
+ 			/* Lock the slot and check again */
++			u64 pte;
++
+ 			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ 				cpu_relax();
+-			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+-				*hpte &= ~HPTE_V_HVLOCK;
++			pte = be64_to_cpu(*hpte);
++			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
++				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 				return H_PTEG_FULL;
+ 			}
+ 		}
+@@ -347,11 +352,11 @@
+ 		}
+ 	}
+ 
+-	hpte[1] = ptel;
++	hpte[1] = cpu_to_be64(ptel);
+ 
+ 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
+ 	eieio();
+-	hpte[0] = pteh;
++	hpte[0] = cpu_to_be64(pteh);
+ 	asm volatile("ptesync" : : : "memory");
+ 
+ 	*pte_idx_ret = pte_index;
+@@ -468,30 +473,35 @@
+ 			unsigned long pte_index, unsigned long avpn,
+ 			unsigned long *hpret)
+ {
+-	unsigned long *hpte;
++	__be64 *hpte;
+ 	unsigned long v, r, rb;
+ 	struct revmap_entry *rev;
++	u64 pte;
+ 
+ 	if (pte_index >= kvm->arch.hpt_npte)
+ 		return H_PARAMETER;
+-	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
++	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ 		cpu_relax();
+-	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+-	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
+-	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
+-		hpte[0] &= ~HPTE_V_HVLOCK;
++	pte = be64_to_cpu(hpte[0]);
++	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
++	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
++	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
++		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 		return H_NOT_FOUND;
+ 	}
+ 
+ 	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+-	v = hpte[0] & ~HPTE_V_HVLOCK;
++	v = pte & ~HPTE_V_HVLOCK;
+ 	if (v & HPTE_V_VALID) {
+-		hpte[0] &= ~HPTE_V_VALID;
+-		rb = compute_tlbie_rb(v, hpte[1], pte_index);
++		u64 pte1;
++
++		pte1 = be64_to_cpu(hpte[1]);
++		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
++		rb = compute_tlbie_rb(v, pte1, pte_index);
+ 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
+ 		/* Read PTE low word after tlbie to get final R/C values */
+-		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
++		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+ 	}
+ 	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
+ 	note_hpte_modification(kvm, rev);
+@@ -514,12 +524,14 @@
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+ 	unsigned long *args = &vcpu->arch.gpr[4];
+-	unsigned long *hp, *hptes[4], tlbrb[4];
++	__be64 *hp, *hptes[4];
++	unsigned long tlbrb[4];
+ 	long int i, j, k, n, found, indexes[4];
+ 	unsigned long flags, req, pte_index, rcbits;
+ 	int global;
+ 	long int ret = H_SUCCESS;
+ 	struct revmap_entry *rev, *revs[4];
++	u64 hp0;
+ 
+ 	global = global_invalidates(kvm, 0);
+ 	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
+@@ -542,8 +554,7 @@
+ 				ret = H_PARAMETER;
+ 				break;
+ 			}
+-			hp = (unsigned long *)
+-				(kvm->arch.hpt_virt + (pte_index << 4));
++			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
+ 			/* to avoid deadlock, don't spin except for first */
+ 			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
+ 				if (n)
+@@ -552,23 +563,24 @@
+ 					cpu_relax();
+ 			}
+ 			found = 0;
+-			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
++			hp0 = be64_to_cpu(hp[0]);
++			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
+ 				switch (flags & 3) {
+ 				case 0:		/* absolute */
+ 					found = 1;
+ 					break;
+ 				case 1:		/* andcond */
+-					if (!(hp[0] & args[j + 1]))
++					if (!(hp0 & args[j + 1]))
+ 						found = 1;
+ 					break;
+ 				case 2:		/* AVPN */
+-					if ((hp[0] & ~0x7fUL) == args[j + 1])
++					if ((hp0 & ~0x7fUL) == args[j + 1])
+ 						found = 1;
+ 					break;
+ 				}
+ 			}
+ 			if (!found) {
+-				hp[0] &= ~HPTE_V_HVLOCK;
++				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 				args[j] = ((0x90 | flags) << 56) + pte_index;
+ 				continue;
+ 			}
+@@ -577,7 +589,7 @@
+ 			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ 			note_hpte_modification(kvm, rev);
+ 
+-			if (!(hp[0] & HPTE_V_VALID)) {
++			if (!(hp0 & HPTE_V_VALID)) {
+ 				/* insert R and C bits from PTE */
+ 				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
+ 				args[j] |= rcbits << (56 - 5);
+@@ -585,8 +597,10 @@
+ 				continue;
+ 			}
+ 
+-			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
+-			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
++			/* leave it locked */
++			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
++			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
++				be64_to_cpu(hp[1]), pte_index);
+ 			indexes[n] = j;
+ 			hptes[n] = hp;
+ 			revs[n] = rev;
+@@ -605,7 +619,8 @@
+ 			pte_index = args[j] & ((1ul << 56) - 1);
+ 			hp = hptes[k];
+ 			rev = revs[k];
+-			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
++			remove_revmap_chain(kvm, pte_index, rev,
++				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
+ 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
+ 			args[j] |= rcbits << (56 - 5);
+ 			hp[0] = 0;
+@@ -620,23 +635,25 @@
+ 		      unsigned long va)
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+-	unsigned long *hpte;
++	__be64 *hpte;
+ 	struct revmap_entry *rev;
+ 	unsigned long v, r, rb, mask, bits;
++	u64 pte;
+ 
+ 	if (pte_index >= kvm->arch.hpt_npte)
+ 		return H_PARAMETER;
+ 
+-	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
++	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+ 		cpu_relax();
+-	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+-	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
+-		hpte[0] &= ~HPTE_V_HVLOCK;
++	pte = be64_to_cpu(hpte[0]);
++	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
++	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
++		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
+ 		return H_NOT_FOUND;
+ 	}
+ 
+-	v = hpte[0];
++	v = pte;
+ 	bits = (flags << 55) & HPTE_R_PP0;
+ 	bits |= (flags << 48) & HPTE_R_KEY_HI;
+ 	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+@@ -650,12 +667,12 @@
+ 		rev->guest_rpte = r;
+ 		note_hpte_modification(kvm, rev);
+ 	}
+-	r = (hpte[1] & ~mask) | bits;
++	r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
+ 
+ 	/* Update HPTE */
+ 	if (v & HPTE_V_VALID) {
+ 		rb = compute_tlbie_rb(v, r, pte_index);
+-		hpte[0] = v & ~HPTE_V_VALID;
++		hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
+ 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
+ 		/*
+ 		 * If the host has this page as readonly but the guest
+@@ -681,9 +698,9 @@
+ 			}
+ 		}
+ 	}
+-	hpte[1] = r;
++	hpte[1] = cpu_to_be64(r);
+ 	eieio();
+-	hpte[0] = v & ~HPTE_V_HVLOCK;
++	hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
+ 	asm volatile("ptesync" : : : "memory");
+ 	return H_SUCCESS;
+ }
+@@ -692,7 +709,8 @@
+ 		   unsigned long pte_index)
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+-	unsigned long *hpte, v, r;
++	__be64 *hpte;
++	unsigned long v, r;
+ 	int i, n = 1;
+ 	struct revmap_entry *rev = NULL;
+ 
+@@ -704,9 +722,9 @@
+ 	}
+ 	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ 	for (i = 0; i < n; ++i, ++pte_index) {
+-		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+-		v = hpte[0] & ~HPTE_V_HVLOCK;
+-		r = hpte[1];
++		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
++		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
++		r = be64_to_cpu(hpte[1]);
+ 		if (v & HPTE_V_ABSENT) {
+ 			v &= ~HPTE_V_ABSENT;
+ 			v |= HPTE_V_VALID;
+@@ -721,25 +739,27 @@
+ 	return H_SUCCESS;
+ }
+ 
+-void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
++void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
+ 			unsigned long pte_index)
+ {
+ 	unsigned long rb;
+ 
+-	hptep[0] &= ~HPTE_V_VALID;
+-	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
++	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
++	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
++			      pte_index);
+ 	do_tlbies(kvm, &rb, 1, 1, true);
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
+ 
+-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
++void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
+ 			   unsigned long pte_index)
+ {
+ 	unsigned long rb;
+ 	unsigned char rbyte;
+ 
+-	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+-	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
++	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
++			      pte_index);
++	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
+ 	/* modify only the second-last byte, which contains the ref bit */
+ 	*((char *)hptep + 14) = rbyte;
+ 	do_tlbies(kvm, &rb, 1, 1, false);
+@@ -765,7 +785,7 @@
+ 	unsigned long somask;
+ 	unsigned long vsid, hash;
+ 	unsigned long avpn;
+-	unsigned long *hpte;
++	__be64 *hpte;
+ 	unsigned long mask, val;
+ 	unsigned long v, r;
+ 
+@@ -797,11 +817,11 @@
+ 	val |= avpn;
+ 
+ 	for (;;) {
+-		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
++		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
+ 
+ 		for (i = 0; i < 16; i += 2) {
+ 			/* Read the PTE racily */
+-			v = hpte[i] & ~HPTE_V_HVLOCK;
++			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ 
+ 			/* Check valid/absent, hash, segment size and AVPN */
+ 			if (!(v & valid) || (v & mask) != val)
+@@ -810,8 +830,8 @@
+ 			/* Lock the PTE and read it under the lock */
+ 			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
+ 				cpu_relax();
+-			v = hpte[i] & ~HPTE_V_HVLOCK;
+-			r = hpte[i+1];
++			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
++			r = be64_to_cpu(hpte[i+1]);
+ 
+ 			/*
+ 			 * Check the HPTE again, including base page size
+@@ -822,7 +842,7 @@
+ 				return (hash << 3) + (i >> 1);
+ 
+ 			/* Unlock and move on */
+-			hpte[i] = v;
++			hpte[i] = cpu_to_be64(v);
+ 		}
+ 
+ 		if (val & HPTE_V_SECONDARY)
+@@ -851,7 +871,7 @@
+ 	struct kvm *kvm = vcpu->kvm;
+ 	long int index;
+ 	unsigned long v, r, gr;
+-	unsigned long *hpte;
++	__be64 *hpte;
+ 	unsigned long valid;
+ 	struct revmap_entry *rev;
+ 	unsigned long pp, key;
+@@ -867,9 +887,9 @@
+ 			return status;	/* there really was no HPTE */
+ 		return 0;		/* for prot fault, HPTE disappeared */
+ 	}
+-	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+-	v = hpte[0] & ~HPTE_V_HVLOCK;
+-	r = hpte[1];
++	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
++	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
++	r = be64_to_cpu(hpte[1]);
+ 	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+ 	gr = rev->guest_rpte;
+ 
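
The try_lock_hpte() comment in this patch relies on bitwise AND/OR
commuting with byte swapping: applying byte-swapped masks to the raw
big-endian HPTE gives the same result as converting the HPTE first
and using host-endian masks. A minimal sketch of the two equivalent
forms (names are illustrative):

	#include <asm/byteorder.h>
	#include <linux/types.h>

	static inline bool hpte_bits_set(const __be64 *hpte, u64 mask)
	{
		/* Form 1: convert the value, test with a host-endian mask. */
		bool a = (be64_to_cpu(*hpte) & mask) != 0;
		/* Form 2: test the raw value with a byte-swapped mask. */
		bool b = (*hpte & cpu_to_be64(mask)) != 0;

		return a && b;	/* the two forms always agree */
	}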

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Fix-sparse-endian-checks.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Fix-sparse-endian-checks.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,76 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 16 Jun 2014 13:58:11 +0200
+Subject: KVM: PPC: Book3S PR: Fix sparse endian checks
+Origin: https://git.kernel.org/linus/f396df35188c59a5ecb83932190505ef297754e6
+
+While running sparse with endian checks over the code base, it triggered at
+some places that were missing casts or had wrong types. Fix them up.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_pr_papr.c |   21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
+index 52a63bf..f7c25c6 100644
+--- a/arch/powerpc/kvm/book3s_pr_papr.c
++++ b/arch/powerpc/kvm/book3s_pr_papr.c
+@@ -40,8 +40,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
+ {
+ 	long flags = kvmppc_get_gpr(vcpu, 4);
+ 	long pte_index = kvmppc_get_gpr(vcpu, 5);
+-	unsigned long pteg[2 * 8];
+-	unsigned long pteg_addr, i, *hpte;
++	__be64 pteg[2 * 8];
++	__be64 *hpte;
++	unsigned long pteg_addr, i;
+ 	long int ret;
+ 
+ 	i = pte_index & 7;
+@@ -93,8 +94,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
+ 	pteg = get_pteg_addr(vcpu, pte_index);
+ 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+ 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+-	pte[0] = be64_to_cpu(pte[0]);
+-	pte[1] = be64_to_cpu(pte[1]);
++	pte[0] = be64_to_cpu((__force __be64)pte[0]);
++	pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+ 	ret = H_NOT_FOUND;
+ 	if ((pte[0] & HPTE_V_VALID) == 0 ||
+@@ -171,8 +172,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+ 
+ 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
+ 		copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+-		pte[0] = be64_to_cpu(pte[0]);
+-		pte[1] = be64_to_cpu(pte[1]);
++		pte[0] = be64_to_cpu((__force __be64)pte[0]);
++		pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+ 		/* tsl = AVPN */
+ 		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
+@@ -211,8 +212,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
+ 	pteg = get_pteg_addr(vcpu, pte_index);
+ 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+ 	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+-	pte[0] = be64_to_cpu(pte[0]);
+-	pte[1] = be64_to_cpu(pte[1]);
++	pte[0] = be64_to_cpu((__force __be64)pte[0]);
++	pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+ 	ret = H_NOT_FOUND;
+ 	if ((pte[0] & HPTE_V_VALID) == 0 ||
+@@ -231,8 +232,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
+ 
+ 	rb = compute_tlbie_rb(v, r, pte_index);
+ 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+-	pte[0] = cpu_to_be64(pte[0]);
+-	pte[1] = cpu_to_be64(pte[1]);
++	pte[0] = (__force u64)cpu_to_be64(pte[0]);
++	pte[1] = (__force u64)cpu_to_be64(pte[1]);
+ 	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ 	ret = H_SUCCESS;
+ 
+-- 
+1.7.10.4
+
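
A minimal example of the annotation pattern this patch applies: data
copied in as raw unsigned long is really big endian, so a __force
cast documents the reinterpretation for sparse while the conversion
fixes the value on little-endian hosts.

	#include <asm/byteorder.h>
	#include <linux/types.h>

	static inline void pte_to_host_endian(unsigned long pte[2])
	{
		pte[0] = be64_to_cpu((__force __be64)pte[0]);
		pte[1] = be64_to_cpu((__force __be64)pte[1]);
	}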

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Handle-hyp-doorbell-exits.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3S-PR-Handle-hyp-doorbell-exits.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,28 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 16 Jun 2014 16:37:38 +0200
+Subject: KVM: PPC: Book3S PR: Handle hyp doorbell exits
+Origin: https://git.kernel.org/linus/568fccc43f901889b94b228cd0238916cb40e0bd
+
+If we're running PR KVM in HV mode, we may get hypervisor doorbell interrupts.
+Handle those the same way we treat normal doorbells.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+---
+ arch/powerpc/kvm/book3s_pr.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index 8ea7da4..3b82e86 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -988,6 +988,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ 	case BOOK3S_INTERRUPT_DECREMENTER:
+ 	case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ 	case BOOK3S_INTERRUPT_DOORBELL:
++	case BOOK3S_INTERRUPT_H_DOORBELL:
+ 		vcpu->stat.dec_exits++;
+ 		r = RESUME_GUEST;
+ 		break;
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-HV-Fix-tlbie-compile-error.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-HV-Fix-tlbie-compile-error.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,44 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 17:13:55 +0200
+Subject: KVM: PPC: Book3s HV: Fix tlbie compile error
+Origin: https://git.kernel.org/linus/f6bf3a66227447f89f25b9db0ae39357decf2509
+
+Some compilers complain about uninitialized variables in the compute_tlbie_rb
+function. When you follow the code path you'll realize that we'll never get
+to that point, but the compiler isn't all that smart.
+
+So just default to 4k page sizes for everything, making the compiler happy
+and the code slightly easier to read.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+Acked-by: Paul Mackerras <paulus at samba.org>
+---
+ arch/powerpc/include/asm/kvm_book3s_64.h |    8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
+index fddb72b..c7871f3 100644
+--- a/arch/powerpc/include/asm/kvm_book3s_64.h
++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
+@@ -110,16 +110,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
+ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+ 					     unsigned long pte_index)
+ {
+-	int b_psize, a_psize;
++	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
+ 	unsigned int penc;
+ 	unsigned long rb = 0, va_low, sllp;
+ 	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+ 
+-	if (!(v & HPTE_V_LARGE)) {
+-		/* both base and actual psize is 4k */
+-		b_psize = MMU_PAGE_4K;
+-		a_psize = MMU_PAGE_4K;
+-	} else {
++	if (v & HPTE_V_LARGE) {
+ 		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
+ 
+ 			/* valid entries have a shift value */
+-- 
+1.7.10.4
+
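
The pattern the patch applies, in miniature: initializing to a safe
default removes the code path the compiler cannot prove is taken.
A hedged, self-contained illustration (not kernel code):

	#include <stdio.h>

	static int page_shift(int large, int encoded_shift)
	{
		int shift = 12;			/* default: 4k pages */

		if (large)
			shift = encoded_shift;	/* large pages override it */
		return shift;
	}

	int main(void)
	{
		printf("%d\n", page_shift(0, 24));	/* prints 12 */
		return 0;
	}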

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-PR-Disable-AIL-mode-with-OPAL.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Book3s-PR-Disable-AIL-mode-with-OPAL.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,63 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 9 Jun 2014 01:16:32 +0200
+Subject: KVM: PPC: Book3s PR: Disable AIL mode with OPAL
+Origin: https://git.kernel.org/linus/fb4188bad02f4871b26cf19b98e8d92499ca5d31
+
+When we're using PR KVM we must not allow the CPU to take interrupts
+in virtual mode, as the SLB does not contain host kernel mappings
+when running inside the guest context.
+
+To make sure we get good performance for non-KVM tasks but still
+properly functioning PR KVM, let's just disable AIL whenever a vcpu
+is scheduled in.
+
+This is fundamentally different from how we deal with AIL on pSeries
+type machines where we disable AIL for the whole machine as soon as
+a single KVM VM is up.
+
+The reason for that is easy - on pSeries we do not have control over
+per-cpu configuration of AIL. We also don't want to mess with CPU hotplug
+races and AIL configuration, so setting it per CPU is easier and more
+flexible.
+
+This patch fixes running PR KVM on POWER8 bare metal for me.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+Acked-by: Paul Mackerras <paulus at samba.org>
+---
+ arch/powerpc/kvm/book3s_pr.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index 3da412e..8ea7da4 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -71,6 +71,12 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
+ 	svcpu->in_use = 0;
+ 	svcpu_put(svcpu);
+ #endif
++
++	/* Disable AIL if supported */
++	if (cpu_has_feature(CPU_FTR_HVMODE) &&
++	    cpu_has_feature(CPU_FTR_ARCH_207S))
++		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
++
+ 	vcpu->cpu = smp_processor_id();
+ #ifdef CONFIG_PPC_BOOK3S_32
+ 	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
+@@ -91,6 +97,12 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
+ 
+ 	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+ 	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
++
++	/* Enable AIL if supported */
++	if (cpu_has_feature(CPU_FTR_HVMODE) &&
++	    cpu_has_feature(CPU_FTR_ARCH_207S))
++		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
++
+ 	vcpu->cpu = -1;
+ }
+ 
+-- 
+1.7.10.4
+

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Enable-IRQFD-support-for-the-XICS-interrupt-.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-PPC-Enable-IRQFD-support-for-the-XICS-interrupt-.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,193 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:14 +1000
+Subject: KVM: PPC: Enable IRQFD support for the XICS interrupt
+ controller
+Origin: https://git.kernel.org/linus/25a2150bee00b4d996487552948b9b3ba21d0257
+
+This makes it possible to use IRQFDs on platforms that use the XICS
+interrupt controller.  To do this we implement kvm_irq_map_gsi() and
+kvm_irq_map_chip_pin() in book3s_xics.c, so as to provide a 1-1 mapping
+between global interrupt numbers and XICS interrupt source numbers.
+For now, all interrupts are mapped as "IRQCHIP" interrupts, and no
+MSI support is provided.
+
+This means that kvm_set_irq can now get called with level == 0 or 1
+as well as the powerpc-specific values KVM_INTERRUPT_SET,
+KVM_INTERRUPT_UNSET and KVM_INTERRUPT_SET_LEVEL.  We change
+ics_deliver_irq() to accept all those values, and remove its
+report_status argument, as it is always false, given that we don't
+support KVM_IRQ_LINE_STATUS.
+
+This also adds support for interrupt ack notifiers to the XICS code
+so that the IRQFD resampler functionality can be supported.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/powerpc/kvm/Kconfig             |    2 ++
+ arch/powerpc/kvm/book3s_hv_rm_xics.c |    5 ++++
+ arch/powerpc/kvm/book3s_xics.c       |   55 ++++++++++++++++++++++++++++------
+ arch/powerpc/kvm/book3s_xics.h       |    2 ++
+ 4 files changed, 55 insertions(+), 9 deletions(-)
+
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index d4741db..602eb51 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -170,6 +170,8 @@ config KVM_MPIC
+ config KVM_XICS
+ 	bool "KVM in-kernel XICS emulation"
+ 	depends on KVM_BOOK3S_64 && !KVM_MPIC
++	select HAVE_KVM_IRQCHIP
++	select HAVE_KVM_IRQFD
+ 	---help---
+ 	  Include support for the XICS (eXternal Interrupt Controller
+ 	  Specification) interrupt controller architecture used on
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+index b4b0082..3ee38e6 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+@@ -401,6 +401,11 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+ 		icp->rm_action |= XICS_RM_REJECT;
+ 		icp->rm_reject = irq;
+ 	}
++
++	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
++		icp->rm_action |= XICS_RM_NOTIFY_EOI;
++		icp->rm_eoied_irq = irq;
++	}
+  bail:
+ 	return check_too_hard(xics, icp);
+ }
+diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
+index d1acd32..eaeb780 100644
+--- a/arch/powerpc/kvm/book3s_xics.c
++++ b/arch/powerpc/kvm/book3s_xics.c
+@@ -64,8 +64,12 @@
+ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ 			    u32 new_irq);
+ 
+-static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
+-			   bool report_status)
++/*
++ * Return value ideally indicates how the interrupt was handled, but no
++ * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
++ * so just return 0.
++ */
++static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
+ {
+ 	struct ics_irq_state *state;
+ 	struct kvmppc_ics *ics;
+@@ -82,17 +86,14 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
+ 	if (!state->exists)
+ 		return -EINVAL;
+ 
+-	if (report_status)
+-		return state->asserted;
+-
+ 	/*
+ 	 * We set state->asserted locklessly. This should be fine as
+ 	 * we are the only setter, thus concurrent access is undefined
+ 	 * to begin with.
+ 	 */
+-	if (level == KVM_INTERRUPT_SET_LEVEL)
++	if (level == 1 || level == KVM_INTERRUPT_SET_LEVEL)
+ 		state->asserted = 1;
+-	else if (level == KVM_INTERRUPT_UNSET) {
++	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
+ 		state->asserted = 0;
+ 		return 0;
+ 	}
+@@ -100,7 +101,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level,
+ 	/* Attempt delivery */
+ 	icp_deliver_irq(xics, NULL, irq);
+ 
+-	return state->asserted;
++	return 0;
+ }
+ 
+ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
+@@ -772,6 +773,8 @@ static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+ 	if (state->asserted)
+ 		icp_deliver_irq(xics, icp, irq);
+ 
++	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
++
+ 	return H_SUCCESS;
+ }
+ 
+@@ -789,6 +792,8 @@ static noinline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
+ 		icp_check_resend(xics, icp);
+ 	if (icp->rm_action & XICS_RM_REJECT)
+ 		icp_deliver_irq(xics, icp, icp->rm_reject);
++	if (icp->rm_action & XICS_RM_NOTIFY_EOI)
++		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
+ 
+ 	icp->rm_action = 0;
+ 
+@@ -1170,7 +1175,16 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ {
+ 	struct kvmppc_xics *xics = kvm->arch.xics;
+ 
+-	return ics_deliver_irq(xics, irq, level, line_status);
++	return ics_deliver_irq(xics, irq, level);
++}
++
++int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
++		int irq_source_id, int level, bool line_status)
++{
++	if (!level)
++		return -1;
++	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
++			   level, line_status);
+ }
+ 
+ static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+@@ -1301,3 +1315,26 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.icp = NULL;
+ 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+ }
++
++static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
++			struct kvm *kvm, int irq_source_id, int level,
++			bool line_status)
++{
++	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
++}
++
++int kvm_irq_map_gsi(struct kvm *kvm,
++		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
++{
++	entries->gsi = gsi;
++	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
++	entries->set = xics_set_irq;
++	entries->irqchip.irqchip = 0;
++	entries->irqchip.pin = gsi;
++	return 1;
++}
++
++int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
++{
++	return pin;
++}
+diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
+index dd9326c..e8aaa7a 100644
+--- a/arch/powerpc/kvm/book3s_xics.h
++++ b/arch/powerpc/kvm/book3s_xics.h
+@@ -71,9 +71,11 @@ struct kvmppc_icp {
+ #define XICS_RM_KICK_VCPU	0x1
+ #define XICS_RM_CHECK_RESEND	0x2
+ #define XICS_RM_REJECT		0x4
++#define XICS_RM_NOTIFY_EOI	0x8
+ 	u32 rm_action;
+ 	struct kvm_vcpu *rm_kick_target;
+ 	u32  rm_reject;
++	u32  rm_eoied_irq;
+ 
+ 	/* Debug stuff for real mode */
+ 	union kvmppc_icp_state rm_dbgstate;
+-- 
+1.7.10.4
+
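
What this enables from userspace, sketched with the generic KVM irqfd
ioctl; error handling is omitted and the interrupt number is the
caller's choice. On XICS the gsi maps 1-1 to an interrupt source
number, as described above.

	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int attach_irqfd(int vmfd, unsigned int gsi)
	{
		struct kvm_irqfd irqfd = {
			.fd = eventfd(0, 0),	/* signalled to inject the irq */
			.gsi = gsi,		/* XICS interrupt source number */
		};

		return ioctl(vmfd, KVM_IRQFD, &irqfd);
	}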

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-Rename-and-add-argument-to-check_extension.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-Rename-and-add-argument-to-check_extension.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,148 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Mon, 14 Jul 2014 18:27:35 +0200
+Subject: KVM: Rename and add argument to check_extension
+Origin: https://git.kernel.org/linus/784aa3d7fb6f729c06d5836c9d9569f58e4d05ae
+
+In preparation for making the check_extension function available to VM scope
+we add a struct kvm * argument to the function header and rename the function
+accordingly. It will still be called from the /dev/kvm fd, but with a NULL
+argument for struct kvm *.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+Acked-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/arm/kvm/arm.c         |    2 +-
+ arch/ia64/kvm/kvm-ia64.c   |    2 +-
+ arch/mips/kvm/kvm_mips.c   |    2 +-
+ arch/powerpc/kvm/powerpc.c |    2 +-
+ arch/s390/kvm/kvm-s390.c   |    2 +-
+ arch/x86/kvm/x86.c         |    2 +-
+ include/linux/kvm_host.h   |    2 +-
+ virt/kvm/kvm_main.c        |    6 +++---
+ 8 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 3c82b37..cb77f999 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -184,7 +184,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ 	}
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r;
+ 	switch (ext) {
+diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
+index 6a4309b..0729ba6 100644
+--- a/arch/ia64/kvm/kvm-ia64.c
++++ b/arch/ia64/kvm/kvm-ia64.c
+@@ -190,7 +190,7 @@ void kvm_arch_check_processor_compat(void *rtn)
+ 	*(int *)rtn = 0;
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 
+ 	int r;
+diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
+index d687c6e..3ca79aa 100644
+--- a/arch/mips/kvm/kvm_mips.c
++++ b/arch/mips/kvm/kvm_mips.c
+@@ -885,7 +885,7 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+ 	return VM_FAULT_SIGBUS;
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r;
+ 
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 8e03568..d870bac 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -391,7 +391,7 @@ void kvm_arch_sync_events(struct kvm *kvm)
+ {
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r;
+ 	/* FIXME!!
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 2f3e14f..00268ca 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -146,7 +146,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
+ 	return -EINVAL;
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r;
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5a8691b..5a62d91 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2616,7 +2616,7 @@ out:
+ 	return r;
+ }
+ 
+-int kvm_dev_ioctl_check_extension(long ext)
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+ 	int r;
+ 
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index ec4e3bd..5065b95 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -602,7 +602,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 			 unsigned int ioctl, unsigned long arg);
+ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+ 
+-int kvm_dev_ioctl_check_extension(long ext);
++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
+ 
+ int kvm_get_dirty_log(struct kvm *kvm,
+ 			struct kvm_dirty_log *log, int *is_dirty);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 4b6c01b..e28f3ca 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2571,7 +2571,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
+ 	return r;
+ }
+ 
+-static long kvm_dev_ioctl_check_extension_generic(long arg)
++static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
+ {
+ 	switch (arg) {
+ 	case KVM_CAP_USER_MEMORY:
+@@ -2595,7 +2595,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
+ 	default:
+ 		break;
+ 	}
+-	return kvm_dev_ioctl_check_extension(arg);
++	return kvm_vm_ioctl_check_extension(kvm, arg);
+ }
+ 
+ static long kvm_dev_ioctl(struct file *filp,
+@@ -2614,7 +2614,7 @@ static long kvm_dev_ioctl(struct file *filp,
+ 		r = kvm_dev_ioctl_create_vm(arg);
+ 		break;
+ 	case KVM_CHECK_EXTENSION:
+-		r = kvm_dev_ioctl_check_extension_generic(arg);
++		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
+ 		break;
+ 	case KVM_GET_VCPU_MMAP_SIZE:
+ 		r = -EINVAL;
+-- 
+1.7.10.4
+
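[Editor's note: for context, KVM_CHECK_EXTENSION has so far been issued against
the /dev/kvm fd; the follow-up patch in this series allows it on a vm fd as
well. A minimal userspace sketch of the existing usage, with KVM_CAP_IRQCHIP as
an arbitrary example capability and error handling omitted (illustrative only,
not part of the patch):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            if (kvm < 0)
                    return 1;
            /* returns 0 or a positive value when the capability exists */
            int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
            return r > 0 ? 0 : 1;
    }
]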

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-irqchip-Provide-and-use-accessors-for-irq-routin.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-irqchip-Provide-and-use-accessors-for-irq-routin.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,278 @@
+From: Paul Mackerras <paulus at samba.org>
+Date: Mon, 30 Jun 2014 20:51:10 +1000
+Subject: KVM: irqchip: Provide and use accessors for irq
+ routing table
+Origin: https://git.kernel.org/linus/8ba918d488caded2c4368b0b922eb905fe3bb101
+
+This provides accessor functions for the KVM interrupt mappings, in
+order to reduce the amount of code that accesses the fields of the
+kvm_irq_routing_table struct, and restrict that code to one file,
+virt/kvm/irqchip.c.  The new functions are kvm_irq_map_gsi(), which
+maps from a global interrupt number to a set of IRQ routing entries,
+and kvm_irq_map_chip_pin(), which maps from IRQ chip and pin numbers to
+a global interrupt number.
+
+This also moves the update of kvm_irq_routing_table::chip[][]
+into irqchip.c, out of the various kvm_set_routing_entry
+implementations.  That means that none of the kvm_set_routing_entry
+implementations need the kvm_irq_routing_table argument anymore,
+so this removes it.
+
+This does not change any locking or data lifetime rules.
+
+Signed-off-by: Paul Mackerras <paulus at samba.org>
+Tested-by: Eric Auger <eric.auger at linaro.org>
+Tested-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ arch/powerpc/kvm/mpic.c   |    4 +---
+ arch/s390/kvm/interrupt.c |    3 +--
+ include/linux/kvm_host.h  |    8 ++++++--
+ virt/kvm/eventfd.c        |   10 ++++++----
+ virt/kvm/irq_comm.c       |   20 +++++++++-----------
+ virt/kvm/irqchip.c        |   42 ++++++++++++++++++++++++++++++++++--------
+ 6 files changed, 57 insertions(+), 30 deletions(-)
+
+diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
+index b68d0dc..39b3a8f 100644
+--- a/arch/powerpc/kvm/mpic.c
++++ b/arch/powerpc/kvm/mpic.c
+@@ -1826,8 +1826,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ 	return 0;
+ }
+ 
+-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+-			  struct kvm_kernel_irq_routing_entry *e,
++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ 			  const struct kvm_irq_routing_entry *ue)
+ {
+ 	int r = -EINVAL;
+@@ -1839,7 +1838,6 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+ 		e->irqchip.pin = ue->u.irqchip.pin;
+ 		if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS)
+ 			goto out;
+-		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
+ 		break;
+ 	case KVM_IRQ_ROUTING_MSI:
+ 		e->set = kvm_set_msi;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 92528a0..f4c819b 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1556,8 +1556,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
+ 	return ret;
+ }
+ 
+-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+-			  struct kvm_kernel_irq_routing_entry *e,
++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ 			  const struct kvm_irq_routing_entry *ue)
+ {
+ 	int ret;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 5065b95..4956149 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -752,6 +752,11 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
+ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ 			     bool mask);
+ 
++int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
++		    struct kvm_irq_routing_table *irq_rt, int gsi);
++int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
++			 unsigned irqchip, unsigned pin);
++
+ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ 		bool line_status);
+ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+@@ -942,8 +947,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
+ 			const struct kvm_irq_routing_entry *entries,
+ 			unsigned nr,
+ 			unsigned flags);
+-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+-			  struct kvm_kernel_irq_routing_entry *e,
++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ 			  const struct kvm_irq_routing_entry *ue);
+ void kvm_free_irq_routing(struct kvm *kvm);
+ 
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index bae593a..15fa948 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -282,20 +282,22 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+ 			 struct kvm_irq_routing_table *irq_rt)
+ {
+ 	struct kvm_kernel_irq_routing_entry *e;
++	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
++	int i, n_entries;
++
++	n_entries = kvm_irq_map_gsi(entries, irq_rt, irqfd->gsi);
+ 
+ 	write_seqcount_begin(&irqfd->irq_entry_sc);
+ 
+ 	irqfd->irq_entry.type = 0;
+-	if (irqfd->gsi >= irq_rt->nr_rt_entries)
+-		goto out;
+ 
+-	hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
++	e = entries;
++	for (i = 0; i < n_entries; ++i, ++e) {
+ 		/* Only fast-path MSI. */
+ 		if (e->type == KVM_IRQ_ROUTING_MSI)
+ 			irqfd->irq_entry = *e;
+ 	}
+ 
+- out:
+ 	write_seqcount_end(&irqfd->irq_entry_sc);
+ }
+ 
+diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
+index a228ee8..1758445 100644
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -160,6 +160,7 @@ static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
+  */
+ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+ {
++	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
+ 	struct kvm_kernel_irq_routing_entry *e;
+ 	int ret = -EINVAL;
+ 	struct kvm_irq_routing_table *irq_rt;
+@@ -177,14 +178,13 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+ 	 */
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+ 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	if (irq < irq_rt->nr_rt_entries)
+-		hlist_for_each_entry(e, &irq_rt->map[irq], link) {
+-			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+-				ret = kvm_set_msi_inatomic(e, kvm);
+-			else
+-				ret = -EWOULDBLOCK;
+-			break;
+-		}
++	if (kvm_irq_map_gsi(entries, irq_rt, irq) > 0) {
++		e = &entries[0];
++		if (likely(e->type == KVM_IRQ_ROUTING_MSI))
++			ret = kvm_set_msi_inatomic(e, kvm);
++		else
++			ret = -EWOULDBLOCK;
++	}
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+ 	return ret;
+ }
+@@ -272,8 +272,7 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+ }
+ 
+-int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+-			  struct kvm_kernel_irq_routing_entry *e,
++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ 			  const struct kvm_irq_routing_entry *ue)
+ {
+ 	int r = -EINVAL;
+@@ -304,7 +303,6 @@ int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
+ 		e->irqchip.pin = ue->u.irqchip.pin + delta;
+ 		if (e->irqchip.pin >= max_pin)
+ 			goto out;
+-		rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
+ 		break;
+ 	case KVM_IRQ_ROUTING_MSI:
+ 		e->set = kvm_set_msi;
+diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
+index b43c275..f4648dd 100644
+--- a/virt/kvm/irqchip.c
++++ b/virt/kvm/irqchip.c
+@@ -31,13 +31,37 @@
+ #include <trace/events/kvm.h>
+ #include "irq.h"
+ 
++int kvm_irq_map_gsi(struct kvm_kernel_irq_routing_entry *entries,
++		    struct kvm_irq_routing_table *irq_rt, int gsi)
++{
++	struct kvm_kernel_irq_routing_entry *e;
++	int n = 0;
++
++	if (gsi < irq_rt->nr_rt_entries) {
++		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
++			entries[n] = *e;
++			++n;
++		}
++	}
++
++	return n;
++}
++
++int kvm_irq_map_chip_pin(struct kvm_irq_routing_table *irq_rt,
++			 unsigned irqchip, unsigned pin)
++{
++	return irq_rt->chip[irqchip][pin];
++}
++
+ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ {
++	struct kvm_irq_routing_table *irq_rt;
+ 	struct kvm_irq_ack_notifier *kian;
+ 	int gsi, idx;
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
++	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
++	gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+ 	if (gsi != -1)
+ 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ 					 link)
+@@ -54,13 +78,15 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+ 
+ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+ {
++	struct kvm_irq_routing_table *irq_rt;
+ 	struct kvm_irq_ack_notifier *kian;
+ 	int gsi, idx;
+ 
+ 	trace_kvm_ack_irq(irqchip, pin);
+ 
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+-	gsi = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu)->chip[irqchip][pin];
++	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
++	gsi = kvm_irq_map_chip_pin(irq_rt, irqchip, pin);
+ 	if (gsi != -1)
+ 		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
+ 					 link)
+@@ -115,8 +141,8 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ 		bool line_status)
+ {
+-	struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
+-	int ret = -1, i = 0, idx;
++	struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
++	int ret = -1, i, idx;
+ 	struct kvm_irq_routing_table *irq_rt;
+ 
+ 	trace_kvm_set_irq(irq, level, irq_source_id);
+@@ -127,9 +153,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ 	 */
+ 	idx = srcu_read_lock(&kvm->irq_srcu);
+ 	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+-	if (irq < irq_rt->nr_rt_entries)
+-		hlist_for_each_entry(e, &irq_rt->map[irq], link)
+-			irq_set[i++] = *e;
++	i = kvm_irq_map_gsi(irq_set, irq_rt, irq);
+ 	srcu_read_unlock(&kvm->irq_srcu, idx);
+ 
+ 	while(i--) {
+@@ -171,9 +195,11 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
+ 
+ 	e->gsi = ue->gsi;
+ 	e->type = ue->type;
+-	r = kvm_set_routing_entry(rt, e, ue);
++	r = kvm_set_routing_entry(e, ue);
+ 	if (r)
+ 		goto out;
++	if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
++		rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
+ 
+ 	hlist_add_head(&e->link, &rt->map[e->gsi]);
+ 	r = 0;
+-- 
+1.7.10.4
+
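[Editor's note: the caller pattern the accessors enable can be seen in
kvm_set_irq above: copy the routing entries for a GSI out under SRCU, then work
on the private copies after dropping the read lock. A condensed sketch of that
pattern, assuming kernel context with kvm and gsi in scope (illustrative only):

    struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
    struct kvm_irq_routing_table *irq_rt;
    int i, n, idx;

    idx = srcu_read_lock(&kvm->irq_srcu);
    irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
    /* kvm_irq_map_gsi() copies the entries for this GSI into the array */
    n = kvm_irq_map_gsi(entries, irq_rt, gsi);
    srcu_read_unlock(&kvm->irq_srcu, idx);

    for (i = 0; i < n; i++) {
            /* entries[i] is a private copy; the table may change under us */
    }
]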

Added: dists/trunk/linux/debian/patches/features/powerpc/KVM-prepare-for-KVM_-S-G-ET_MP_STATE-on-other-archit.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/KVM-prepare-for-KVM_-S-G-ET_MP_STATE-on-other-archit.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,84 @@
+From: David Hildenbrand <dahi at linux.vnet.ibm.com>
+Date: Mon, 12 May 2014 16:05:13 +0200
+Subject: KVM: prepare for KVM_(S|G)ET_MP_STATE on other
+ architectures
+Origin: https://git.kernel.org/linus/0b4820d6d8b6448bc9f7fac1bb1a801a53b425e1
+
+Highlight the aspects of the ioctls that are actually specific to x86
+and ia64. As the restrictions defined there (irqchip) and MP states may not
+apply to other architectures, these parts are flagged as belonging to x86 and ia64.
+
+In preparation for the use of KVM_(S|G)ET_MP_STATE by s390.
+Fix a spelling error (KVM_SET_MP_STATE vs. KVM_SET_MPSTATE) on the way.
+
+Signed-off-by: David Hildenbrand <dahi at linux.vnet.ibm.com>
+Reviewed-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Acked-by: Christian Borntraeger <borntraeger at de.ibm.com>
+Signed-off-by: Christian Borntraeger <borntraeger at de.ibm.com>
+---
+ Documentation/virtual/kvm/api.txt |   21 ++++++++++++---------
+ include/uapi/linux/kvm.h          |    3 ++-
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 0fe3649..904c61c 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -988,18 +988,20 @@ uniprocessor guests).
+ 
+ Possible values are:
+ 
+- - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running
++ - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86, ia64]
+  - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
+-                                 which has not yet received an INIT signal
++                                 which has not yet received an INIT signal [x86,
++                                 ia64]
+  - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
+-                                 now ready for a SIPI
++                                 now ready for a SIPI [x86, ia64]
+  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
+-                                 is waiting for an interrupt
++                                 is waiting for an interrupt [x86, ia64]
+  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
+-                                 accessible via KVM_GET_VCPU_EVENTS)
++                                 accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+ 
+-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
+-irqchip, the multiprocessing state must be maintained by userspace.
++On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
++in-kernel irqchip, the multiprocessing state must be maintained by userspace on
++these architectures.
+ 
+ 
+ 4.39 KVM_SET_MP_STATE
+@@ -1013,8 +1015,9 @@ Returns: 0 on success; -1 on error
+ Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
+ arguments.
+ 
+-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
+-irqchip, the multiprocessing state must be maintained by userspace.
++On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
++in-kernel irqchip, the multiprocessing state must be maintained by userspace on
++these architectures.
+ 
+ 
+ 4.40 KVM_SET_IDENTITY_MAP_ADDR
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index e11d8f1..37d4ec6 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -399,8 +399,9 @@ struct kvm_vapic_addr {
+ 	__u64 vapic_addr;
+ };
+ 
+-/* for KVM_SET_MPSTATE */
++/* for KVM_SET_MP_STATE */
+ 
++/* not all states are valid on all architectures */
+ #define KVM_MP_STATE_RUNNABLE          0
+ #define KVM_MP_STATE_UNINITIALIZED     1
+ #define KVM_MP_STATE_INIT_RECEIVED     2
+-- 
+1.7.10.4
+
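[Editor's note: a minimal userspace sketch of the ioctl being documented here,
assuming an already-created vcpu fd; the helper name get_mp_state is
illustrative, not from the patch:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int get_mp_state(int vcpu_fd)
    {
            struct kvm_mp_state mp;

            if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                    return -1;
            return mp.mp_state;    /* e.g. KVM_MP_STATE_RUNNABLE */
    }
]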

Added: dists/trunk/linux/debian/patches/features/powerpc/PPC-Add-asm-helpers-for-BE-32bit-load-store.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/trunk/linux/debian/patches/features/powerpc/PPC-Add-asm-helpers-for-BE-32bit-load-store.patch	Mon Sep  1 19:34:44 2014	(r21758)
@@ -0,0 +1,37 @@
+From: Alexander Graf <agraf at suse.de>
+Date: Wed, 11 Jun 2014 10:07:40 +0200
+Subject: PPC: Add asm helpers for BE 32bit load/store
+Origin: https://git.kernel.org/linus/8f6822c4b9fac6e47414d2f1e11dbabda9bc2163
+
+From assembly code we may not only have to explicitly access 64bit values in BE
+byte order, but sometimes also 32bit ones. Add helpers that allow easy use of
+lwzx/stwx in their respective byte-reversed or native forms.
+
+Signed-off-by: Alexander Graf <agraf at suse.de>
+CC: Benjamin Herrenschmidt <benh at kernel.crashing.org>
+---
+ arch/powerpc/include/asm/asm-compat.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
+index 4b237aa..21be8ae 100644
+--- a/arch/powerpc/include/asm/asm-compat.h
++++ b/arch/powerpc/include/asm/asm-compat.h
+@@ -34,10 +34,14 @@
+ #define PPC_MIN_STKFRM	112
+ 
+ #ifdef __BIG_ENDIAN__
++#define LWZX_BE	stringify_in_c(lwzx)
+ #define LDX_BE	stringify_in_c(ldx)
++#define STWX_BE	stringify_in_c(stwx)
+ #define STDX_BE	stringify_in_c(stdx)
+ #else
++#define LWZX_BE	stringify_in_c(lwbrx)
+ #define LDX_BE	stringify_in_c(ldbrx)
++#define STWX_BE	stringify_in_c(stwbrx)
+ #define STDX_BE	stringify_in_c(stdbrx)
+ #endif
+ 
+-- 
+1.7.10.4
+
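[Editor's note: because the helpers go through stringify_in_c, they can be used
both from .S files and from C inline assembly. A hedged sketch of the latter,
assuming kernel context; read_be32 is an illustrative name, not from the patch:

    #include <asm/asm-compat.h>

    static inline u32 read_be32(const u32 *p)
    {
            u32 val;

            /* LWZX_BE expands to lwzx on BE hosts and lwbrx on LE hosts */
            asm volatile(LWZX_BE "%0,0,%1" : "=r" (val) : "r" (p));
            return val;
    }
]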

Modified: dists/trunk/linux/debian/patches/series
==============================================================================
--- dists/trunk/linux/debian/patches/series	Sat Aug 30 20:28:09 2014	(r21757)
+++ dists/trunk/linux/debian/patches/series	Mon Sep  1 19:34:44 2014	(r21758)
@@ -71,6 +71,28 @@
 features/mips/MIPS-Loongson-3-Enable-the-COP2-usage.patch
 features/mips/MIPS-Loongson-Rename-CONFIG_LEMOTE_MACH3A-to-CONFIG_.patch
 features/mips/MIPS-Loongson-3-Add-Loongson-LS3A-RS780E-1-way-machi.patch
+features/powerpc/KVM-prepare-for-KVM_-S-G-ET_MP_STATE-on-other-archit.patch
+features/powerpc/KVM-PPC-Book3s-PR-Disable-AIL-mode-with-OPAL.patch
+features/powerpc/KVM-PPC-Book3s-HV-Fix-tlbie-compile-error.patch
+features/powerpc/KVM-PPC-Book3S-PR-Handle-hyp-doorbell-exits.patch
+features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-indirect-branch-issue.patch
+features/powerpc/KVM-PPC-Book3S-PR-Fix-sparse-endian-checks.patch
+features/powerpc/KVM-PPC-Book3S-Controls-for-in-kernel-sPAPR-hypercal.patch
+features/powerpc/PPC-Add-asm-helpers-for-BE-32bit-load-store.patch
+features/powerpc/KVM-PPC-Book3S-HV-Make-HTAB-code-LE-host-aware.patch
+features/powerpc/KVM-PPC-Book3S-HV-Access-guest-VPA-in-BE.patch
+features/powerpc/KVM-PPC-Book3S-HV-Access-host-lppaca-and-shadow-slb-.patch
+features/powerpc/KVM-PPC-Book3S-HV-Access-XICS-in-BE.patch
+features/powerpc/KVM-PPC-Book3S-HV-Fix-ABIv2-on-LE.patch
+features/powerpc/KVM-PPC-Book3S-HV-Enable-for-little-endian-hosts.patch
+features/powerpc/KVM-Rename-and-add-argument-to-check_extension.patch
+features/powerpc/KVM-Allow-KVM_CHECK_EXTENSION-on-the-vm-fd.patch
+features/powerpc/KVM-Don-t-keep-reference-to-irq-routing-table-in-irq.patch
+features/powerpc/KVM-irqchip-Provide-and-use-accessors-for-irq-routin.patch
+features/powerpc/KVM-Move-all-accesses-to-kvm-irq_routing-into-irqchi.patch
+features/powerpc/KVM-Move-irq-notifier-implementation-into-eventfd.c.patch
+features/powerpc/KVM-Give-IRQFD-its-own-separate-enabling-Kconfig-opt.patch
+features/powerpc/KVM-PPC-Enable-IRQFD-support-for-the-XICS-interrupt-.patch
 
 # Miscellaneous bug fixes
 bugfix/all/misc-bmp085-Enable-building-as-a-module.patch


