[Pkg-xen-changes] [xen] 01/01: Import xen_4.5.1~rc1.orig.tar.xz
Bastian Blank
waldi at moszumanska.debian.org
Sun May 31 20:14:31 UTC 2015
This is an automated email from the git hooks/post-receive script.
waldi pushed a commit to branch upstream-develop
in repository xen.
commit 9245d82e421a7ce0f7387b7c1b8185088769ddb3
Author: Bastian Blank <waldi at debian.org>
Date: Sun May 31 18:33:47 2015 +0200
Import xen_4.5.1~rc1.orig.tar.xz
---
Config.mk | 8 +-
MAINTAINERS | 11 ++-
docs/misc/xen-command-line.markdown | 26 ++++++-
tools/configure | 4 +-
tools/configure.ac | 4 +-
tools/firmware/hvmloader/pci.c | 36 ++++++---
tools/libxc/xc_core.h | 3 +
tools/libxc/xc_core_arm.c | 17 +++++
tools/libxc/xc_core_x86.c | 17 +++++
tools/libxc/xc_dom_boot.c | 18 +++--
tools/libxc/xc_domain.c | 46 +++++++++++-
tools/libxc/xc_private.c | 2 +-
tools/libxl/Makefile | 2 +-
tools/libxl/libxl_create.c | 5 +-
tools/libxl/libxl_dm.c | 21 +++++-
tools/xenstore/xs_lib.c | 2 +-
xen/Makefile | 2 +-
xen/arch/arm/domain.c | 61 +++++++++++-----
xen/arch/arm/domain_build.c | 2 +
xen/arch/arm/gic-v2.c | 2 -
xen/arch/arm/gic-v3.c | 2 -
xen/arch/arm/setup.c | 30 +++++---
xen/arch/arm/traps.c | 9 +--
xen/arch/arm/vgic-v2.c | 86 +++++++++++++---------
xen/arch/arm/vgic-v3.c | 129 ++++++++++++++++++++-------------
xen/arch/arm/vgic.c | 15 ++--
xen/arch/x86/crash.c | 59 ++++++---------
xen/arch/x86/domctl.c | 10 ++-
xen/arch/x86/hvm/hvm.c | 32 +++++++-
xen/arch/x86/hvm/i8254.c | 1 +
xen/arch/x86/hvm/pmtimer.c | 1 +
xen/arch/x86/hvm/rtc.c | 3 +-
xen/arch/x86/hvm/vmx/vmx.c | 8 +-
xen/arch/x86/hvm/vpic.c | 1 +
xen/arch/x86/hvm/vpmu.c | 13 ++++
xen/arch/x86/irq.c | 27 ++++---
xen/arch/x86/mm.c | 6 +-
xen/arch/x86/msi.c | 14 +++-
xen/arch/x86/setup.c | 2 +-
xen/arch/x86/shutdown.c | 40 ++++++++--
xen/arch/x86/tboot.c | 3 +
xen/arch/x86/traps.c | 20 +++--
xen/arch/x86/x86_64/entry.S | 14 +---
xen/arch/x86/x86_emulate/x86_emulate.c | 33 +++++----
xen/arch/x86/xstate.c | 17 ++++-
xen/common/bunzip2.c | 2 +-
xen/common/compat/memory.c | 48 ++++++++++++
xen/common/cpupool.c | 6 --
xen/common/domctl.c | 11 ++-
xen/common/efi/runtime.c | 8 +-
xen/common/event_channel.c | 10 ++-
xen/common/kernel.c | 6 ++
xen/common/lz4/decompress.c | 3 +
xen/common/memory.c | 5 +-
xen/common/page_alloc.c | 3 +-
xen/common/softirq.c | 2 +-
xen/common/sysctl.c | 2 +-
xen/drivers/char/dt-uart.c | 6 +-
xen/drivers/char/pl011.c | 16 ++++
xen/drivers/char/serial.c | 34 +++++++++
xen/drivers/passthrough/vtd/iommu.c | 66 ++++++++++-------
xen/drivers/passthrough/vtd/iommu.h | 12 ++-
xen/drivers/passthrough/vtd/utils.c | 2 +
xen/include/asm-arm/arm64/page.h | 4 +-
xen/include/asm-arm/asm_defns.h | 9 +++
xen/include/asm-arm/bitops.h | 7 +-
xen/include/asm-arm/domain.h | 2 +-
xen/include/asm-arm/gic.h | 1 +
xen/include/asm-arm/gic_v3_defs.h | 3 +
xen/include/asm-arm/processor.h | 5 +-
xen/include/asm-arm/regs.h | 8 ++
xen/include/asm-arm/vgic.h | 4 +-
xen/include/asm-x86/hvm/domain.h | 1 +
xen/include/asm-x86/processor.h | 6 +-
xen/include/asm-x86/xstate.h | 4 +
xen/include/public/domctl.h | 1 +
xen/include/public/xen-compat.h | 2 +-
xen/include/xen/lib.h | 2 +
xen/include/xen/serial.h | 4 +
xen/include/xlat.lst | 2 +
80 files changed, 838 insertions(+), 333 deletions(-)
diff --git a/Config.mk b/Config.mk
index 7288326..b9a89a1 100644
--- a/Config.mk
+++ b/Config.mk
@@ -252,7 +252,7 @@ QEMU_TRADITIONAL_URL ?= git://xenbits.xen.org/qemu-xen-4.5-testing.git
SEABIOS_UPSTREAM_URL ?= git://xenbits.xen.org/seabios.git
endif
OVMF_UPSTREAM_REVISION ?= 447d264115c476142f884af0be287622cd244423
-QEMU_UPSTREAM_REVISION ?= qemu-xen-4.5.0
+QEMU_UPSTREAM_REVISION ?= qemu-xen-4.5.1-rc1
SEABIOS_UPSTREAM_REVISION ?= rel-1.7.5
# Thu May 22 16:59:16 2014 -0400
# python3 fixes for vgabios and csm builds.
@@ -260,9 +260,9 @@ SEABIOS_UPSTREAM_REVISION ?= rel-1.7.5
ETHERBOOT_NICS ?= rtl8139 8086100e
-QEMU_TRADITIONAL_REVISION ?= xen-4.5.0
-# Mon Oct 6 16:24:46 2014 +0100
-# qemu-xen-trad: Switch to $(LIBEXEC_BIN) from $(LIBEXEC)
+QEMU_TRADITIONAL_REVISION ?= xen-4.5.1-rc1
+# Tue Mar 31 16:27:45 2015 +0100
+# xen: limit guest control of PCI command register
# Specify which qemu-dm to use. This may be `ioemu' to use the old
# Mercurial in-tree version, or a local directory, or a git URL.
diff --git a/MAINTAINERS b/MAINTAINERS
index ee2fe90..a205136 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -49,9 +49,14 @@ for inclusion in xen-unstable.
Please see http://wiki.xen.org/wiki/Xen_Maintenance_Releases for more
information.
-Remember to copy the appropriate stable branch maintainer who will be
-listed in this section of the MAINTAINERS file in the appropriate
-branch.
+Remember to copy the stable branch maintainer. The maintainer for this
+branch is:
+
+ Jan Beulich <jbeulich at suse.com>
+
+Tools backport requests should also be copied to:
+
+ Ian Jackson <Ian.Jackson at eu.citrix.com>
Unstable Subsystem Maintainers
==============================
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 152ae03..1d877f9 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -237,6 +237,17 @@ and not running softirqs. Reduce this if softirqs are not being run frequently
enough. Setting this to a high value may cause boot failure, particularly if
the NMI watchdog is also enabled.
+### xenheap\_megabytes (arm32)
+> `= <size>`
+
+> Default: `0` (1/32 of RAM)
+
+Amount of RAM to set aside for the Xenheap.
+
+By default will use 1/32 of the RAM up to a maximum of 1GB and with a
+minimum of 32M, subject to a suitably aligned and sized contiguous
+region of memory being available.
+
### clocksource
> `= pit | hpet | acpi`
@@ -550,7 +561,7 @@ Pin dom0 vcpus to their respective pcpus
Flag that makes a 64bit dom0 boot in PVH mode. No 32bit support at present.
### dtuart (ARM)
-> `= path [,options]`
+> `= path [:options]`
> Default: `""`
@@ -1091,7 +1102,7 @@ The following resources are available:
* `rmid_max` indicates the max value for rmid.
### reboot
-> `= t[riple] | k[bd] | a[cpi] | p[ci] | n[o] [, [w]arm | [c]old]`
+> `= t[riple] | k[bd] | a[cpi] | p[ci] | e[fi] | n[o] [, [w]arm | [c]old]`
> Default: `0`
@@ -1111,6 +1122,9 @@ Specify the host reboot method.
`pci` instructs Xen to reboot the host using PCI reset register (port CF9).
+'efi' instructs Xen to reboot using the EFI reboot call (in EFI mode by
+ default it will use that method first).
+
### sched
> `= credit | credit2 | sedf | arinc653`
@@ -1330,6 +1344,8 @@ wrong behaviour (see handle\_pmc\_quirk()).
If 'vpmu=bts' is specified the virtualisation of the Branch Trace Store (BTS)
feature is switched on on Intel processors supporting this feature.
+Note that if **watchdog** option is also specified vpmu will be turned off.
+
*Warning:*
As the BTS virtualisation is not 100% safe and because of the nehalem quirk
don't use the vpmu flag on production systems with Intel cpus!
@@ -1362,9 +1378,11 @@ Permit use of x2apic setup for SMP environments.
### x2apic\_phys
> `= <boolean>`
-> Default: `true`
+> Default: `true` if **FADT** mandates physical mode, `false` otherwise.
-Use the x2apic physical apic driver. The alternative is the x2apic cluster driver.
+In the case that x2apic is in use, this option switches between physical and
+clustered mode. The default, given no hint from the **FADT**, is cluster
+mode.
### xsave
> `= <boolean>`
diff --git a/tools/configure b/tools/configure
index b0aea0a..2fa7426 100755
--- a/tools/configure
+++ b/tools/configure
@@ -2396,8 +2396,8 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
case $host_vendor in
-rumpxen) CONFIG_RUMP=y; rump=true ;;
-*) CONFIG_RUMP=n; rump=false ;;
+rumpxen|rumprun) CONFIG_RUMP=y; rump=true ;;
+*) CONFIG_RUMP=n; rump=false ;;
esac
diff --git a/tools/configure.ac b/tools/configure.ac
index 1ac63a3..b7f1513 100644
--- a/tools/configure.ac
+++ b/tools/configure.ac
@@ -43,8 +43,8 @@ APPEND_INCLUDES and APPEND_LIB instead when possible.])
AC_CANONICAL_HOST
case $host_vendor in
-rumpxen) CONFIG_RUMP=y; rump=true ;;
-*) CONFIG_RUMP=n; rump=false ;;
+rumpxen|rumprun) CONFIG_RUMP=y; rump=true ;;
+*) CONFIG_RUMP=n; rump=false ;;
esac
AC_SUBST(CONFIG_RUMP)
diff --git a/tools/firmware/hvmloader/pci.c b/tools/firmware/hvmloader/pci.c
index 4e8d803..5ff87a7 100644
--- a/tools/firmware/hvmloader/pci.c
+++ b/tools/firmware/hvmloader/pci.c
@@ -179,18 +179,31 @@ void pci_setup(void)
bar_reg = PCI_ROM_ADDRESS;
bar_data = pci_readl(devfn, bar_reg);
- is_64bar = !!((bar_data & (PCI_BASE_ADDRESS_SPACE |
- PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
- (PCI_BASE_ADDRESS_SPACE_MEMORY |
- PCI_BASE_ADDRESS_MEM_TYPE_64));
- pci_writel(devfn, bar_reg, ~0);
+ if ( bar_reg != PCI_ROM_ADDRESS )
+ {
+ is_64bar = !!((bar_data & (PCI_BASE_ADDRESS_SPACE |
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+ (PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64));
+ pci_writel(devfn, bar_reg, ~0);
+ }
+ else
+ {
+ is_64bar = 0;
+ pci_writel(devfn, bar_reg,
+ (bar_data | PCI_ROM_ADDRESS_MASK) &
+ ~PCI_ROM_ADDRESS_ENABLE);
+ }
bar_sz = pci_readl(devfn, bar_reg);
pci_writel(devfn, bar_reg, bar_data);
- bar_sz &= (((bar_data & PCI_BASE_ADDRESS_SPACE) ==
- PCI_BASE_ADDRESS_SPACE_MEMORY) ?
- PCI_BASE_ADDRESS_MEM_MASK :
- (PCI_BASE_ADDRESS_IO_MASK & 0xffff));
+ if ( bar_reg != PCI_ROM_ADDRESS )
+ bar_sz &= (((bar_data & PCI_BASE_ADDRESS_SPACE) ==
+ PCI_BASE_ADDRESS_SPACE_MEMORY) ?
+ PCI_BASE_ADDRESS_MEM_MASK :
+ (PCI_BASE_ADDRESS_IO_MASK & 0xffff));
+ else
+ bar_sz &= PCI_ROM_ADDRESS_MASK;
if (is_64bar) {
bar_data_upper = pci_readl(devfn, bar_reg + 4);
pci_writel(devfn, bar_reg + 4, ~0);
@@ -214,8 +227,9 @@ void pci_setup(void)
bars[i].bar_reg = bar_reg;
bars[i].bar_sz = bar_sz;
- if ( (bar_data & PCI_BASE_ADDRESS_SPACE) ==
- PCI_BASE_ADDRESS_SPACE_MEMORY )
+ if ( ((bar_data & PCI_BASE_ADDRESS_SPACE) ==
+ PCI_BASE_ADDRESS_SPACE_MEMORY) ||
+ (bar_reg == PCI_ROM_ADDRESS) )
mmio_total += bar_sz;
nr_bars++;
diff --git a/tools/libxc/xc_core.h b/tools/libxc/xc_core.h
index 10cbfca..5867030 100644
--- a/tools/libxc/xc_core.h
+++ b/tools/libxc/xc_core.h
@@ -148,6 +148,9 @@ int xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width,
shared_info_any_t *live_shinfo,
xen_pfn_t **live_p2m, unsigned long *pfnp);
+int xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+ xen_pfn_t *gpfn);
+
#if defined (__i386__) || defined (__x86_64__)
# include "xc_core_x86.h"
diff --git a/tools/libxc/xc_core_arm.c b/tools/libxc/xc_core_arm.c
index 2fbcf3f..16508e7 100644
--- a/tools/libxc/xc_core_arm.c
+++ b/tools/libxc/xc_core_arm.c
@@ -96,6 +96,23 @@ xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_do
return xc_core_arch_map_p2m_rw(xch, dinfo, info,
live_shinfo, live_p2m, pfnp, 1);
}
+
+int
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+ xen_pfn_t *gpfn)
+{
+ /*
+ * The Grant Table region space is not used until the guest is
+ * booting. Use the first page for the scratch pfn.
+ */
+ XC_BUILD_BUG_ON(GUEST_GNTTAB_SIZE < XC_PAGE_SIZE);
+
+ *gpfn = GUEST_GNTTAB_BASE >> XC_PAGE_SHIFT;
+
+ return 0;
+}
+
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xc_core_x86.c b/tools/libxc/xc_core_x86.c
index f05060a..d8846f1 100644
--- a/tools/libxc/xc_core_x86.c
+++ b/tools/libxc/xc_core_x86.c
@@ -205,6 +205,23 @@ xc_core_arch_map_p2m_writable(xc_interface *xch, unsigned int guest_width, xc_do
return xc_core_arch_map_p2m_rw(xch, dinfo, info,
live_shinfo, live_p2m, pfnp, 1);
}
+
+int
+xc_core_arch_get_scratch_gpfn(xc_interface *xch, domid_t domid,
+ xen_pfn_t *gpfn)
+{
+ int rc;
+
+ rc = xc_domain_maximum_gpfn(xch, domid);
+
+ if ( rc < 0 )
+ return rc;
+
+ *gpfn = (xen_pfn_t)rc + 1;
+
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/tools/libxc/xc_dom_boot.c b/tools/libxc/xc_dom_boot.c
index f0a1c64..a141eb5 100644
--- a/tools/libxc/xc_dom_boot.c
+++ b/tools/libxc/xc_dom_boot.c
@@ -33,6 +33,7 @@
#include "xg_private.h"
#include "xc_dom.h"
+#include "xc_core.h"
#include <xen/hvm/params.h>
#include <xen/grant_table.h>
@@ -365,7 +366,7 @@ int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
domid_t xenstore_domid)
{
int rc;
- xen_pfn_t max_gfn;
+ xen_pfn_t scratch_gpfn;
struct xen_add_to_physmap xatp = {
.domid = domid,
.space = XENMAPSPACE_grant_table,
@@ -375,16 +376,21 @@ int xc_dom_gnttab_hvm_seed(xc_interface *xch, domid_t domid,
.domid = domid,
};
- max_gfn = xc_domain_maximum_gpfn(xch, domid);
- if ( max_gfn <= 0 ) {
+ rc = xc_core_arch_get_scratch_gpfn(xch, domid, &scratch_gpfn);
+ if ( rc < 0 )
+ {
xc_dom_panic(xch, XC_INTERNAL_ERROR,
- "%s: failed to get max gfn "
+ "%s: failed to get a scratch gfn "
"[errno=%d]\n",
__FUNCTION__, errno);
return -1;
}
- xatp.gpfn = max_gfn + 1;
- xrfp.gpfn = max_gfn + 1;
+ xatp.gpfn = scratch_gpfn;
+ xrfp.gpfn = scratch_gpfn;
+
+ xc_dom_printf(xch, "%s: called, pfn=0x%"PRI_xen_pfn, __FUNCTION__,
+ scratch_gpfn);
+
rc = do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
if ( rc != 0 )
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index b864872..eb88eee 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1992,6 +1992,8 @@ int xc_domain_memory_mapping(
{
DECLARE_DOMCTL;
xc_dominfo_t info;
+ int ret = 0, err;
+ unsigned long done = 0, nr, max_batch_sz;
if ( xc_domain_getinfo(xch, domid, 1, &info) != 1 ||
info.domid != domid )
@@ -2002,14 +2004,50 @@ int xc_domain_memory_mapping(
if ( !xc_core_arch_auto_translated_physmap(&info) )
return 0;
+ if ( !nr_mfns )
+ return 0;
+
domctl.cmd = XEN_DOMCTL_memory_mapping;
domctl.domain = domid;
- domctl.u.memory_mapping.first_gfn = first_gfn;
- domctl.u.memory_mapping.first_mfn = first_mfn;
- domctl.u.memory_mapping.nr_mfns = nr_mfns;
domctl.u.memory_mapping.add_mapping = add_mapping;
+ max_batch_sz = nr_mfns;
+ do
+ {
+ nr = min(nr_mfns - done, max_batch_sz);
+ domctl.u.memory_mapping.nr_mfns = nr;
+ domctl.u.memory_mapping.first_gfn = first_gfn + done;
+ domctl.u.memory_mapping.first_mfn = first_mfn + done;
+ err = do_domctl(xch, &domctl);
+ if ( err && errno == E2BIG )
+ {
+ if ( max_batch_sz <= 1 )
+ break;
+ max_batch_sz >>= 1;
+ continue;
+ }
+ /* Save the first error... */
+ if ( !ret )
+ ret = err;
+ /* .. and ignore the rest of them when removing. */
+ if ( err && add_mapping != DPCI_REMOVE_MAPPING )
+ break;
- return do_domctl(xch, &domctl);
+ done += nr;
+ } while ( done < nr_mfns );
+
+ /*
+ * Undo what we have done unless unmapping, by unmapping the entire region.
+ * Errors here are ignored.
+ */
+ if ( ret && add_mapping != DPCI_REMOVE_MAPPING )
+ xc_domain_memory_mapping(xch, domid, first_gfn, first_mfn, nr_mfns,
+ DPCI_REMOVE_MAPPING);
+
+ /* We might get E2BIG so many times that we never advance. */
+ if ( !done && !ret )
+ ret = -1;
+
+ return ret;
}
int xc_domain_ioport_mapping(
diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c
index e2441ad..df6cd9b 100644
--- a/tools/libxc/xc_private.c
+++ b/tools/libxc/xc_private.c
@@ -33,7 +33,7 @@
#define XENCTRL_OSDEP "XENCTRL_OSDEP"
-#if !defined (__MINIOS__) && !defined(__RUMPUSER_XEN__)
+#if !defined (__MINIOS__) && !defined(__RUMPUSER_XEN__) && !defined(__RUMPRUN__)
#define DO_DYNAMIC_OSDEP
#endif
diff --git a/tools/libxl/Makefile b/tools/libxl/Makefile
index b417372..6a8575b 100644
--- a/tools/libxl/Makefile
+++ b/tools/libxl/Makefile
@@ -266,7 +266,7 @@ install: all
$(SYMLINK_SHLIB) libxlutil.so.$(XLUMAJOR).$(XLUMINOR) $(DESTDIR)$(LIBDIR)/libxlutil.so.$(XLUMAJOR)
$(SYMLINK_SHLIB) libxlutil.so.$(XLUMAJOR) $(DESTDIR)$(LIBDIR)/libxlutil.so
$(INSTALL_DATA) libxlutil.a $(DESTDIR)$(LIBDIR)
- $(INSTALL_DATA) libxl.h libxl_event.h libxl_json.h _libxl_types.h _libxl_types_json.h _libxl_list.h libxl_utils.h libxl_uuid.h $(DESTDIR)$(INCLUDEDIR)
+ $(INSTALL_DATA) libxl.h libxl_event.h libxl_json.h _libxl_types.h _libxl_types_json.h _libxl_list.h libxl_utils.h libxl_uuid.h libxlutil.h $(DESTDIR)$(INCLUDEDIR)
$(INSTALL_DATA) bash-completion $(DESTDIR)$(BASH_COMPLETION_DIR)/xl.sh
.PHONY: clean
diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
index 1198225..6f87d1c 100644
--- a/tools/libxl/libxl_create.c
+++ b/tools/libxl/libxl_create.c
@@ -1167,6 +1167,7 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
"failed give dom%d access to ioports %"PRIx32"-%"PRIx32,
domid, io->first, io->first + io->number - 1);
ret = ERROR_FAIL;
+ goto error_out;
}
}
@@ -1182,6 +1183,7 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
if (ret < 0) {
LOGE(ERROR, "failed give dom%d access to irq %d", domid, irq);
ret = ERROR_FAIL;
+ goto error_out;
}
}
@@ -1198,7 +1200,7 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
"failed give dom%d access to iomem range %"PRIx64"-%"PRIx64,
domid, io->start, io->start + io->number - 1);
ret = ERROR_FAIL;
- continue;
+ goto error_out;
}
ret = xc_domain_memory_mapping(CTX->xch, domid,
io->gfn, io->start,
@@ -1209,6 +1211,7 @@ static void domcreate_launch_dm(libxl__egc *egc, libxl__multidev *multidev,
" to guest address %"PRIx64,
domid, io->start, io->start + io->number - 1, io->gfn);
ret = ERROR_FAIL;
+ goto error_out;
}
}
diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
index c2b0487..094a133 100644
--- a/tools/libxl/libxl_dm.c
+++ b/tools/libxl/libxl_dm.c
@@ -180,7 +180,14 @@ static char ** libxl__build_device_model_args_old(libxl__gc *gc,
if (libxl_defbool_val(vnc->findunused)) {
flexarray_append(dm_args, "-vncunused");
}
- }
+ } else
+ /*
+ * VNC is not enabled by default by qemu-xen-traditional,
+ * however passing -vnc none causes SDL to not be
+ * (unexpectedly) enabled by default. This is overridden by
+ * explicitly passing -sdl below as required.
+ */
+ flexarray_append_pair(dm_args, "-vnc", "none");
if (sdl) {
flexarray_append(dm_args, "-sdl");
@@ -513,7 +520,17 @@ static char ** libxl__build_device_model_args_new(libxl__gc *gc,
}
flexarray_append(dm_args, vncarg);
- }
+ } else
+ /*
+ * Ensure that by default no vnc server is created.
+ */
+ flexarray_append_pair(dm_args, "-vnc", "none");
+
+ /*
+ * Ensure that by default no display backend is created. Further
+ * options given below might then enable more.
+ */
+ flexarray_append_pair(dm_args, "-display", "none");
if (sdl) {
flexarray_append(dm_args, "-sdl");
diff --git a/tools/xenstore/xs_lib.c b/tools/xenstore/xs_lib.c
index d166497..4795162 100644
--- a/tools/xenstore/xs_lib.c
+++ b/tools/xenstore/xs_lib.c
@@ -79,7 +79,7 @@ const char *xs_domain_dev(void)
char *s = getenv("XENSTORED_PATH");
if (s)
return s;
-#if defined(__RUMPUSER_XEN__)
+#if defined(__RUMPUSER_XEN__) || defined(__RUMPRUN__)
return "/dev/xen/xenbus";
#elif defined(__linux__)
return "/proc/xen/xenbus";
diff --git a/xen/Makefile b/xen/Makefile
index 5d70741..5720393 100644
--- a/xen/Makefile
+++ b/xen/Makefile
@@ -2,7 +2,7 @@
# All other places this is stored (eg. compile.h) should be autogenerated.
export XEN_VERSION = 4
export XEN_SUBVERSION = 5
-export XEN_EXTRAVERSION ?= .0$(XEN_VENDORVERSION)
+export XEN_EXTRAVERSION ?= .1-rc1$(XEN_VENDORVERSION)
export XEN_FULLVERSION = $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION)
-include xen-version
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 7221bc8..d486632 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -64,7 +64,7 @@ static void ctxt_switch_from(struct vcpu *p)
* mode. Therefore we don't need to save the context of an idle VCPU.
*/
if ( is_idle_vcpu(p) )
- goto end_context;
+ return;
p2m_save_state(p);
@@ -138,9 +138,6 @@ static void ctxt_switch_from(struct vcpu *p)
gic_save_state(p);
isb();
-
-end_context:
- context_saved(p);
}
static void ctxt_switch_to(struct vcpu *n)
@@ -246,6 +243,8 @@ static void schedule_tail(struct vcpu *prev)
local_irq_enable();
+ context_saved(prev);
+
if ( prev != current )
update_runstate_area(current);
@@ -357,29 +356,57 @@ unsigned long hypercall_create_continuation(
}
else
{
- regs = guest_cpu_user_regs();
- regs->r12 = op;
+ regs = guest_cpu_user_regs();
/* Ensure the hypercall trap instruction is re-executed. */
regs->pc -= 4; /* re-execute 'hvc #XEN_HYPERCALL_TAG' */
- for ( i = 0; *p != '\0'; i++ )
+#ifdef CONFIG_ARM_64
+ if ( !is_32bit_domain(current->domain) )
{
- arg = next_arg(p, args);
+ regs->x16 = op;
- switch ( i )
+ for ( i = 0; *p != '\0'; i++ )
{
- case 0: regs->r0 = arg; break;
- case 1: regs->r1 = arg; break;
- case 2: regs->r2 = arg; break;
- case 3: regs->r3 = arg; break;
- case 4: regs->r4 = arg; break;
- case 5: regs->r5 = arg; break;
+ arg = next_arg(p, args);
+
+ switch ( i )
+ {
+ case 0: regs->x0 = arg; break;
+ case 1: regs->x1 = arg; break;
+ case 2: regs->x2 = arg; break;
+ case 3: regs->x3 = arg; break;
+ case 4: regs->x4 = arg; break;
+ case 5: regs->x5 = arg; break;
+ }
}
+
+ /* Return value gets written back to x0 */
+ rc = regs->x0;
}
+ else
+#endif
+ {
+ regs->r12 = op;
+
+ for ( i = 0; *p != '\0'; i++ )
+ {
+ arg = next_arg(p, args);
+
+ switch ( i )
+ {
+ case 0: regs->r0 = arg; break;
+ case 1: regs->r1 = arg; break;
+ case 2: regs->r2 = arg; break;
+ case 3: regs->r3 = arg; break;
+ case 4: regs->r4 = arg; break;
+ case 5: regs->r5 = arg; break;
+ }
+ }
- /* Return value gets written back to r0 */
- rc = regs->r0;
+ /* Return value gets written back to r0 */
+ rc = regs->r0;
+ }
}
va_end(args);
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index de180d8..da868e3 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -1029,6 +1029,8 @@ static int handle_node(struct domain *d, struct kernel_info *kinfo,
DT_MATCH_COMPATIBLE("arm,psci"),
DT_MATCH_PATH("/cpus"),
DT_MATCH_TYPE("memory"),
+ /* The memory mapped timer is not supported by Xen. */
+ DT_MATCH_COMPATIBLE("arm,armv7-timer-mem"),
{ /* sentinel */ },
};
static const struct dt_device_match gic_matches[] __initconst =
diff --git a/xen/arch/arm/gic-v2.c b/xen/arch/arm/gic-v2.c
index faad1ff..31fb81a 100644
--- a/xen/arch/arm/gic-v2.c
+++ b/xen/arch/arm/gic-v2.c
@@ -432,8 +432,6 @@ static int gicv2v_setup(struct domain *d)
d->arch.vgic.cbase = GUEST_GICC_BASE;
}
- d->arch.vgic.nr_lines = 0;
-
/*
* Map the gic virtual cpu interface in the gic cpu interface
* region of the guest.
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index 076aa62..47452ca 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -922,8 +922,6 @@ static int gicv_v3_init(struct domain *d)
d->arch.vgic.rbase_size[0] = GUEST_GICV3_GICR0_SIZE;
}
- d->arch.vgic.nr_lines = 0;
-
return 0;
}
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index f49569d..1e488ee 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -50,6 +50,11 @@ struct bootinfo __initdata bootinfo;
struct cpuinfo_arm __read_mostly boot_cpu_data;
+#ifdef CONFIG_ARM_32
+static unsigned long opt_xenheap_megabytes __initdata;
+integer_param("xenheap_megabytes", opt_xenheap_megabytes);
+#endif
+
static __used void init_done(void)
{
free_init_memory();
@@ -497,20 +502,26 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
total_pages = ram_pages = ram_size >> PAGE_SHIFT;
/*
- * Locate the xenheap using these constraints:
+ * If the user has not requested otherwise via the command line
+ * then locate the xenheap using these constraints:
*
* - must be 32 MiB aligned
* - must not include Xen itself or the boot modules
- * - must be at most 1GB or 1/8 the total RAM in the system if less
- * - must be at least 128M
+ * - must be at most 1GB or 1/32 the total RAM in the system if less
+ * - must be at least 32M
*
* We try to allocate the largest xenheap possible within these
* constraints.
*/
heap_pages = ram_pages;
- xenheap_pages = (heap_pages/8 + 0x1fffUL) & ~0x1fffUL;
- xenheap_pages = max(xenheap_pages, 128UL<<(20-PAGE_SHIFT));
- xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
+ if ( opt_xenheap_megabytes )
+ xenheap_pages = opt_xenheap_megabytes << (20-PAGE_SHIFT);
+ else
+ {
+ xenheap_pages = (heap_pages/32 + 0x1fffUL) & ~0x1fffUL;
+ xenheap_pages = max(xenheap_pages, 32UL<<(20-PAGE_SHIFT));
+ xenheap_pages = min(xenheap_pages, 1UL<<(30-PAGE_SHIFT));
+ }
do
{
@@ -521,15 +532,16 @@ static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
break;
xenheap_pages >>= 1;
- } while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );
+ } while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );
if ( ! e )
panic("Not not enough space for xenheap");
domheap_pages = heap_pages - xenheap_pages;
- printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages)\n",
- e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages);
+ printk("Xen heap: %"PRIpaddr"-%"PRIpaddr" (%lu pages%s)\n",
+ e - (pfn_to_paddr(xenheap_pages)), e, xenheap_pages,
+ opt_xenheap_megabytes ? ", from command-line" : "");
printk("Dom heap: %lu pages\n", domheap_pages);
setup_xenheap_mappings((e >> PAGE_SHIFT) - xenheap_pages, xenheap_pages);
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 4c93250..4063a80 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -454,14 +454,7 @@ static void inject_abt64_exception(struct cpu_user_regs *regs,
.len = instr_len,
};
- /*
- * Trap may have been taken from EL0, which might be in AArch32
- * mode (PSR_MODE_BIT set), or in AArch64 mode (PSR_MODE_EL0t).
- *
- * Since we know the kernel must be 64-bit any trap from a 32-bit
- * mode must have been from EL0.
- */
- if ( psr_mode_is_32bit(regs->cpsr) || psr_mode(regs->cpsr,PSR_MODE_EL0t) )
+ if ( psr_mode_is_user(regs) )
esr.ec = prefetch
? HSR_EC_INSTR_ABORT_LOWER_EL : HSR_EC_DATA_ABORT_LOWER_EL;
else
diff --git a/xen/arch/arm/vgic-v2.c b/xen/arch/arm/vgic-v2.c
index 1369f78..86d3628 100644
--- a/xen/arch/arm/vgic-v2.c
+++ b/xen/arch/arm/vgic-v2.c
@@ -53,8 +53,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
if ( dabt.size != DABT_WORD ) goto bad_width;
/* No secure world support for guests. */
vgic_lock(v);
- *r = ( (v->domain->max_vcpus << 5) & GICD_TYPE_CPUS )
- |( ((v->domain->arch.vgic.nr_lines / 32)) & GICD_TYPE_LINES );
+ *r = ( ((v->domain->max_vcpus - 1) << GICD_TYPE_CPUS_SHIFT) )
+ |( ((v->domain->arch.vgic.nr_spis / 32)) & GICD_TYPE_LINES );
vgic_unlock(v);
return 1;
case GICD_IIDR:
@@ -72,7 +72,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case GICD_IGROUPR ... GICD_IGROUPRN:
/* We do not implement security extensions for guests, read zero */
- goto read_as_zero;
+ goto read_as_zero_32;
case GICD_ISENABLER ... GICD_ISENABLERN:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -93,7 +93,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
return 1;
case GICD_ISPENDR ... GICD_ISPENDRN:
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
+ if ( dabt.size != DABT_WORD ) goto bad_width;
rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ISPENDR, DABT_WORD);
if ( rank == NULL) goto read_as_zero;
vgic_lock_rank(v, rank, flags);
@@ -102,8 +102,8 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
return 1;
case GICD_ICPENDR ... GICD_ICPENDRN:
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- rank = vgic_rank_offset(v, 1, gicd_reg - GICD_ICPENDR, DABT_WORD);
+ if ( dabt.size != DABT_WORD ) goto bad_width;
+ rank = vgic_rank_offset(v, 0, gicd_reg - GICD_ICPENDR, DABT_WORD);
if ( rank == NULL) goto read_as_zero;
vgic_lock_rank(v, rank, flags);
*r = vgic_byte_read(rank->ipend, dabt.sign, gicd_reg);
@@ -164,7 +164,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case GICD_NSACR ... GICD_NSACRN:
/* We do not implement security extensions for guests, read zero */
- goto read_as_zero;
+ goto read_as_zero_32;
case GICD_SGIR:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -196,7 +196,7 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case GICD_ICPIDR2:
if ( dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled read from ICPIDR2\n");
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read from ICPIDR2\n", v);
return 0;
/* Implementation defined -- read as zero */
@@ -213,19 +213,20 @@ static int vgic_v2_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
goto read_as_zero;
default:
- printk("vGICD: unhandled read r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
return 0;
}
bad_width:
- printk("vGICD: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicd_reg);
domain_crash_synchronous();
return 0;
-read_as_zero:
+read_as_zero_32:
if ( dabt.size != DABT_WORD ) goto bad_width;
+read_as_zero:
*r = 0;
return 1;
}
@@ -255,7 +256,10 @@ static int vgic_v2_to_sgi(struct vcpu *v, register_t sgir)
sgi_mode = SGI_TARGET_SELF;
break;
default:
- BUG();
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: unhandled GICD_SGIR write %"PRIregister" with wrong mode\n",
+ v, sgir);
+ return 0;
}
return vgic_to_sgi(v, sgir, sgi_mode, virq, vcpu_mask);
@@ -276,13 +280,16 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_CTLR:
if ( dabt.size != DABT_WORD ) goto bad_width;
/* Ignore all but the enable bit */
+ vgic_lock(v);
v->domain->arch.vgic.ctlr = (*r) & GICD_CTL_ENABLE;
+ vgic_unlock(v);
+
return 1;
/* R/O -- write ignored */
case GICD_TYPER:
case GICD_IIDR:
- goto write_ignore;
+ goto write_ignore_32;
/* Implementation defined -- write ignored */
case 0x020 ... 0x03c:
@@ -290,7 +297,7 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_IGROUPR ... GICD_IGROUPRN:
/* We do not implement security extensions for guests, write ignore */
- goto write_ignore;
+ goto write_ignore_32;
case GICD_ISENABLER ... GICD_ISENABLERN:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -323,15 +330,17 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
return 1;
case GICD_ISPENDR ... GICD_ISPENDRN:
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ISPENDR);
+ if ( dabt.size != DABT_WORD ) goto bad_width;
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled word write %#"PRIregister" to ISPENDR%d\n",
+ v, *r, gicd_reg - GICD_ISPENDR);
return 0;
case GICD_ICPENDR ... GICD_ICPENDRN:
- if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_ICPENDR);
+ if ( dabt.size != DABT_WORD ) goto bad_width;
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled word write %#"PRIregister" to ICPENDR%d\n",
+ v, *r, gicd_reg - GICD_ICPENDR);
return 0;
case GICD_ISACTIVER ... GICD_ISACTIVERN:
@@ -354,7 +363,7 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_ITARGETSR ... GICD_ITARGETSR + 7:
/* SGI/PPI target is read only */
- goto write_ignore;
+ goto write_ignore_32;
case GICD_ITARGETSR + 8 ... GICD_ITARGETSRN:
{
@@ -427,10 +436,10 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
return 1;
case GICD_ICFGR: /* SGIs */
- goto write_ignore;
+ goto write_ignore_32;
case GICD_ICFGR + 1: /* PPIs */
/* It is implementation defined if these are writeable. We chose not */
- goto write_ignore;
+ goto write_ignore_32;
case GICD_ICFGR + 2 ... GICD_ICFGRN: /* SPIs */
if ( dabt.size != DABT_WORD ) goto bad_width;
rank = vgic_rank_offset(v, 2, gicd_reg - GICD_ICFGR, DABT_WORD);
@@ -442,7 +451,7 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_NSACR ... GICD_NSACRN:
/* We do not implement security extensions for guests, write ignore */
- goto write_ignore;
+ goto write_ignore_32;
case GICD_SGIR:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -450,14 +459,16 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case GICD_CPENDSGIR ... GICD_CPENDSGIRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ICPENDSGIR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_CPENDSGIR);
return 0;
case GICD_SPENDSGIR ... GICD_SPENDSGIRN:
if ( dabt.size != DABT_BYTE && dabt.size != DABT_WORD ) goto bad_width;
- printk("vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
- dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled %s write %#"PRIregister" to ISPENDSGIR%d\n",
+ v, dabt.size ? "word" : "byte", *r, gicd_reg - GICD_SPENDSGIR);
return 0;
/* Implementation defined -- write ignored */
@@ -466,7 +477,7 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
/* R/O -- write ignore */
case GICD_ICPIDR2:
- goto write_ignore;
+ goto write_ignore_32;
/* Implementation defined -- write ignored */
case 0xfec ... 0xffc:
@@ -482,19 +493,22 @@ static int vgic_v2_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
goto write_ignore;
default:
- printk("vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
- dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, gicd_reg);
return 0;
}
bad_width:
- printk("vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicd_reg);
domain_crash_synchronous();
return 0;
-write_ignore:
+write_ignore_32:
if ( dabt.size != DABT_WORD ) goto bad_width;
+write_ignore:
return 1;
}
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index ff99e50..d0f1ea1 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -168,13 +168,14 @@ static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
/* Reserved0 */
goto read_as_zero;
default:
- printk("vGICv3: vGICR: read r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: read r%d offset %#08x not found\n",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICv3: vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv vGICR: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -244,12 +245,14 @@ static int __vgic_v3_rdistr_rd_mmio_write(struct vcpu *v, mmio_info_t *info,
/* RO */
goto write_ignore;
default:
- printk("vGICR: write r%d offset %#08x\n not found", dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv: vGICR: write r%d offset %#08x not found\n",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -345,15 +348,16 @@ static int __vgic_v3_distr_common_mmio_read(struct vcpu *v, mmio_info_t *info,
vgic_unlock_rank(v, rank, flags);
return 1;
default:
- printk("vGICv3: vGICD/vGICR: unhandled read r%d offset %#08x\n",
- dabt.reg, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "vGICv3: vGICD/vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, reg);
domain_crash_synchronous();
return 0;
@@ -458,15 +462,16 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
vgic_unlock_rank(v, rank, flags);
return 1;
default:
- printk("vGICv3: vGICD/vGICR: unhandled write r%d "
- "=%"PRIregister" offset %#08x\n", dabt.reg, *r, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "vGICv3: vGICD/vGICR: bad write width %d r%d=%"PRIregister" "
- "offset %#08x\n", dabt.size, dabt.reg, *r, reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD/vGICR: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, reg);
domain_crash_synchronous();
return 0;
@@ -521,13 +526,14 @@ static int vgic_v3_rdistr_sgi_mmio_read(struct vcpu *v, mmio_info_t *info,
if ( dabt.size != DABT_WORD ) goto bad_width;
return 1;
default:
- printk("vGICv3: vGICR: read r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: read r%d offset %#08x not found\n",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICv3: vGICR: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR "%pv: vGICR: SGI: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -585,14 +591,16 @@ static int vgic_v3_rdistr_sgi_mmio_write(struct vcpu *v, mmio_info_t *info,
/* We do not implement security extensions for guests, write ignore */
goto write_ignore;
default:
- printk("vGICv3: vGICR SGI: write r%d offset %#08x\n not found",
- dabt.reg, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: write r%d offset %#08x not found\n",
+ v, dabt.reg, gicr_reg);
return 0;
}
bad_width:
- printk("vGICR SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n",
- dabt.size, dabt.reg, *r, gicr_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICR: SGI: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicr_reg);
domain_crash_synchronous();
return 0;
@@ -616,9 +624,9 @@ static int vgic_v3_rdistr_mmio_read(struct vcpu *v, mmio_info_t *info)
else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) )
return vgic_v3_rdistr_sgi_mmio_read(v, info, (offset - SZ_64K));
else
- gdprintk(XENLOG_WARNING,
- "vGICv3: vGICR: unknown gpa read address %"PRIpaddr"\n",
- info->gpa);
+ printk(XENLOG_G_WARNING
+ "%pv: vGICR: unknown gpa read address %"PRIpaddr"\n",
+ v, info->gpa);
return 0;
}
@@ -638,9 +646,9 @@ static int vgic_v3_rdistr_mmio_write(struct vcpu *v, mmio_info_t *info)
else if ( (offset >= SZ_64K) && (offset < 2 * SZ_64K) )
return vgic_v3_rdistr_sgi_mmio_write(v, info, (offset - SZ_64K));
else
- gdprintk(XENLOG_WARNING,
- "vGICV3: vGICR: unknown gpa write address %"PRIpaddr"\n",
- info->gpa);
+ printk(XENLOG_G_WARNING
+ "%pv: vGICR: unknown gpa write address %"PRIpaddr"\n",
+ v, info->gpa);
return 0;
}
@@ -665,11 +673,27 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
vgic_unlock(v);
return 1;
case GICD_TYPER:
+ {
+ /*
+ * Number of interrupt identifier bits supported by the GIC
+ * Stream Protocol Interface
+ */
+ unsigned int irq_bits = get_count_order(vgic_num_irqs(v->domain));
+ /*
+ * Number of processors that may be used as interrupt targets when ARE
+ * bit is zero. The maximum is 8.
+ */
+ unsigned int ncpus = min_t(unsigned int, v->domain->max_vcpus, 8);
+
if ( dabt.size != DABT_WORD ) goto bad_width;
/* No secure world support for guests. */
- *r = (((v->domain->max_vcpus << 5) & GICD_TYPE_CPUS ) |
- ((v->domain->arch.vgic.nr_lines / 32) & GICD_TYPE_LINES));
+ *r = ((ncpus - 1) << GICD_TYPE_CPUS_SHIFT |
+ ((v->domain->arch.vgic.nr_spis / 32) & GICD_TYPE_LINES));
+
+ *r |= (irq_bits - 1) << GICD_TYPE_ID_BITS_SHIFT;
+
return 1;
+ }
case GICD_STATUSR:
/*
* Optional, Not implemented for now.
@@ -764,18 +788,19 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
case 0xf30 ... 0x5fcc:
case 0x8000 ... 0xbfcc:
/* These are reserved register addresses */
- printk("vGICv3: vGICD: read unknown 0x00c .. 0xfcc r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: RAZ on reserved register offset %#08x\n",
+ v, gicd_reg);
goto read_as_zero;
default:
- printk("vGICv3: vGICD: unhandled read r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: unhandled read r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR, "vGICv3: vGICD: bad read width %d r%d offset %#08x\n",
- dabt.size, dabt.reg, gicd_reg);
+ printk(XENLOG_G_ERR "%pv: vGICD: bad read width %d r%d offset %#08x\n",
+ v, dabt.size, dabt.reg, gicd_reg);
domain_crash_synchronous();
return 0;
@@ -832,8 +857,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case 0x020 ... 0x03c:
case 0xc000 ... 0xffcc:
/* Implementation defined -- write ignored */
- printk("vGICv3: vGICD: write unknown 0x020 - 0x03c r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: WI on implementation defined register offset %#08x\n",
+ v, gicd_reg);
goto write_ignore;
case GICD_IGROUPR ... GICD_IGROUPRN:
case GICD_ISENABLER ... GICD_ISENABLERN:
@@ -877,8 +903,9 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
new_target = new_irouter & MPIDR_AFF0_MASK;
if ( new_target >= v->domain->max_vcpus )
{
- printk("vGICv3: vGICD: wrong irouter at offset %#08x\n val 0x%lx vcpu %x",
- gicd_reg, new_target, v->domain->max_vcpus);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: wrong irouter at offset %#08x val 0x%lx vcpu %x\n",
+ v, gicd_reg, new_target, v->domain->max_vcpus);
vgic_unlock_rank(v, rank, flags);
return 0;
}
@@ -918,19 +945,21 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
case 0xf30 ... 0x5fcc:
case 0x8000 ... 0xbfcc:
/* Reserved register addresses */
- printk("vGICv3: vGICD: write unknown 0x00c 0xfcc r%d offset %#08x\n",
- dabt.reg, gicd_reg);
+ printk(XENLOG_G_DEBUG
+ "%pv: vGICD: write unknown 0x00c 0xfcc r%d offset %#08x\n",
+ v, dabt.reg, gicd_reg);
goto write_ignore;
default:
- printk("vGICv3: vGICD: unhandled write r%d=%"PRIregister" "
- "offset %#08x\n", dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: unhandled write r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.reg, *r, gicd_reg);
return 0;
}
bad_width:
- dprintk(XENLOG_ERR,
- "VGICv3: vGICD: bad write width %d r%d=%"PRIregister" "
- "offset %#08x\n", dabt.size, dabt.reg, *r, gicd_reg);
+ printk(XENLOG_G_ERR
+ "%pv: vGICD: bad write width %d r%d=%"PRIregister" offset %#08x\n",
+ v, dabt.size, dabt.reg, *r, gicd_reg);
domain_crash_synchronous();
return 0;
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index b8bd38b..41d3e48 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -66,13 +66,10 @@ int domain_vgic_init(struct domain *d)
d->arch.vgic.ctlr = 0;
- /* Currently nr_lines in vgic and gic doesn't have the same meanings
- * Here nr_lines = number of SPIs
- */
if ( is_hardware_domain(d) )
- d->arch.vgic.nr_lines = gic_number_lines() - 32;
+ d->arch.vgic.nr_spis = gic_number_lines() - 32;
else
- d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
+ d->arch.vgic.nr_spis = 0; /* We don't need SPIs for the guest */
switch ( gic_hw_version() )
{
@@ -98,11 +95,11 @@ int domain_vgic_init(struct domain *d)
return -ENOMEM;
d->arch.vgic.pending_irqs =
- xzalloc_array(struct pending_irq, d->arch.vgic.nr_lines);
+ xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
if ( d->arch.vgic.pending_irqs == NULL )
return -ENOMEM;
- for (i=0; i<d->arch.vgic.nr_lines; i++)
+ for (i=0; i<d->arch.vgic.nr_spis; i++)
{
INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
@@ -220,7 +217,7 @@ void arch_move_irqs(struct vcpu *v)
struct vcpu *v_target;
int i;
- for ( i = 32; i < (d->arch.vgic.nr_lines + 32); i++ )
+ for ( i = 32; i < vgic_num_irqs(d); i++ )
{
v_target = vgic_get_target_vcpu(v, i);
p = irq_to_pending(v_target, i);
@@ -346,7 +343,7 @@ int vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode, int
struct pending_irq *irq_to_pending(struct vcpu *v, unsigned int irq)
{
struct pending_irq *n;
- /* Pending irqs allocation strategy: the first vgic.nr_lines irqs
+ /* Pending irqs allocation strategy: the first vgic.nr_spis irqs
* are used for SPIs; the rests are used for per cpu irqs */
if ( irq < 32 )
n = &v->arch.vgic.pending_irqs[irq];
diff --git a/xen/arch/x86/crash.c b/xen/arch/x86/crash.c
index c0b83df..eb7be9c 100644
--- a/xen/arch/x86/crash.c
+++ b/xen/arch/x86/crash.c
@@ -36,9 +36,11 @@ static unsigned int crashing_cpu;
static DEFINE_PER_CPU_READ_MOSTLY(bool_t, crash_save_done);
/* This becomes the NMI handler for non-crashing CPUs, when Xen is crashing. */
-void do_nmi_crash(struct cpu_user_regs *regs)
+static void noreturn do_nmi_crash(const struct cpu_user_regs *regs)
{
- int cpu = smp_processor_id();
+ unsigned int cpu = smp_processor_id();
+
+ stac();
/* nmi_shootdown_cpus() should ensure that this assertion is correct. */
ASSERT(cpu != crashing_cpu);
@@ -113,11 +115,10 @@ void do_nmi_crash(struct cpu_user_regs *regs)
halt();
}
-void nmi_crash(void);
static void nmi_shootdown_cpus(void)
{
unsigned long msecs;
- int i, cpu = smp_processor_id();
+ unsigned int cpu = smp_processor_id();
disable_lapic_nmi_watchdog();
local_irq_disable();
@@ -127,38 +128,26 @@ static void nmi_shootdown_cpus(void)
cpumask_andnot(&waiting_to_crash, &cpu_online_map, cpumask_of(cpu));
- /* Change NMI trap handlers. Non-crashing pcpus get nmi_crash which
- * invokes do_nmi_crash (above), which cause them to write state and
- * fall into a loop. The crashing pcpu gets the nop handler to
- * cause it to return to this function ASAP.
+ /*
+ * Disable IST for MCEs to avoid stack corruption race conditions, and
+ * change the NMI handler to a nop to avoid deviation from this codepath.
*/
- for ( i = 0; i < nr_cpu_ids; i++ )
- {
- if ( idt_tables[i] == NULL )
- continue;
-
- if ( i == cpu )
- {
- /*
- * Disable the interrupt stack tables for this cpu's MCE and NMI
- * handlers, and alter the NMI handler to have no operation.
- * Disabling the stack tables prevents stack corruption race
- * conditions, while changing the handler helps prevent cascading
- * faults; we are certainly going to crash by this point.
- *
- * This update is safe from a security point of view, as this pcpu
- * is never going to try to sysret back to a PV vcpu.
- */
- _set_gate_lower(&idt_tables[i][TRAP_nmi],
- SYS_DESC_irq_gate, 0, &trap_nop);
- set_ist(&idt_tables[i][TRAP_machine_check], IST_NONE);
- }
- else
- {
- /* Do not update stack table for other pcpus. */
- _update_gate_addr_lower(&idt_tables[i][TRAP_nmi], &nmi_crash);
- }
- }
+ _set_gate_lower(&idt_tables[cpu][TRAP_nmi],
+ SYS_DESC_irq_gate, 0, &trap_nop);
+ set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
+
+ /*
+ * Ideally would be:
+ * exception_table[TRAP_nmi] = &do_nmi_crash;
+ *
+ * but the exception_table is read only. Borrow an unused fixmap entry
+ * to construct a writable mapping.
+ */
+ set_fixmap(FIX_TBOOT_MAP_ADDRESS, __pa(&exception_table[TRAP_nmi]));
+ write_atomic((unsigned long *)
+ (fix_to_virt(FIX_TBOOT_MAP_ADDRESS) +
+ ((unsigned long)&exception_table[TRAP_nmi] & ~PAGE_MASK)),
+ (unsigned long)&do_nmi_crash);
/* Ensure the new callback function is set before sending out the NMI. */
wmb();
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 82365a4..1585526 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -886,7 +886,11 @@ long arch_do_domctl(
case XEN_DOMCTL_gettscinfo:
{
- xen_guest_tsc_info_t info;
+ xen_guest_tsc_info_t info = { 0 };
+
+ ret = -EINVAL;
+ if ( d == current->domain ) /* no domain_pause() */
+ break;
domain_pause(d);
tsc_get_info(d, &info.tsc_mode,
@@ -903,6 +907,10 @@ long arch_do_domctl(
case XEN_DOMCTL_settscinfo:
{
+ ret = -EINVAL;
+ if ( d == current->domain ) /* no domain_pause() */
+ break;
+
domain_pause(d);
tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode,
domctl->u.tsc_info.info.elapsed_nsec,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72be5b9..55077f9 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -532,9 +532,16 @@ static void hvm_remove_ioreq_gmfn(
static int hvm_add_ioreq_gmfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
+ int rc;
+
clear_page(iorp->va);
- return guest_physmap_add_page(d, iorp->gmfn,
- page_to_mfn(iorp->page), 0);
+
+ rc = guest_physmap_add_page(d, iorp->gmfn,
+ page_to_mfn(iorp->page), 0);
+ if ( rc == 0 )
+ paging_mark_dirty(d, page_to_mfn(iorp->page));
+
+ return rc;
}
static int hvm_print_line(
@@ -874,6 +881,13 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
done:
spin_unlock(&s->lock);
+
+ /* This check is protected by the domain ioreq server lock. */
+ if ( d->arch.hvm_domain.ioreq_server.waiting )
+ {
+ d->arch.hvm_domain.ioreq_server.waiting = 0;
+ domain_unpause(d);
+ }
}
static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
@@ -1424,6 +1438,20 @@ int hvm_domain_initialise(struct domain *d)
spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
+
+ /*
+ * In the case where a stub domain is providing emulation for
+ * the guest, there is no interlock in the toolstack to prevent
+ * the guest from running before the stub domain is ready.
+ * Hence the domain must remain paused until at least one ioreq
+ * server is created and enabled.
+ */
+ if ( !is_pvh_domain(d) )
+ {
+ domain_pause(d);
+ d->arch.hvm_domain.ioreq_server.waiting = 1;
+ }
+
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index 3ec01c0..36a0a53 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -486,6 +486,7 @@ static int handle_pit_io(
if ( bytes != 1 )
{
gdprintk(XENLOG_WARNING, "PIT bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 01ae31d..6ad2797 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -213,6 +213,7 @@ static int handle_pmt_io(
if ( bytes != 4 )
{
gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 3fab660..3448971 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -703,7 +703,8 @@ static int handle_rtc_io(
if ( bytes != 1 )
{
- gdprintk(XENLOG_WARNING, "HVM_RTC bas access\n");
+ gdprintk(XENLOG_WARNING, "HVM_RTC bad access\n");
+ *val = ~0;
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f2554d6..8584f1f 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2606,7 +2606,8 @@ static void vmx_idtv_reinject(unsigned long idtv_info)
* Clear NMI-blocking interruptibility info if an NMI delivery faulted.
* Re-delivery will re-set it (see SDM 3B 25.7.1.2).
*/
- if ( (idtv_info & INTR_INFO_INTR_TYPE_MASK) == (X86_EVENTTYPE_NMI<<8) )
+ if ( cpu_has_vmx_vnmi && ((idtv_info & INTR_INFO_INTR_TYPE_MASK) ==
+ (X86_EVENTTYPE_NMI<<8)) )
{
unsigned long intr_info;
@@ -2686,7 +2687,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
&& ((intr_info & INTR_INFO_INTR_TYPE_MASK) ==
(X86_EVENTTYPE_NMI << 8)) )
{
- do_nmi(regs);
+ exception_table[TRAP_nmi](regs);
enable_nmis();
}
break;
@@ -2757,8 +2758,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
hvm_maybe_deassert_evtchn_irq();
__vmread(IDT_VECTORING_INFO, &idtv_info);
- if ( !nestedhvm_vcpu_in_guestmode(v) &&
- exit_reason != EXIT_REASON_TASK_SWITCH )
+ if ( exit_reason != EXIT_REASON_TASK_SWITCH )
vmx_idtv_reinject(idtv_info);
switch ( exit_reason )
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 2c6e6e5..c2c8fb6 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -331,6 +331,7 @@ static int vpic_intercept_pic_io(
if ( bytes != 1 )
{
gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
+ *val = ~0;
return X86EMUL_OKAY;
}
diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index 37f0d9f..654b8b5 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -24,6 +24,7 @@
#include <asm/regs.h>
#include <asm/types.h>
#include <asm/msr.h>
+#include <asm/nmi.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
@@ -284,3 +285,15 @@ void vpmu_dump(struct vcpu *v)
vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
}
+static int __init vpmu_init(void)
+{
+ /* NMI watchdog uses LVTPC and HW counter */
+ if ( opt_watchdog && opt_vpmu_enabled )
+ {
+ printk(XENLOG_WARNING "NMI watchdog is enabled. Turning VPMU off.\n");
+ opt_vpmu_enabled = 0;
+ }
+
+ return 0;
+}
+__initcall(vpmu_init);
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index f214072..84738e5 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1973,6 +1973,8 @@ int map_domain_pirq(
dprintk(XENLOG_G_ERR, "dom%d: irq %d in use\n",
d->domain_id, irq);
pci_disable_msi(msi_desc);
+ msi_desc->irq = -1;
+ msi_free_irq(msi_desc);
ret = -EBUSY;
goto done;
}
@@ -2027,22 +2029,29 @@ int map_domain_pirq(
if ( ret )
{
spin_unlock_irqrestore(&desc->lock, flags);
+ pci_disable_msi(msi_desc);
+ if ( nr )
+ {
+ ASSERT(msi_desc->irq >= 0);
+ desc = irq_to_desc(msi_desc->irq);
+ spin_lock_irqsave(&desc->lock, flags);
+ desc->handler = &no_irq_type;
+ desc->msi_desc = NULL;
+ spin_unlock_irqrestore(&desc->lock, flags);
+ }
while ( nr-- )
{
- if ( irq >= 0 )
- {
- if ( irq_deny_access(d, irq) )
- printk(XENLOG_G_ERR
- "dom%d: could not revoke access to IRQ%d (pirq %d)\n",
- d->domain_id, irq, pirq);
- destroy_irq(irq);
- }
+ if ( irq >= 0 && irq_deny_access(d, irq) )
+ printk(XENLOG_G_ERR
+ "dom%d: could not revoke access to IRQ%d (pirq %d)\n",
+ d->domain_id, irq, pirq);
if ( info )
cleanup_domain_irq_pirq(d, irq, info);
info = pirq_info(d, pirq + nr);
irq = info->arch.irq;
}
- pci_disable_msi(msi_desc);
+ msi_desc->irq = -1;
+ msi_free_irq(msi_desc);
goto done;
}
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6e9c2c0..d4965da 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2677,7 +2677,11 @@ int vcpu_destroy_pagetables(struct vcpu *v)
v->arch.cr3 = 0;
- return rc;
+ /*
+ * put_page_and_type_preemptible() is liable to return -EINTR. The
+ * callers of us expect -ERESTART so convert it over.
+ */
+ return rc != -EINTR ? rc : -ERESTART;
}
int new_guest_cr3(unsigned long mfn)
diff --git a/xen/arch/x86/msi.c b/xen/arch/x86/msi.c
index 14d37ec..7410d03 100644
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -470,6 +470,7 @@ static struct msi_desc *alloc_msi_entry(unsigned int nr)
while ( nr-- )
{
entry[nr].dev = NULL;
+ entry[nr].irq = -1;
entry[nr].remap_index = -1;
}
@@ -487,11 +488,19 @@ int __setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc,
hw_irq_controller *handler)
{
struct msi_msg msg;
+ int ret;
desc->msi_desc = msidesc;
desc->handler = handler;
msi_compose_msg(desc->arch.vector, desc->arch.cpu_mask, &msg);
- return write_msi_msg(msidesc, &msg);
+ ret = write_msi_msg(msidesc, &msg);
+ if ( unlikely(ret) )
+ {
+ desc->handler = &no_irq_type;
+ desc->msi_desc = NULL;
+ }
+
+ return ret;
}
int msi_free_irq(struct msi_desc *entry)
@@ -501,7 +510,8 @@ int msi_free_irq(struct msi_desc *entry)
while ( nr-- )
{
- destroy_irq(entry[nr].irq);
+ if ( entry[nr].irq >= 0 )
+ destroy_irq(entry[nr].irq);
/* Free the unused IRTE if intr remap enabled */
if ( iommu_intremap )
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index c27c49c..fefa0b7 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -106,7 +106,7 @@ struct cpuinfo_x86 __read_mostly boot_cpu_data = { 0, 0, 0, 0, -1 };
unsigned long __read_mostly mmu_cr4_features = XEN_MINIMAL_CR4;
-bool_t __initdata acpi_disabled;
+bool_t __read_mostly acpi_disabled;
bool_t __initdata acpi_force;
static char __initdata acpi_param[10] = "";
static void __init parse_acpi_param(char *s)
diff --git a/xen/arch/x86/shutdown.c b/xen/arch/x86/shutdown.c
index 21f6cf5..9ec8f97 100644
--- a/xen/arch/x86/shutdown.c
+++ b/xen/arch/x86/shutdown.c
@@ -28,16 +28,18 @@
#include <asm/apic.h>
enum reboot_type {
+ BOOT_INVALID,
BOOT_TRIPLE = 't',
BOOT_KBD = 'k',
BOOT_ACPI = 'a',
BOOT_CF9 = 'p',
+ BOOT_EFI = 'e',
};
static int reboot_mode;
/*
- * reboot=t[riple] | k[bd] | a[cpi] | p[ci] | n[o] [, [w]arm | [c]old]
+ * reboot=t[riple] | k[bd] | a[cpi] | p[ci] | n[o] | [e]fi [, [w]arm | [c]old]
* warm Don't set the cold reboot flag
* cold Set the cold reboot flag
* no Suppress automatic reboot after panics or crashes
@@ -45,8 +47,9 @@ static int reboot_mode;
* kbd Use the keyboard controller. cold reset (default)
* acpi Use the RESET_REG in the FADT
* pci Use the so-called "PCI reset register", CF9
+ * efi Use the EFI reboot (if running under EFI)
*/
-static enum reboot_type reboot_type = BOOT_ACPI;
+static enum reboot_type reboot_type = BOOT_INVALID;
static void __init set_reboot_type(char *str)
{
for ( ; ; )
@@ -63,6 +66,7 @@ static void __init set_reboot_type(char *str)
reboot_mode = 0x0;
break;
case 'a':
+ case 'e':
case 'k':
case 't':
case 'p':
@@ -106,6 +110,14 @@ void machine_halt(void)
__machine_halt(NULL);
}
+static void default_reboot_type(void)
+{
+ if ( reboot_type == BOOT_INVALID )
+ reboot_type = efi_enabled ? BOOT_EFI
+ : acpi_disabled ? BOOT_KBD
+ : BOOT_ACPI;
+}
+
static int __init override_reboot(struct dmi_system_id *d)
{
enum reboot_type type = (long)d->driver_data;
@@ -452,6 +464,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
static int __init reboot_init(void)
{
+ /*
+ * Only do the DMI check if reboot_type hasn't been overridden
+ * on the command line
+ */
+ if ( reboot_type != BOOT_INVALID )
+ return 0;
+
+ default_reboot_type();
dmi_check_system(reboot_dmi_table);
return 0;
}
@@ -465,7 +485,7 @@ static void noreturn __machine_restart(void *pdelay)
void machine_restart(unsigned int delay_millisecs)
{
unsigned int i, attempt;
- enum reboot_type orig_reboot_type = reboot_type;
+ enum reboot_type orig_reboot_type;
const struct desc_ptr no_idt = { 0 };
watchdog_disable();
@@ -504,15 +524,20 @@ void machine_restart(unsigned int delay_millisecs)
tboot_shutdown(TB_SHUTDOWN_REBOOT);
}
- efi_reset_system(reboot_mode != 0);
+ /* Just in case reboot_init() didn't run yet. */
+ default_reboot_type();
+ orig_reboot_type = reboot_type;
/* Rebooting needs to touch the page at absolute address 0. */
- *((unsigned short *)__va(0x472)) = reboot_mode;
+ if ( reboot_type != BOOT_EFI )
+ *((unsigned short *)__va(0x472)) = reboot_mode;
for ( attempt = 0; ; attempt++ )
{
switch ( reboot_type )
{
+ case BOOT_INVALID:
+ ASSERT_UNREACHABLE();
case BOOT_KBD:
/* Pulse the keyboard reset line. */
for ( i = 0; i < 100; i++ )
@@ -532,6 +557,11 @@ void machine_restart(unsigned int delay_millisecs)
reboot_type = (((attempt == 1) && (orig_reboot_type == BOOT_ACPI))
? BOOT_ACPI : BOOT_TRIPLE);
break;
+ case BOOT_EFI:
+ reboot_type = acpi_disabled ? BOOT_KBD : BOOT_ACPI;
+ efi_reset_system(reboot_mode != 0);
+ *((unsigned short *)__va(0x472)) = reboot_mode;
+ break;
case BOOT_TRIPLE:
asm volatile ("lidt %0; int3" : : "m" (no_idt));
reboot_type = BOOT_KBD;
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index a8fb3a0..ca4839e 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -138,6 +138,7 @@ void __init tboot_probe(void)
TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
tboot_copy_memory((unsigned char *)&sinit_size, sizeof(sinit_size),
TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
+ __set_fixmap(FIX_TBOOT_MAP_ADDRESS, 0, 0);
}
/* definitions from xen/drivers/passthrough/vtd/iommu.h
@@ -476,6 +477,8 @@ int __init tboot_parse_dmar_table(acpi_table_handler dmar_handler)
dmar_table_raw = xmalloc_array(unsigned char, dmar_table_length);
tboot_copy_memory(dmar_table_raw, dmar_table_length, pa);
dmar_table = (struct acpi_table_header *)dmar_table_raw;
+ __set_fixmap(FIX_TBOOT_MAP_ADDRESS, 0, 0);
+
rc = dmar_handler(dmar_table);
xfree(dmar_table_raw);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 3cd8746..61316ba 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -772,7 +772,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
switch ( cpuid_leaf )
{
- case 0xd:
+ case XSTATE_CPUID:
{
unsigned int _eax, _ebx, _ecx, _edx;
/* EBX value of main leaf 0 depends on enabled xsave features */
@@ -790,7 +790,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
b = _eax + _ebx;
}
}
- break;
+ goto xstate;
}
}
goto out;
@@ -816,7 +816,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
}
}
- switch ( (uint32_t)regs->eax )
+ switch ( regs->_eax )
{
case 0x00000001:
/* Modify Feature Information. */
@@ -851,7 +851,7 @@ void pv_cpuid(struct cpu_user_regs *regs)
break;
case 0x00000007:
- if ( regs->ecx == 0 )
+ if ( regs->_ecx == 0 )
b &= (cpufeat_mask(X86_FEATURE_BMI1) |
cpufeat_mask(X86_FEATURE_HLE) |
cpufeat_mask(X86_FEATURE_AVX2) |
@@ -866,9 +866,19 @@ void pv_cpuid(struct cpu_user_regs *regs)
a = c = d = 0;
break;
- case 0x0000000d: /* XSAVE */
+ case XSTATE_CPUID:
+ xstate:
if ( !cpu_has_xsave )
goto unsupported;
+ if ( regs->_ecx == 1 )
+ {
+ a &= XSTATE_FEATURE_XSAVEOPT |
+ XSTATE_FEATURE_XSAVEC |
+ (cpu_has_xgetbv1 ? XSTATE_FEATURE_XGETBV1 : 0) |
+ (cpu_has_xsaves ? XSTATE_FEATURE_XSAVES : 0);
+ if ( !cpu_has_xsaves )
+ b = c = d = 0;
+ }
break;
case 0x80000001:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index b3d6e32..2d25d57 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -669,15 +669,6 @@ handle_ist_exception:
je restore_all_guest
jmp compat_restore_all_guest
-ENTRY(nmi_crash)
- pushq $0
- movl $TRAP_nmi,4(%rsp)
- /* Set AC to reduce chance of further SMAP faults */
- SAVE_ALL STAC
- movq %rsp,%rdi
- callq do_nmi_crash /* Does not return */
- ud2
-
ENTRY(machine_check)
pushq $0
movl $TRAP_machine_check,4(%rsp)
@@ -728,9 +719,10 @@ ENTRY(exception_table)
.quad do_alignment_check
.quad do_machine_check
.quad do_simd_coprocessor_error
- .rept TRAP_last_reserved + 1 - ((. - exception_table) / 8)
+ .rept TRAP_nr - ((. - exception_table) / 8)
.quad do_reserved_trap /* Architecturally reserved exceptions. */
.endr
+ .size exception_table, . - exception_table
ENTRY(hypercall_table)
.quad do_set_trap_table /* 0 */
@@ -857,7 +849,7 @@ autogen_stubs: /* Automatically generated stubs. */
entrypoint 1b
/* Reserved exceptions, heading towards do_reserved_trap(). */
- .elseif vec == TRAP_copro_seg || vec == TRAP_spurious_int || (vec > TRAP_simd_error && vec <= TRAP_last_reserved)
+ .elseif vec == TRAP_copro_seg || vec == TRAP_spurious_int || (vec > TRAP_simd_error && vec < TRAP_nr)
1: test $8,%spl /* 64bit exception frames are 16 byte aligned, but the word */
jz 2f /* size is 8 bytes. Check whether the processor gave us an */
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index fef97ea..656a06f 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -325,16 +325,20 @@ struct operand {
uint32_t orig_bigval[4];
};
- union {
- /* OP_REG: Pointer to register field. */
- unsigned long *reg;
- /* OP_MEM: Segment and offset. */
- struct {
- enum x86_segment seg;
- unsigned long off;
- } mem;
- };
+ /* OP_REG: Pointer to register field. */
+ unsigned long *reg;
+
+ /* OP_MEM: Segment and offset. */
+ struct {
+ enum x86_segment seg;
+ unsigned long off;
+ } mem;
};
+#ifdef __x86_64__
+#define REG_POISON ((unsigned long *) 0x8086000000008086UL) /* non-canonical */
+#else
+#define REG_POISON NULL /* 32-bit builds are for user-space, so NULL is OK. */
+#endif
typedef union {
uint64_t mmx;
@@ -1460,14 +1464,15 @@ x86_emulate(
unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
bool_t lock_prefix = 0;
int override_seg = -1, rc = X86EMUL_OKAY;
- struct operand src, dst;
+ struct operand src = { .reg = REG_POISON };
+ struct operand dst = { .reg = REG_POISON };
enum x86_swint_type swint_type;
DECLARE_ALIGNED(mmval_t, mmval);
/*
* Data operand effective address (usually computed from ModRM).
* Default is a memory operand relative to segment DS.
*/
- struct operand ea = { .type = OP_MEM };
+ struct operand ea = { .type = OP_MEM, .reg = REG_POISON };
ea.mem.seg = x86_seg_ds; /* gcc may reject anon union initializer */
ctxt->retire.byte = 0;
@@ -1756,7 +1761,7 @@ x86_emulate(
}
}
- if ( override_seg != -1 )
+ if ( override_seg != -1 && ea.type == OP_MEM )
ea.mem.seg = override_seg;
/* Early operand adjustments. */
@@ -4400,7 +4405,9 @@ x86_emulate(
case 0xae: /* Grp15 */
switch ( modrm_reg & 7 )
{
- case 7: /* clflush */
+ case 7: /* clflush{,opt} */
+ fail_if(modrm_mod == 3);
+ fail_if(rep_prefix());
fail_if(ops->wbinvd == NULL);
if ( (rc = ops->wbinvd(ctxt)) != 0 )
goto done;
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 5f3b161..d5f5e3b 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -14,7 +14,10 @@
#include <asm/xstate.h>
#include <asm/asm_defns.h>
-bool_t __read_mostly cpu_has_xsaveopt;
+static bool_t __read_mostly cpu_has_xsaveopt;
+static bool_t __read_mostly cpu_has_xsavec;
+bool_t __read_mostly cpu_has_xgetbv1;
+bool_t __read_mostly cpu_has_xsaves;
/*
* Maximum size (in byte) of the XSAVE/XRSTOR save area required by all
@@ -320,12 +323,22 @@ void xstate_init(bool_t bsp)
BUG_ON(xsave_cntxt_size != _xstate_ctxt_size(feature_mask));
}
- /* Check XSAVEOPT feature. */
+ /* Check extended XSAVE features. */
cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
if ( bsp )
+ {
cpu_has_xsaveopt = !!(eax & XSTATE_FEATURE_XSAVEOPT);
+ cpu_has_xsavec = !!(eax & XSTATE_FEATURE_XSAVEC);
+ /* XXX cpu_has_xgetbv1 = !!(eax & XSTATE_FEATURE_XGETBV1); */
+ /* XXX cpu_has_xsaves = !!(eax & XSTATE_FEATURE_XSAVES); */
+ }
else
+ {
BUG_ON(!cpu_has_xsaveopt != !(eax & XSTATE_FEATURE_XSAVEOPT));
+ BUG_ON(!cpu_has_xsavec != !(eax & XSTATE_FEATURE_XSAVEC));
+ /* XXX BUG_ON(!cpu_has_xgetbv1 != !(eax & XSTATE_FEATURE_XGETBV1)); */
+ /* XXX BUG_ON(!cpu_has_xsaves != !(eax & XSTATE_FEATURE_XSAVES)); */
+ }
}
static bool_t valid_xcr0(u64 xcr0)
diff --git a/xen/common/bunzip2.c b/xen/common/bunzip2.c
index 2eb70ab..6d6e8b1 100644
--- a/xen/common/bunzip2.c
+++ b/xen/common/bunzip2.c
@@ -174,7 +174,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
- if (origPtr > dbufSize)
+ if (origPtr >= dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 06c90be..b258138 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,6 +15,7 @@ CHECK_TYPE(domid);
#undef xen_domid_t
CHECK_mem_access_op;
+CHECK_vmemrange;
int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
{
@@ -32,12 +33,14 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
struct xen_add_to_physmap *atp;
struct xen_add_to_physmap_batch *atpb;
struct xen_remove_from_physmap *xrfp;
+ struct xen_vnuma_topology_info *vnuma;
} nat;
union {
struct compat_memory_reservation rsrv;
struct compat_memory_exchange xchg;
struct compat_add_to_physmap atp;
struct compat_add_to_physmap_batch atpb;
+ struct compat_vnuma_topology_info vnuma;
} cmp;
set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -273,13 +276,50 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
break;
}
+ case XENMEM_get_vnumainfo:
+ {
+ enum XLAT_vnuma_topology_info_vdistance vdistance =
+ XLAT_vnuma_topology_info_vdistance_h;
+ enum XLAT_vnuma_topology_info_vcpu_to_vnode vcpu_to_vnode =
+ XLAT_vnuma_topology_info_vcpu_to_vnode_h;
+ enum XLAT_vnuma_topology_info_vmemrange vmemrange =
+ XLAT_vnuma_topology_info_vmemrange_h;
+
+ if ( copy_from_guest(&cmp.vnuma, compat, 1) )
+ return -EFAULT;
+
+#define XLAT_vnuma_topology_info_HNDL_vdistance_h(_d_, _s_) \
+ guest_from_compat_handle((_d_)->vdistance.h, (_s_)->vdistance.h)
+#define XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h(_d_, _s_) \
+ guest_from_compat_handle((_d_)->vcpu_to_vnode.h, (_s_)->vcpu_to_vnode.h)
+#define XLAT_vnuma_topology_info_HNDL_vmemrange_h(_d_, _s_) \
+ guest_from_compat_handle((_d_)->vmemrange.h, (_s_)->vmemrange.h)
+
+ XLAT_vnuma_topology_info(nat.vnuma, &cmp.vnuma);
+
+#undef XLAT_vnuma_topology_info_HNDL_vdistance_h
+#undef XLAT_vnuma_topology_info_HNDL_vcpu_to_vnode_h
+#undef XLAT_vnuma_topology_info_HNDL_vmemrange_h
+ break;
+ }
+
default:
return compat_arch_memory_op(cmd, compat);
}
rc = do_memory_op(cmd, nat.hnd);
if ( rc < 0 )
+ {
+ if ( rc == -ENOBUFS && op == XENMEM_get_vnumainfo )
+ {
+ cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
+ cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
+ cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
+ if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
+ rc = -EFAULT;
+ }
break;
+ }
cmd = 0;
if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
@@ -398,6 +438,14 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
case XENMEM_remove_from_physmap:
break;
+ case XENMEM_get_vnumainfo:
+ cmp.vnuma.nr_vnodes = nat.vnuma->nr_vnodes;
+ cmp.vnuma.nr_vcpus = nat.vnuma->nr_vcpus;
+ cmp.vnuma.nr_vmemranges = nat.vnuma->nr_vmemranges;
+ if ( __copy_to_guest(compat, &cmp.vnuma, 1) )
+ rc = -EFAULT;
+ break;
+
default:
domain_crash(current->domain);
split = 0;
diff --git a/xen/common/cpupool.c b/xen/common/cpupool.c
index a758a8b..cd6aab9 100644
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -379,12 +379,6 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
atomic_inc(&c->refcnt);
cpupool_cpu_moving = c;
cpumask_clear_cpu(cpu, c->cpu_valid);
-
- rcu_read_lock(&domlist_read_lock);
- for_each_domain_in_cpupool(d, c)
- domain_update_node_affinity(d);
- rcu_read_unlock(&domlist_read_lock);
-
spin_unlock(&cpupool_lock);
work_cpu = smp_processor_id();
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 8bca6da..3641296 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -522,8 +522,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
case XEN_DOMCTL_resumedomain:
{
- domain_resume(d);
- ret = 0;
+ if ( d == current->domain ) /* no domain_pause() */
+ ret = -EINVAL;
+ else
+ domain_resume(d);
}
break;
@@ -1036,6 +1038,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
(gfn + nr_mfns - 1) < gfn ) /* wrap? */
break;
+ ret = -E2BIG;
+ /* Must break hypercall up as this could take a while. */
+ if ( nr_mfns > 64 )
+ break;
+
ret = -EPERM;
if ( !iomem_access_permitted(current->domain, mfn, mfn_end) ||
!iomem_access_permitted(d, mfn, mfn_end) )
diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c
index c840e08..f5df51e 100644
--- a/xen/common/efi/runtime.c
+++ b/xen/common/efi/runtime.c
@@ -515,9 +515,13 @@ int efi_runtime_call(struct xenpf_efi_runtime_call *op)
cast_guid(&op->u.get_next_variable_name.vendor_guid));
efi_rs_leave(cr3);
+ /*
+ * Copy the variable name if necessary. The caller provided size
+ * is used because some firmwares update size when they shouldn't.
+ */
if ( !EFI_ERROR(status) &&
- copy_to_guest(op->u.get_next_variable_name.name,
- name.raw, size) )
+ __copy_to_guest(op->u.get_next_variable_name.name,
+ name.raw, op->u.get_next_variable_name.size) )
rc = -EFAULT;
op->u.get_next_variable_name.size = size;
}
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 7d6de54..eece46b 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -1155,21 +1155,25 @@ int alloc_unbound_xen_event_channel(
spin_lock(&d->event_lock);
- if ( (port = get_free_port(d)) < 0 )
+ rc = get_free_port(d);
+ if ( rc < 0 )
goto out;
+ port = rc;
chn = evtchn_from_port(d, port);
rc = xsm_evtchn_unbound(XSM_TARGET, d, chn, remote_domid);
+ if ( rc )
+ goto out;
chn->state = ECS_UNBOUND;
chn->xen_consumer = get_xen_consumer(notification_fn);
chn->notify_vcpu_id = local_vcpu->vcpu_id;
- chn->u.unbound.remote_domid = !rc ? remote_domid : DOMID_INVALID;
+ chn->u.unbound.remote_domid = remote_domid;
out:
spin_unlock(&d->event_lock);
- return port;
+ return rc < 0 ? rc : port;
}
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index d23c422..bafd44f 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -240,6 +240,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case XENVER_extraversion:
{
xen_extraversion_t extraversion;
+
+ memset(extraversion, 0, sizeof(extraversion));
safe_strcpy(extraversion, xen_extra_version());
if ( copy_to_guest(arg, extraversion, ARRAY_SIZE(extraversion)) )
return -EFAULT;
@@ -249,6 +251,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case XENVER_compile_info:
{
struct xen_compile_info info;
+
+ memset(&info, 0, sizeof(info));
safe_strcpy(info.compiler, xen_compiler());
safe_strcpy(info.compile_by, xen_compile_by());
safe_strcpy(info.compile_domain, xen_compile_domain());
@@ -284,6 +288,8 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case XENVER_changeset:
{
xen_changeset_info_t chgset;
+
+ memset(chgset, 0, sizeof(chgset));
safe_strcpy(chgset, xen_changeset());
if ( copy_to_guest(arg, chgset, ARRAY_SIZE(chgset)) )
return -EFAULT;
diff --git a/xen/common/lz4/decompress.c b/xen/common/lz4/decompress.c
index 5cf8f37..94ad591 100644
--- a/xen/common/lz4/decompress.c
+++ b/xen/common/lz4/decompress.c
@@ -132,6 +132,9 @@ static int INIT lz4_uncompress(const unsigned char *source, unsigned char *dest,
/* Error: request to write beyond destination buffer */
if (cpy > oend)
goto _output_error;
+ if ((ref + COPYLENGTH) > oend ||
+ (op + COPYLENGTH) > oend)
+ goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 234dae6..e84ace9 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -747,11 +747,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
return start_extent;
args.domain = d;
- rc = xsm_memory_adjust_reservation(XSM_TARGET, current->domain, d);
- if ( rc )
+ if ( xsm_memory_adjust_reservation(XSM_TARGET, current->domain, d) )
{
rcu_unlock_domain(d);
- return rc;
+ return start_extent;
}
switch ( op )
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7b4092d..24a759c 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -617,7 +617,8 @@ static struct page_info *alloc_heap_pages(
*/
if ( (outstanding_claims + request >
total_avail_pages + tmem_freeable_pages()) &&
- (d == NULL || d->outstanding_pages < request) )
+ ((memflags & MEMF_no_refcount) ||
+ !d || d->outstanding_pages < request) )
goto not_found;
/*
diff --git a/xen/common/softirq.c b/xen/common/softirq.c
index 22e417a..33d5d86 100644
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -106,7 +106,7 @@ void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
if ( !per_cpu(batching, this_cpu) || in_irq() )
smp_send_event_check_cpu(cpu);
else
- set_bit(nr, &per_cpu(batch_mask, this_cpu));
+ cpumask_set_cpu(cpu, &per_cpu(batch_mask, this_cpu));
}
void cpu_raise_softirq_batch_begin(void)
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 0cb6ee1..70202e8 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -76,7 +76,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
case XEN_SYSCTL_getdomaininfolist:
{
struct domain *d;
- struct xen_domctl_getdomaininfo info;
+ struct xen_domctl_getdomaininfo info = { 0 };
u32 num_domains = 0;
rcu_read_lock(&domlist_read_lock);
diff --git a/xen/drivers/char/dt-uart.c b/xen/drivers/char/dt-uart.c
index fa92b5c..1197230 100644
--- a/xen/drivers/char/dt-uart.c
+++ b/xen/drivers/char/dt-uart.c
@@ -25,13 +25,13 @@
/*
* Configure UART port with a string:
- * path,options
+ * path:options
*
* @path: full path used in the device tree for the UART. If the path
* doesn't start with '/', we assuming that it's an alias.
* @options: UART speficic options (see in each UART driver)
*/
-static char __initdata opt_dtuart[30] = "";
+static char __initdata opt_dtuart[256] = "";
string_param("dtuart", opt_dtuart);
void __init dt_uart_init(void)
@@ -47,7 +47,7 @@ void __init dt_uart_init(void)
return;
}
- options = strchr(opt_dtuart, ',');
+ options = strchr(opt_dtuart, ':');
if ( options != NULL )
*(options++) = '\0';
else
diff --git a/xen/drivers/char/pl011.c b/xen/drivers/char/pl011.c
index dd19ce8..57274d9 100644
--- a/xen/drivers/char/pl011.c
+++ b/xen/drivers/char/pl011.c
@@ -197,6 +197,20 @@ static const struct vuart_info *pl011_vuart(struct serial_port *port)
return &uart->vuart;
}
+static void pl011_tx_stop(struct serial_port *port)
+{
+ struct pl011 *uart = port->uart;
+
+ pl011_write(uart, IMSC, pl011_read(uart, IMSC) & ~(TXI));
+}
+
+static void pl011_tx_start(struct serial_port *port)
+{
+ struct pl011 *uart = port->uart;
+
+ pl011_write(uart, IMSC, pl011_read(uart, IMSC) | (TXI));
+}
+
static struct uart_driver __read_mostly pl011_driver = {
.init_preirq = pl011_init_preirq,
.init_postirq = pl011_init_postirq,
@@ -207,6 +221,8 @@ static struct uart_driver __read_mostly pl011_driver = {
.putc = pl011_putc,
.getc = pl011_getc,
.irq = pl011_irq,
+ .start_tx = pl011_tx_start,
+ .stop_tx = pl011_tx_stop,
.vuart_info = pl011_vuart,
};
diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c
index 44026b1..c583a48 100644
--- a/xen/drivers/char/serial.c
+++ b/xen/drivers/char/serial.c
@@ -31,6 +31,18 @@ static struct serial_port com[SERHND_IDX + 1] = {
static bool_t __read_mostly post_irq;
+static inline void serial_start_tx(struct serial_port *port)
+{
+ if ( port->driver->start_tx != NULL )
+ port->driver->start_tx(port);
+}
+
+static inline void serial_stop_tx(struct serial_port *port)
+{
+ if ( port->driver->stop_tx != NULL )
+ port->driver->stop_tx(port);
+}
+
void serial_rx_interrupt(struct serial_port *port, struct cpu_user_regs *regs)
{
char c;
@@ -76,6 +88,18 @@ void serial_tx_interrupt(struct serial_port *port, struct cpu_user_regs *regs)
cpu_relax();
}
+ if ( port->txbufc == port->txbufp )
+ {
+ /* Disable TX: nothing to send. */
+ serial_stop_tx(port);
+ spin_unlock(&port->tx_lock);
+ goto out;
+ }
+ else
+ {
+ if ( port->driver->tx_ready(port) )
+ serial_start_tx(port);
+ }
for ( i = 0, n = port->driver->tx_ready(port); i < n; i++ )
{
if ( port->txbufc == port->txbufp )
@@ -117,6 +141,8 @@ static void __serial_putc(struct serial_port *port, char c)
cpu_relax();
if ( n > 0 )
{
+ /* Enable TX before sending chars */
+ serial_start_tx(port);
while ( n-- )
port->driver->putc(
port,
@@ -135,6 +161,8 @@ static void __serial_putc(struct serial_port *port, char c)
if ( ((port->txbufp - port->txbufc) == 0) &&
port->driver->tx_ready(port) > 0 )
{
+ /* Enable TX before sending chars */
+ serial_start_tx(port);
/* Buffer and UART FIFO are both empty, and port is available. */
port->driver->putc(port, c);
}
@@ -152,11 +180,16 @@ static void __serial_putc(struct serial_port *port, char c)
while ( !(n = port->driver->tx_ready(port)) )
cpu_relax();
if ( n > 0 )
+ {
+ /* Enable TX before sending chars */
+ serial_start_tx(port);
port->driver->putc(port, c);
+ }
}
else
{
/* Simple synchronous transmitter. */
+ serial_start_tx(port);
port->driver->putc(port, c);
}
}
@@ -404,6 +437,7 @@ void serial_start_sync(int handle)
/* port is unavailable and might not come up until reenabled by
dom0, we can't really do proper sync */
break;
+ serial_start_tx(port);
port->driver->putc(
port, port->txbuf[mask_serial_txbuf_idx(port->txbufc++)]);
}
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 2e113d7..5a946d4 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -799,7 +799,8 @@ static const char *intr_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};
-static const char *iommu_get_fault_reason(u8 fault_reason, int *fault_type)
+static const char *iommu_get_fault_reason(u8 fault_reason,
+ enum faulttype *fault_type)
{
if ( fault_reason >= 0x20 && ( fault_reason < 0x20 +
ARRAY_SIZE(intr_remap_fault_reasons)) )
@@ -822,35 +823,48 @@ static const char *iommu_get_fault_reason(u8 fault_reason, int *fault_type)
static int iommu_page_fault_do_one(struct iommu *iommu, int type,
u8 fault_reason, u16 source_id, u64 addr)
{
- const char *reason;
- int fault_type;
+ const char *reason, *kind;
+ enum faulttype fault_type;
u16 seg = iommu->intel->drhd->segment;
- reason = iommu_get_fault_reason(fault_reason, &fault_type);
- if ( fault_type == DMA_REMAP )
- {
- INTEL_IOMMU_DEBUG(
- "DMAR:[%s] Request device [%04x:%02x:%02x.%u] "
- "fault addr %"PRIx64", iommu reg = %p\n"
- "DMAR:[fault reason %02xh] %s\n",
- (type ? "DMA Read" : "DMA Write"),
- seg, (source_id >> 8), PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr, iommu->reg,
- fault_reason, reason);
- if (iommu_debug)
- print_vtd_entries(iommu, (source_id >> 8),
- (source_id & 0xff), (addr >> PAGE_SHIFT));
+ reason = iommu_get_fault_reason(fault_reason, &fault_type);
+ switch ( fault_type )
+ {
+ case DMA_REMAP:
+ printk(XENLOG_G_WARNING VTDPREFIX
+ "DMAR:[%s] Request device [%04x:%02x:%02x.%u] "
+ "fault addr %"PRIx64", iommu reg = %p\n",
+ (type ? "DMA Read" : "DMA Write"),
+ seg, PCI_BUS(source_id), PCI_SLOT(source_id),
+ PCI_FUNC(source_id), addr, iommu->reg);
+ kind = "DMAR";
+ break;
+ case INTR_REMAP:
+ printk(XENLOG_G_WARNING VTDPREFIX
+ "INTR-REMAP: Request device [%04x:%02x:%02x.%u] "
+ "fault index %"PRIx64", iommu reg = %p\n",
+ seg, PCI_BUS(source_id), PCI_SLOT(source_id),
+ PCI_FUNC(source_id), addr >> 48, iommu->reg);
+ kind = "INTR-REMAP";
+ break;
+ default:
+ printk(XENLOG_G_WARNING VTDPREFIX
+ "UNKNOWN: Request device [%04x:%02x:%02x.%u] "
+ "fault addr %"PRIx64", iommu reg = %p\n",
+ seg, PCI_BUS(source_id), PCI_SLOT(source_id),
+ PCI_FUNC(source_id), addr, iommu->reg);
+ kind = "UNKNOWN";
+ break;
}
- else
- INTEL_IOMMU_DEBUG(
- "INTR-REMAP: Request device [%04x:%02x:%02x.%u] "
- "fault index %"PRIx64", iommu reg = %p\n"
- "INTR-REMAP:[fault reason %02xh] %s\n",
- seg, (source_id >> 8), PCI_SLOT(source_id & 0xFF),
- PCI_FUNC(source_id & 0xFF), addr >> 48, iommu->reg,
- fault_reason, reason);
- return 0;
+ printk(XENLOG_G_WARNING VTDPREFIX "%s: reason %02x - %s\n",
+ kind, fault_reason, reason);
+
+ if ( iommu_verbose && fault_type == DMA_REMAP )
+ print_vtd_entries(iommu, PCI_BUS(source_id), PCI_DEVFN2(source_id),
+ addr >> PAGE_SHIFT);
+
+ return 0;
}
static void iommu_fault_status(u32 fault_status)
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index c3e5181..d6e6520 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -268,18 +268,22 @@ struct dma_pte {
};
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
+#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE)
+#define DMA_PTE_SP (1 << 7)
#define DMA_PTE_SNP (1 << 11)
#define dma_clear_pte(p) do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0)
-#define dma_set_pte_superpage(p) do {(p).val |= (1 << 7);} while(0)
+#define dma_set_pte_superpage(p) do {(p).val |= DMA_PTE_SP;} while(0)
#define dma_set_pte_snp(p) do {(p).val |= DMA_PTE_SNP;} while(0)
-#define dma_set_pte_prot(p, prot) \
- do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
+#define dma_set_pte_prot(p, prot) do { \
+ (p).val = ((p).val & ~DMA_PTE_PROT) | ((prot) & DMA_PTE_PROT); \
+ } while (0)
#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
#define dma_set_pte_addr(p, addr) do {\
(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
+#define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0)
+#define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0)
/* interrupt remap entry */
struct iremap_entry {
diff --git a/xen/drivers/passthrough/vtd/utils.c b/xen/drivers/passthrough/vtd/utils.c
index db4c326..bd14c02 100644
--- a/xen/drivers/passthrough/vtd/utils.c
+++ b/xen/drivers/passthrough/vtd/utils.c
@@ -179,6 +179,8 @@ void print_vtd_entries(struct iommu *iommu, int bus, int devfn, u64 gmfn)
printk(" l%d[%x] not present\n", level, l_index);
break;
}
+ if ( dma_pte_superpage(pte) )
+ break;
val = dma_pte_addr(pte);
} while ( --level );
}
diff --git a/xen/include/asm-arm/arm64/page.h b/xen/include/asm-arm/arm64/page.h
index 1fd416d..e7a761d 100644
--- a/xen/include/asm-arm/arm64/page.h
+++ b/xen/include/asm-arm/arm64/page.h
@@ -89,9 +89,9 @@ static inline uint64_t gva_to_ma_par(vaddr_t va, unsigned int flags)
uint64_t par, tmp = READ_SYSREG64(PAR_EL1);
if ( (flags & GV2M_WRITE) == GV2M_WRITE )
- asm volatile ("at s12e1r, %0;" : : "r" (va));
- else
asm volatile ("at s12e1w, %0;" : : "r" (va));
+ else
+ asm volatile ("at s12e1r, %0;" : : "r" (va));
isb();
par = READ_SYSREG64(PAR_EL1);
WRITE_SYSREG64(tmp, PAR_EL1);
diff --git a/xen/include/asm-arm/asm_defns.h b/xen/include/asm-arm/asm_defns.h
index 36e72ff..02be83e 100644
--- a/xen/include/asm-arm/asm_defns.h
+++ b/xen/include/asm-arm/asm_defns.h
@@ -7,6 +7,15 @@
#endif
#include <asm/processor.h>
+/* For generic assembly code: use macros to define operand sizes. */
+#if defined(CONFIG_ARM_32)
+# define __OP32
+#elif defined(CONFIG_ARM_64)
+# define __OP32 "w"
+#else
+# error "unknown ARM variant"
+#endif
+
#endif /* __ARM_ASM_DEFNS_H__ */
/*
* Local variables:
diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
index 25f96c8..d69a7c3 100644
--- a/xen/include/asm-arm/bitops.h
+++ b/xen/include/asm-arm/bitops.h
@@ -9,6 +9,8 @@
#ifndef _ARM_BITOPS_H
#define _ARM_BITOPS_H
+#include <asm/asm_defns.h>
+
/*
* Non-atomic bit manipulation.
*
@@ -140,9 +142,8 @@ static inline int fls(int x)
if (__builtin_constant_p(x))
return constant_fls(x);
- asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
- ret = BITS_PER_LONG - ret;
- return ret;
+ asm("clz\t%"__OP32"0, %"__OP32"1" : "=r" (ret) : "r" (x));
+ return 32 - ret;
}
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 787e93c..8b7dd85 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -89,7 +89,7 @@ struct arch_domain
*/
spinlock_t lock;
int ctlr;
- int nr_lines; /* Number of SPIs */
+ int nr_spis; /* Number of SPIs */
struct vgic_irq_rank *shared_irqs;
/*
* SPIs are domain global, SGIs and PPIs are per-VCPU and stored in
diff --git a/xen/include/asm-arm/gic.h b/xen/include/asm-arm/gic.h
index 187dc46..0396a8e 100644
--- a/xen/include/asm-arm/gic.h
+++ b/xen/include/asm-arm/gic.h
@@ -93,6 +93,7 @@
#define GICD_CTL_ENABLE 0x1
#define GICD_TYPE_LINES 0x01f
+#define GICD_TYPE_CPUS_SHIFT 5
#define GICD_TYPE_CPUS 0x0e0
#define GICD_TYPE_SEC 0x400
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index 13adb53..b8a1c2e 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -45,6 +45,9 @@
#define GICC_SRE_EL2_DIB (1UL << 2)
#define GICC_SRE_EL2_ENEL1 (1UL << 3)
+/* Additional bits in GICD_TYPER defined by GICv3 */
+#define GICD_TYPE_ID_BITS_SHIFT 19
+
#define GICD_CTLR_RWP (1UL << 31)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index b7ef817..fcd26fb 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -3,6 +3,9 @@
#include <asm/cpregs.h>
#include <asm/sysregs.h>
+#ifndef __ASSEMBLY__
+#include <xen/types.h>
+#endif
#include <public/arch-arm.h>
/* MIDR Main ID Register */
@@ -220,8 +223,6 @@
#ifndef __ASSEMBLY__
-#include <xen/types.h>
-
struct cpuinfo_arm {
union {
uint32_t bits;
diff --git a/xen/include/asm-arm/regs.h b/xen/include/asm-arm/regs.h
index 0951857..56d53d6 100644
--- a/xen/include/asm-arm/regs.h
+++ b/xen/include/asm-arm/regs.h
@@ -24,9 +24,17 @@
#ifdef CONFIG_ARM_32
#define hyp_mode(r) psr_mode((r)->cpsr,PSR_MODE_HYP)
+#define psr_mode_is_user(r) usr_mode(r)
#else
#define hyp_mode(r) (psr_mode((r)->cpsr,PSR_MODE_EL2h) || \
psr_mode((r)->cpsr,PSR_MODE_EL2t))
+
+/*
+ * Trap may have been taken from EL0, which might be in AArch32 usr
+ * mode, or in AArch64 mode (PSR_MODE_EL0t).
+ */
+#define psr_mode_is_user(r) \
+ (psr_mode((r)->cpsr,PSR_MODE_EL0t) || usr_mode(r))
#endif
#define guest_mode(r) \
diff --git a/xen/include/asm-arm/vgic.h b/xen/include/asm-arm/vgic.h
index 5160f17..74d5a4e 100644
--- a/xen/include/asm-arm/vgic.h
+++ b/xen/include/asm-arm/vgic.h
@@ -113,7 +113,7 @@ struct vgic_ops {
};
/* Number of ranks of interrupt registers for a domain */
-#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_lines+31)/32)
+#define DOMAIN_NR_RANKS(d) (((d)->arch.vgic.nr_spis+31)/32)
#define vgic_lock(v) spin_lock_irq(&(v)->domain->arch.vgic.lock)
#define vgic_unlock(v) spin_unlock_irq(&(v)->domain->arch.vgic.lock)
@@ -175,6 +175,8 @@ enum gic_sgi_mode;
*/
#define REG_RANK_INDEX(b, n, s) ((((n) >> s) & ((b)-1)) % 32)
+#define vgic_num_irqs(d) ((d)->arch.vgic.nr_spis + 32)
+
extern int domain_vgic_init(struct domain *d);
extern void domain_vgic_free(struct domain *d);
extern int vcpu_vgic_init(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 2757c7f..0702bf5 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -83,6 +83,7 @@ struct hvm_domain {
struct {
spinlock_t lock;
ioservid_t id;
+ bool_t waiting;
struct list_head list;
} ioreq_server;
struct hvm_ioreq_server *default_ioreq_server;
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index f98eaf5..b4e4731 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -114,7 +114,7 @@
#define TRAP_machine_check 18
#define TRAP_simd_error 19
#define TRAP_virtualisation 20
-#define TRAP_last_reserved 31
+#define TRAP_nr 32
/* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
@@ -492,6 +492,9 @@ extern void mtrr_bp_init(void);
void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp);
+/* Dispatch table for exceptions */
+extern void (* const exception_table[TRAP_nr])(struct cpu_user_regs *regs);
+
#define DECLARE_TRAP_HANDLER(_name) \
void _name(void); \
void do_ ## _name(struct cpu_user_regs *regs)
@@ -524,7 +527,6 @@ DECLARE_TRAP_HANDLER(alignment_check);
void trap_nop(void);
void enable_nmis(void);
-void noreturn do_nmi_crash(struct cpu_user_regs *regs);
void do_reserved_trap(struct cpu_user_regs *regs);
void syscall_enter(void);
diff --git a/xen/include/asm-x86/xstate.h b/xen/include/asm-x86/xstate.h
index 8d21349..4c690db 100644
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -16,6 +16,9 @@
#define XSTATE_CPUID 0x0000000d
#define XSTATE_FEATURE_XSAVEOPT (1 << 0) /* sub-leaf 1, eax[bit 0] */
+#define XSTATE_FEATURE_XSAVEC (1 << 1) /* sub-leaf 1, eax[bit 1] */
+#define XSTATE_FEATURE_XGETBV1 (1 << 2) /* sub-leaf 1, eax[bit 2] */
+#define XSTATE_FEATURE_XSAVES (1 << 3) /* sub-leaf 1, eax[bit 3] */
#define XCR_XFEATURE_ENABLED_MASK 0x00000000 /* index of XCR0 */
@@ -40,6 +43,7 @@
#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
extern u64 xfeature_mask;
+extern bool_t cpu_has_xsaves, cpu_has_xgetbv1;
/* extended state save area */
struct __packed __attribute__((aligned (64))) xsave_struct
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 57e2ed7..9e1bc63 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -543,6 +543,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
/* Bind machine I/O address range -> HVM address range. */
+/* If this returns -E2BIG lower nr_mfns value. */
/* XEN_DOMCTL_memory_mapping */
#define DPCI_ADD_MAPPING 1
#define DPCI_REMOVE_MAPPING 0
diff --git a/xen/include/public/xen-compat.h b/xen/include/public/xen-compat.h
index 3eb80a0..c1d660d 100644
--- a/xen/include/public/xen-compat.h
+++ b/xen/include/public/xen-compat.h
@@ -27,7 +27,7 @@
#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
#define __XEN_PUBLIC_XEN_COMPAT_H__
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040400
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040500
#if defined(__XEN__) || defined(__XEN_TOOLS__)
/* Xen is built with matching headers and implements the latest interface. */
diff --git a/xen/include/xen/lib.h b/xen/include/xen/lib.h
index f11b49e..0bb05e5 100644
--- a/xen/include/xen/lib.h
+++ b/xen/include/xen/lib.h
@@ -41,9 +41,11 @@ do { \
#ifndef NDEBUG
#define ASSERT(p) \
do { if ( unlikely(!(p)) ) assert_failed(#p); } while (0)
+#define ASSERT_UNREACHABLE() assert_failed("unreachable")
#define debug_build() 1
#else
#define ASSERT(p) do { if ( 0 && (p) ); } while (0)
+#define ASSERT_UNREACHABLE() do { } while (0)
#define debug_build() 0
#endif
diff --git a/xen/include/xen/serial.h b/xen/include/xen/serial.h
index 9f4451b..71e6ade 100644
--- a/xen/include/xen/serial.h
+++ b/xen/include/xen/serial.h
@@ -81,6 +81,10 @@ struct uart_driver {
int (*getc)(struct serial_port *, char *);
/* Get IRQ number for this port's serial line: returns -1 if none. */
int (*irq)(struct serial_port *);
+ /* Unmask TX interrupt */
+ void (*start_tx)(struct serial_port *);
+ /* Mask TX interrupt */
+ void (*stop_tx)(struct serial_port *);
/* Get serial information */
const struct vuart_info *(*vuart_info)(struct serial_port *);
};
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 41b3e35..9c9fd9a 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -64,6 +64,8 @@
? mem_access_op memory.h
! pod_target memory.h
! remove_from_physmap memory.h
+? vmemrange memory.h
+! vnuma_topology_info memory.h
? physdev_eoi physdev.h
? physdev_get_free_pirq physdev.h
? physdev_irq physdev.h
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-xen/xen.git
More information about the Pkg-xen-changes
mailing list