[kernel] r15134 - in dists/lenny-security/linux-2.6/debian: . config patches/bugfix/x86 patches/series
Dann Frazier
dannf at alioth.debian.org
Tue Feb 9 15:26:42 UTC 2010
Author: dannf
Date: Tue Feb 9 15:26:36 2010
New Revision: 15134
Log:
* Build fix for CVE-2010-0291 change on powerpc64
* KVM: emulator privilege escalation (CVE-2010-0298)
* KVM: emulator privilege escalation IOPL/CPL level check (CVE-2010-0306)
Added:
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/check-cpl-level-during-priv-instruction-emulation.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/fix-popf-emulation.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-add-kvm_rw_guest_virt.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-check-IOPL-level-during-io-instruction-emulation.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-emulator-fix-popf-emulation.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-fix-memory-access-during-x86-emulation.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-remove-vmap-usage.patch
dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-use-kvm_rw_guest_virt-for-segment-descriptors.patch
Modified:
dists/lenny-security/linux-2.6/debian/changelog
dists/lenny-security/linux-2.6/debian/config/defines
dists/lenny-security/linux-2.6/debian/patches/series/21lenny3
Modified: dists/lenny-security/linux-2.6/debian/changelog
==============================================================================
--- dists/lenny-security/linux-2.6/debian/changelog Mon Feb 8 23:39:22 2010 (r15133)
+++ dists/lenny-security/linux-2.6/debian/changelog Tue Feb 9 15:26:36 2010 (r15134)
@@ -1,12 +1,14 @@
-linux-2.6 (2.6.26-21lenny3) UNRELEASED; urgency=high
+linux-2.6 (2.6.26-21lenny3) stable-security; urgency=high
* Additional fixes for CVE-2010-0307
+ * Build fix for CVE-2010-0291 change on powerpc64
* KVM: PIT: control word is write-only (CVE-2010-0309)
* connector: Delete buggy notification code. (CVE-2010-0410)
* Fix potential crash with sys_move_pages (CVE-2010-0415)
- * Build fix for CVE-2010-0291 change on powerpc64
+ * KVM: emulator privilege escalation (CVE-2010-0298)
+ * KVM: emulator privilege escalation IOPL/CPL level check (CVE-2010-0306)
- -- dann frazier <dannf at debian.org> Thu, 04 Feb 2010 17:43:31 -0700
+ -- dann frazier <dannf at debian.org> Mon, 08 Feb 2010 16:42:16 -0700
linux-2.6 (2.6.26-21lenny2) stable-security; urgency=high
Modified: dists/lenny-security/linux-2.6/debian/config/defines
==============================================================================
--- dists/lenny-security/linux-2.6/debian/config/defines Mon Feb 8 23:39:22 2010 (r15133)
+++ dists/lenny-security/linux-2.6/debian/config/defines Tue Feb 9 15:26:36 2010 (r15134)
@@ -1,5 +1,6 @@
[abi]
abiname: 2
+ignore-changes: gfn_* kvm_* __kvm_* emulate_instruction emulator_read_std emulator_write_emulated fx_init load_pdptrs
[base]
arches:
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/check-cpl-level-during-priv-instruction-emulation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/check-cpl-level-during-priv-instruction-emulation.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,250 @@
+Subject: [KVM 5.5/5.4.z Embargoed 7/7 v2] Check CPL level
+ during privilege instruction emulation.
+
+Add CPL checking in case the emulator is tricked into emulating
+a privileged instruction.
+
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/kvm/x86_emulate.c | 137 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 137 insertions(+), 0 deletions(-)
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c linux-source-2.6.26/arch/x86/kvm/x86_emulate.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c 2010-02-04 22:20:07.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86_emulate.c 2010-02-04 22:21:53.000000000 -0700
+@@ -1725,6 +1725,14 @@ special_insn:
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xf4: /* hlt */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
+ ctxt->vcpu->arch.halt_request = 1;
+ break;
+ case 0xf5: /* cmc */
+@@ -1791,6 +1799,11 @@ twobyte_insn:
+ if (c->modrm_mod != 3 || c->modrm_rm != 1)
+ goto cannot_emulate;
+
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ rc = kvm_fix_hypercall(ctxt->vcpu);
+ if (rc)
+ goto done;
+@@ -1801,6 +1814,16 @@ twobyte_insn:
+ c->dst.type = OP_NONE;
+ break;
+ case 2: /* lgdt */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ rc = read_descriptor(ctxt, ops, c->src.ptr,
+ &size, &address, c->op_bytes);
+ if (rc)
+@@ -1811,11 +1834,26 @@ twobyte_insn:
+ break;
+ case 3: /* lidt/vmmcall */
+ if (c->modrm_mod == 3 && c->modrm_rm == 1) {
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ rc = kvm_fix_hypercall(ctxt->vcpu);
+ if (rc)
+ goto done;
+ kvm_emulate_hypercall(ctxt->vcpu);
+ } else {
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ rc = read_descriptor(ctxt, ops, c->src.ptr,
+ &size, &address,
+ c->op_bytes);
+@@ -1831,11 +1869,26 @@ twobyte_insn:
+ c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
+ break;
+ case 6: /* lmsw */
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
+ &ctxt->eflags);
+ c->dst.type = OP_NONE;
+ break;
+ case 7: /* invlpg*/
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ emulate_invlpg(ctxt->vcpu, memop);
+ /* Disable writeback. */
+ c->dst.type = OP_NONE;
+@@ -1845,23 +1898,67 @@ twobyte_insn:
+ }
+ break;
+ case 0x06:
++ if (c->lock_prefix) {
++ if (ctxt->mode == X86EMUL_MODE_REAL ||
++ !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ else
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ emulate_clts(ctxt->vcpu);
+ c->dst.type = OP_NONE;
+ break;
+ case 0x08: /* invd */
+ case 0x09: /* wbinvd */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
+ case 0x0d: /* GrpP (prefetch) */
+ case 0x18: /* Grp16 (prefetch/nop) */
+ c->dst.type = OP_NONE;
+ break;
+ case 0x20: /* mov cr, reg */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ if (c->modrm_mod != 3)
+ goto cannot_emulate;
++
+ c->regs[c->modrm_rm] =
+ realmode_get_cr(ctxt->vcpu, c->modrm_reg);
+ c->dst.type = OP_NONE; /* no writeback */
+ break;
+ case 0x21: /* mov from dr to reg */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ if (c->modrm_mod != 3)
+ goto cannot_emulate;
+ rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]);
+@@ -1870,6 +1967,16 @@ twobyte_insn:
+ c->dst.type = OP_NONE; /* no writeback */
+ break;
+ case 0x22: /* mov reg, cr */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ if (c->modrm_mod != 3)
+ goto cannot_emulate;
+ realmode_set_cr(ctxt->vcpu,
+@@ -1877,6 +1984,16 @@ twobyte_insn:
+ c->dst.type = OP_NONE;
+ break;
+ case 0x23: /* mov from reg to dr */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ if (c->modrm_mod != 3)
+ goto cannot_emulate;
+ rc = emulator_set_dr(ctxt, c->modrm_reg,
+@@ -1887,6 +2004,16 @@ twobyte_insn:
+ break;
+ case 0x30:
+ /* wrmsr */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ msr_data = (u32)c->regs[VCPU_REGS_RAX]
+ | ((u64)c->regs[VCPU_REGS_RDX] << 32);
+ rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
+@@ -1899,6 +2026,16 @@ twobyte_insn:
+ break;
+ case 0x32:
+ /* rdmsr */
++ if (c->lock_prefix) {
++ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
++ goto done;
++ }
++
++ if (kvm_x86_ops->get_cpl(ctxt->vcpu)) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ goto done;
++ }
++
+ rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
+ if (rc) {
+ kvm_inject_gp(ctxt->vcpu, 0);
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/fix-popf-emulation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/fix-popf-emulation.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,96 @@
+Subject: [KVM 5.5/5.4.z Embargoed 6/7 v2] Fix popf emulation.
+
+POPF behaves differently depending on the current CPU mode. Emulate the
+correct logic to prevent the guest from changing flags that it could not
+change otherwise.
+
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/kvm/x86_emulate.c | 57 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 files changed, 56 insertions(+), 1 deletions(-)
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c linux-source-2.6.26/arch/x86/kvm/x86_emulate.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c 2010-02-04 22:17:43.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86_emulate.c 2010-02-04 22:20:07.000000000 -0700
+@@ -287,8 +287,18 @@ static u16 group2_table[] = {
+ };
+
+ /* EFLAGS bit definitions. */
++#define EFLG_ID (1<<21)
++#define EFLG_VIP (1<<20)
++#define EFLG_VIF (1<<19)
++#define EFLG_AC (1<<18)
++#define EFLG_VM (1<<17)
++#define EFLG_RF (1<<16)
++#define EFLG_IOPL (3<<12)
++#define EFLG_NT (1<<14)
+ #define EFLG_OF (1<<11)
+ #define EFLG_DF (1<<10)
++#define EFLG_IF (1<<9)
++#define EFLG_TF (1<<8)
+ #define EFLG_SF (1<<7)
+ #define EFLG_ZF (1<<6)
+ #define EFLG_AF (1<<4)
+@@ -1077,6 +1087,48 @@ static inline void emulate_push(struct x
+ c->regs[VCPU_REGS_RSP]);
+ }
+
++static int emulate_popf(struct x86_emulate_ctxt *ctxt,
++ struct x86_emulate_ops *ops,
++ void *dest, int len)
++{
++ struct decode_cache *c = &ctxt->decode;
++ int rc;
++ unsigned long val, change_mask;
++ int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
++ int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
++
++ rc = ops->read_emulated(register_address(c, ctxt->ss_base,
++ c->regs[VCPU_REGS_RSP]),
++ &val, c->src.bytes, ctxt->vcpu);
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
++
++ register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes);
++
++ change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
++ | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
++
++ if (ctxt->vcpu->arch.cr0 & X86_CR0_PE) {
++ if (cpl == 0)
++ change_mask |= EFLG_IOPL;
++ if (cpl <= iopl)
++ change_mask |= EFLG_IF;
++ } else if (ctxt->eflags & EFLG_VM) {
++ if (iopl < 3) {
++ kvm_inject_gp(ctxt->vcpu, 0);
++ return X86EMUL_PROPAGATE_FAULT;
++ }
++ change_mask |= EFLG_IF;
++ }
++ else /* real mode */
++ change_mask |= (EFLG_IOPL | EFLG_IF);
++
++ *(unsigned long*)dest =
++ (ctxt->eflags & ~change_mask) | (val & change_mask);
++
++ return rc;
++}
++
+ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
+ {
+@@ -1533,7 +1585,10 @@ special_insn:
+ c->dst.type = OP_REG;
+ c->dst.ptr = (unsigned long *) &ctxt->eflags;
+ c->dst.bytes = c->op_bytes;
+- goto pop_instruction;
++ rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes);
++ if (rc != X86EMUL_CONTINUE)
++ goto done;
++ break;
+ case 0xa0 ... 0xa1: /* mov */
+ c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
+ c->dst.val = c->src.val;
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-add-kvm_rw_guest_virt.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-add-kvm_rw_guest_virt.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,142 @@
+Subject: [KVM 5.5/5.4.z Embargoed 1/7 v2] KVM: introduce
+ kvm_read_guest_virt, kvm_write_guest_virt
+
+From: Izik Eidus <ieidus at redhat.com>
+
+This commit changes the name of emulator_read_std to kvm_read_guest_virt,
+and adds a new function named kvm_write_guest_virt that allows writing to a
+guest virtual address.
+
+Signed-off-by: Izik Eidus <ieidus at redhat.com>
+Signed-off-by: Avi Kivity <avi at redhat.com>
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/include/asm/kvm_host.h | 4 ---
+ arch/x86/kvm/x86.c | 56 +++++++++++++++++++++++++++++---------
+ 2 files changed, 42 insertions(+), 18 deletions(-)
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86.c linux-source-2.6.26/arch/x86/kvm/x86.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86.c 2010-02-01 23:54:25.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86.c 2010-02-04 21:39:17.000000000 -0700
+@@ -1807,10 +1807,8 @@ static struct kvm_io_device *vcpu_find_m
+ return dev;
+ }
+
+-int emulator_read_std(unsigned long addr,
+- void *val,
+- unsigned int bytes,
+- struct kvm_vcpu *vcpu)
++int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu)
+ {
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+@@ -1818,27 +1816,57 @@ int emulator_read_std(unsigned long addr
+ while (bytes) {
+ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+ unsigned offset = addr & (PAGE_SIZE-1);
+- unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
++ unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
+ int ret;
+
+ if (gpa == UNMAPPED_GVA) {
+ r = X86EMUL_PROPAGATE_FAULT;
+ goto out;
+ }
+- ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
++ ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
+ if (ret < 0) {
+ r = X86EMUL_UNHANDLEABLE;
+ goto out;
+ }
+
+- bytes -= tocopy;
+- data += tocopy;
+- addr += tocopy;
++ bytes -= toread;
++ data += toread;
++ addr += toread;
+ }
+ out:
+ return r;
+ }
+-EXPORT_SYMBOL_GPL(emulator_read_std);
++
++int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu)
++{
++ void *data = val;
++ int r = X86EMUL_CONTINUE;
++
++ while (bytes) {
++ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ unsigned offset = addr & (PAGE_SIZE-1);
++ unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
++ int ret;
++
++ if (gpa == UNMAPPED_GVA) {
++ r = X86EMUL_PROPAGATE_FAULT;
++ goto out;
++ }
++ ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
++ if (ret < 0) {
++ r = X86EMUL_UNHANDLEABLE;
++ goto out;
++ }
++
++ bytes -= towrite;
++ data += towrite;
++ addr += towrite;
++ }
++out:
++ return r;
++}
++
+
+ static int emulator_read_emulated(unsigned long addr,
+ void *val,
+@@ -1860,8 +1888,8 @@ static int emulator_read_emulated(unsign
+ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ goto mmio;
+
+- if (emulator_read_std(addr, val, bytes, vcpu)
+- == X86EMUL_CONTINUE)
++ if (kvm_read_guest_virt(addr, val, bytes, vcpu)
++ == X86EMUL_CONTINUE)
+ return X86EMUL_CONTINUE;
+ if (gpa == UNMAPPED_GVA)
+ return X86EMUL_PROPAGATE_FAULT;
+@@ -2065,7 +2093,7 @@ void kvm_report_emulation_failure(struct
+ if (reported)
+ return;
+
+- emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
++ kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+
+ printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+ context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+@@ -2074,7 +2102,7 @@ void kvm_report_emulation_failure(struct
+ EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
+
+ static struct x86_emulate_ops emulate_ops = {
+- .read_std = emulator_read_std,
++ .read_std = kvm_read_guest_virt,
+ .read_emulated = emulator_read_emulated,
+ .write_emulated = emulator_write_emulated,
+ .cmpxchg_emulated = emulator_cmpxchg_emulated,
+diff -urpN linux-source-2.6.26.orig/include/asm-x86/kvm_host.h linux-source-2.6.26/include/asm-x86/kvm_host.h
+--- linux-source-2.6.26.orig/include/asm-x86/kvm_host.h 2008-07-13 15:51:29.000000000 -0600
++++ linux-source-2.6.26/include/asm-x86/kvm_host.h 2010-02-04 21:39:17.000000000 -0700
+@@ -517,10 +517,6 @@ void kvm_inject_page_fault(struct kvm_vc
+
+ void fx_init(struct kvm_vcpu *vcpu);
+
+-int emulator_read_std(unsigned long addr,
+- void *val,
+- unsigned int bytes,
+- struct kvm_vcpu *vcpu);
+ int emulator_write_emulated(unsigned long addr,
+ const void *val,
+ unsigned int bytes,
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-check-IOPL-level-during-io-instruction-emulation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-check-IOPL-level-during-io-instruction-emulation.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,186 @@
+Subject: [KVM 5.5/5.4.z Embargoed 5/7 v2] Check IOPL level
+ during io instruction emulation.
+
+Make emulator check that vcpu is allowed to execute IN, INS, OUT,
+OUTS, CLI, STI.
+
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/x86.c | 77 ++++++++++++++++++++++++++++++++-------
+ arch/x86/kvm/x86_emulate.c | 18 +++++++---
+ 3 files changed, 77 insertions(+), 19 deletions(-)
+
+Backported to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+--- a/arch/x86/kvm/x86.c 2010-02-05 11:15:02.000000000 -0700
++++ b/arch/x86/kvm/x86.c 2010-02-05 11:19:28.000000000 -0700
+@@ -2375,11 +2375,68 @@ static struct kvm_io_device *vcpu_find_p
+ return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
+ }
+
++static void get_segment(struct kvm_vcpu *vcpu,
++ struct kvm_segment *var, int seg)
++{
++ kvm_x86_ops->get_segment(vcpu, var, seg);
++}
++
++bool kvm_check_iopl(struct kvm_vcpu *vcpu)
++{
++ int iopl;
++ if (!(vcpu->arch.cr0 & X86_CR0_PE))
++ return false;
++ if (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM)
++ return true;
++ iopl = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
++ return kvm_x86_ops->get_cpl(vcpu) > iopl;
++}
++
++bool kvm_check_io_port_access_allowed(struct kvm_vcpu *vcpu, u16 port, u16 len)
++{
++ struct kvm_segment tr_seg;
++ int r;
++ u16 io_bitmap_ptr;
++ u8 perm, bit_idx = port & 0x7;
++ unsigned mask = (1 << len) - 1;
++
++ get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
++ if (tr_seg.unusable)
++ return false;
++ if (tr_seg.limit < 103)
++ return false;
++ r = kvm_read_guest_virt_system(tr_seg.base + 102, &io_bitmap_ptr, 2,
++ vcpu, NULL);
++ if (r != X86EMUL_CONTINUE)
++ return false;
++ if (io_bitmap_ptr + port/8 >= tr_seg.limit)
++ return false;
++ r = kvm_read_guest_virt_system(tr_seg.base + io_bitmap_ptr + port/8,
++ &perm, 1, vcpu, NULL);
++ if (r != X86EMUL_CONTINUE)
++ return false;
++ if ((perm >> bit_idx) & mask)
++ return false;
++ return true;
++}
++
+ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+ int size, unsigned port)
+ {
+ struct kvm_io_device *pio_dev;
+
++ if (in)
++ KVMTRACE_2D(IO_READ, vcpu, port, (u32)size, handler);
++ else
++ KVMTRACE_2D(IO_WRITE, vcpu, port, (u32)size, handler);
++
++ if (kvm_check_iopl(vcpu)) {
++ if (!kvm_check_io_port_access_allowed(vcpu, port, size)) {
++ kvm_inject_gp(vcpu, 0);
++ return 1;
++ }
++ }
++
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+ vcpu->run->io.size = vcpu->arch.pio.size = size;
+@@ -2391,13 +2448,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcp
+ vcpu->arch.pio.down = 0;
+ vcpu->arch.pio.rep = 0;
+
+- if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+- KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+- handler);
+- else
+- KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+- handler);
+-
+ kvm_x86_ops->cache_regs(vcpu);
+ memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
+ kvm_x86_ops->decache_regs(vcpu);
+@@ -2422,6 +2472,18 @@ int kvm_emulate_pio_string(struct kvm_vc
+ int ret = 0;
+ struct kvm_io_device *pio_dev;
+
++ if (in)
++ KVMTRACE_2D(IO_READ, vcpu, port, (u32)size, handler);
++ else
++ KVMTRACE_2D(IO_WRITE, vcpu, port, (u32)size, handler);
++
++ if (kvm_check_iopl(vcpu)) {
++ if (!kvm_check_io_port_access_allowed(vcpu, port, size)) {
++ kvm_inject_gp(vcpu, 0);
++ return 1;
++ }
++ }
++
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+ vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+ vcpu->run->io.size = vcpu->arch.pio.size = size;
+@@ -2433,13 +2495,6 @@ int kvm_emulate_pio_string(struct kvm_vc
+ vcpu->arch.pio.down = down;
+ vcpu->arch.pio.rep = rep;
+
+- if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+- KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+- handler);
+- else
+- KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+- handler);
+-
+ if (!count) {
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+ return 1;
+@@ -3129,12 +3184,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct
+ return 0;
+ }
+
+-static void get_segment(struct kvm_vcpu *vcpu,
+- struct kvm_segment *var, int seg)
+-{
+- kvm_x86_ops->get_segment(vcpu, var, seg);
+-}
+-
+ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+ {
+ struct kvm_segment cs;
+diff -urpN a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c
+--- a/arch/x86/kvm/x86_emulate.c 2010-02-05 11:15:02.000000000 -0700
++++ b/arch/x86/kvm/x86_emulate.c 2010-02-05 11:17:56.000000000 -0700
+@@ -1685,12 +1685,20 @@ special_insn:
+ c->dst.type = OP_NONE; /* Disable writeback. */
+ break;
+ case 0xfa: /* cli */
+- ctxt->eflags &= ~X86_EFLAGS_IF;
+- c->dst.type = OP_NONE; /* Disable writeback. */
++ if (kvm_check_iopl(ctxt->vcpu))
++ kvm_inject_gp(ctxt->vcpu, 0);
++ else {
++ ctxt->eflags &= ~X86_EFLAGS_IF;
++ c->dst.type = OP_NONE; /* Disable writeback. */
++ }
+ break;
+ case 0xfb: /* sti */
+- ctxt->eflags |= X86_EFLAGS_IF;
+- c->dst.type = OP_NONE; /* Disable writeback. */
++ if (kvm_check_iopl(ctxt->vcpu))
++ kvm_inject_gp(ctxt->vcpu, 0);
++ else {
++ ctxt->eflags |= X86_EFLAGS_IF;
++ c->dst.type = OP_NONE; /* Disable writeback. */
++ }
+ break;
+ case 0xfe ... 0xff: /* Grp4/Grp5 */
+ rc = emulate_grp45(ctxt, ops);
+diff -urpN a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
+--- a/include/asm-x86/kvm_host.h 2010-02-05 11:15:02.000000000 -0700
++++ b/include/asm-x86/kvm_host.h 2010-02-05 11:17:56.000000000 -0700
+@@ -548,6 +548,7 @@ void kvm_enable_tdp(void);
+
+ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+ int complete_pio(struct kvm_vcpu *vcpu);
++bool kvm_check_iopl(struct kvm_vcpu *vcpu);
+
+ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+ {
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-emulator-fix-popf-emulation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-emulator-fix-popf-emulation.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,25 @@
+commit 2b48cc75b21431037d6f902b9d583b1aff198490
+Author: Avi Kivity <avi at redhat.com>
+Date: Sat Nov 29 20:36:13 2008 +0200
+
+ KVM: x86 emulator: fix popf emulation
+
+ Set operand type and size to get correct writeback behavior.
+
+ Signed-off-by: Avi Kivity <avi at redhat.com>
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c linux-source-2.6.26/arch/x86/kvm/x86_emulate.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c 2010-02-04 22:07:32.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86_emulate.c 2010-02-04 22:17:43.000000000 -0700
+@@ -1530,7 +1530,9 @@ special_insn:
+ emulate_push(ctxt);
+ break;
+ case 0x9d: /* popf */
++ c->dst.type = OP_REG;
+ c->dst.ptr = (unsigned long *) &ctxt->eflags;
++ c->dst.bytes = c->op_bytes;
+ goto pop_instruction;
+ case 0xa0 ... 0xa1: /* mov */
+ c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-fix-memory-access-during-x86-emulation.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-fix-memory-access-during-x86-emulation.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,511 @@
+Subject: [KVM 5.5/5.4.z Embargoed 4/7 v2] KVM: fix memory
+ access during x86 emulation.
+
+Currently when x86 emulator needs to access memory, page walk is done with
+broadest permission possible, so if emulated instruction was executed
+by userspace process it can still access kernel memory. Fix that by
+providing correct memory access to page walker during emulation.
+
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/include/asm/kvm_host.h | 7 ++-
+ arch/x86/include/asm/kvm_x86_emulate.h | 14 +++-
+ arch/x86/kvm/mmu.c | 16 ++--
+ arch/x86/kvm/mmu.h | 6 ++
+ arch/x86/kvm/paging_tmpl.h | 11 ++-
+ arch/x86/kvm/x86.c | 131 ++++++++++++++++++++++++-------
+ arch/x86/kvm/x86_emulate.c | 6 +-
+ 7 files changed, 143 insertions(+), 48 deletions(-)
+
+Backported to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/mmu.c linux-source-2.6.26/arch/x86/kvm/mmu.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/mmu.c 2010-02-01 23:54:20.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/mmu.c 2010-02-04 21:52:09.000000000 -0700
+@@ -119,11 +119,6 @@ static int dbg = 1;
+ #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
+ | PT64_NX_MASK)
+
+-#define PFERR_PRESENT_MASK (1U << 0)
+-#define PFERR_WRITE_MASK (1U << 1)
+-#define PFERR_USER_MASK (1U << 2)
+-#define PFERR_FETCH_MASK (1U << 4)
+-
+ #define PT_DIRECTORY_LEVEL 2
+ #define PT_PAGE_TABLE_LEVEL 1
+
+@@ -1007,7 +1002,7 @@ struct page *gva_to_page(struct kvm_vcpu
+ {
+ struct page *page;
+
+- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
++ gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+
+ if (gpa == UNMAPPED_GVA)
+ return NULL;
+@@ -1304,8 +1299,11 @@ static void mmu_alloc_roots(struct kvm_v
+ vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ }
+
+-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
++static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
++ u32 access, u32 *error)
+ {
++ if (error)
++ *error = 0;
+ return vaddr;
+ }
+
+@@ -1785,7 +1783,7 @@ int kvm_mmu_unprotect_page_virt(struct k
+ gpa_t gpa;
+ int r;
+
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
++ gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
+
+ spin_lock(&vcpu->kvm->mmu_lock);
+ r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+@@ -2218,7 +2216,7 @@ static void audit_mappings_page(struct k
+
+ audit_mappings_page(vcpu, ent, va, level - 1);
+ } else {
+- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
++ gpa_t gpa = kvm_mmu_gva_to_gpa_system(vcpu, va, NULL);
+ hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
+
+ if (is_shadow_present_pte(ent)
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/mmu.h linux-source-2.6.26/arch/x86/kvm/mmu.h
+--- linux-source-2.6.26.orig/arch/x86/kvm/mmu.h 2010-02-04 22:01:28.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/mmu.h 2010-02-04 22:01:53.000000000 -0700
+@@ -36,6 +36,12 @@
+ #define PT32_ROOT_LEVEL 2
+ #define PT32E_ROOT_LEVEL 3
+
++#define PFERR_PRESENT_MASK (1U << 0)
++#define PFERR_WRITE_MASK (1U << 1)
++#define PFERR_USER_MASK (1U << 2)
++#define PFERR_RSVD_MASK (1U << 3)
++#define PFERR_FETCH_MASK (1U << 4)
++
+ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+ {
+ if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/paging_tmpl.h linux-source-2.6.26/arch/x86/kvm/paging_tmpl.h
+--- linux-source-2.6.26.orig/arch/x86/kvm/paging_tmpl.h 2010-02-01 23:54:20.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/paging_tmpl.h 2010-02-04 21:52:09.000000000 -0700
+@@ -441,18 +441,23 @@ static int FNAME(page_fault)(struct kvm_
+ return write_pt;
+ }
+
+-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
++static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
++ u32 *error)
+ {
+ struct guest_walker walker;
+ gpa_t gpa = UNMAPPED_GVA;
+ int r;
+
+- r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
++ r = FNAME(walk_addr)(&walker, vcpu, vaddr,
++ !!(access & PFERR_WRITE_MASK),
++ !!(access & PFERR_USER_MASK),
++ !!(access & PFERR_FETCH_MASK));
+
+ if (r) {
+ gpa = gfn_to_gpa(walker.gfn);
+ gpa |= vaddr & ~PAGE_MASK;
+- }
++ } else if(error)
++ *error = walker.error_code;
+
+ return gpa;
+ }
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86.c linux-source-2.6.26/arch/x86/kvm/x86.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86.c 2010-02-04 21:49:22.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86.c 2010-02-04 21:59:53.000000000 -0700
+@@ -1807,14 +1807,41 @@ static struct kvm_io_device *vcpu_find_m
+ return dev;
+ }
+
+-int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+- struct kvm_vcpu *vcpu)
++gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
++}
++
++ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ access |= PFERR_FETCH_MASK;
++ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
++}
++
++gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ access |= PFERR_WRITE_MASK;
++ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
++}
++
++/* uses this to access any guest's mapped memory without checking CPL */
++gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
++{
++ return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
++}
++
++static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 access,
++ u32 *error)
+ {
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
+ while (bytes) {
+- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+ unsigned offset = addr & (PAGE_SIZE-1);
+ unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
+ int ret;
+@@ -1837,14 +1864,37 @@ out:
+ return r;
+ }
+
++/* used for instruction fetching */
++static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 *error)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
++ access | PFERR_FETCH_MASK, error);
++}
++
++static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 *error)
++{
++ u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
++ error);
++}
++
++static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
++ struct kvm_vcpu *vcpu, u32 *error)
++{
++ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
++}
++
+ int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
+- struct kvm_vcpu *vcpu)
++ struct kvm_vcpu *vcpu, u32 *error)
+ {
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
+ while (bytes) {
+- gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+ unsigned offset = addr & (PAGE_SIZE-1);
+ unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+ int ret;
+@@ -1875,6 +1925,7 @@ static int emulator_read_emulated(unsign
+ {
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa;
++ u32 error_code;
+
+ if (vcpu->mmio_read_completed) {
+ memcpy(val, vcpu->mmio_data, bytes);
+@@ -1882,17 +1933,20 @@ static int emulator_read_emulated(unsign
+ return X86EMUL_CONTINUE;
+ }
+
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
++
++ if (gpa == UNMAPPED_GVA) {
++ kvm_inject_page_fault(vcpu, addr, error_code);
++ return X86EMUL_PROPAGATE_FAULT;
++ }
+
+ /* For APIC access vmexit */
+ if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+ goto mmio;
+
+- if (kvm_read_guest_virt(addr, val, bytes, vcpu)
++ if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
+ == X86EMUL_CONTINUE)
+ return X86EMUL_CONTINUE;
+- if (gpa == UNMAPPED_GVA)
+- return X86EMUL_PROPAGATE_FAULT;
+
+ mmio:
+ /*
+@@ -1934,11 +1988,12 @@ static int emulator_write_emulated_onepa
+ {
+ struct kvm_io_device *mmio_dev;
+ gpa_t gpa;
++ u32 error_code;
+
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
+
+ if (gpa == UNMAPPED_GVA) {
+- kvm_inject_page_fault(vcpu, addr, 2);
++ kvm_inject_page_fault(vcpu, addr, error_code);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+@@ -2012,7 +2067,7 @@ static int emulator_cmpxchg_emulated(uns
+ char *kaddr;
+ u64 val;
+
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
++ gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
+
+ if (gpa == UNMAPPED_GVA ||
+ (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
+@@ -2093,7 +2148,7 @@ void kvm_report_emulation_failure(struct
+ if (reported)
+ return;
+
+- kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
++ kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
+
+ printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+ context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+@@ -2103,6 +2158,7 @@ EXPORT_SYMBOL_GPL(kvm_report_emulation_f
+
+ static struct x86_emulate_ops emulate_ops = {
+ .read_std = kvm_read_guest_virt,
++ .fetch = kvm_fetch_guest_virt,
+ .read_emulated = emulator_read_emulated,
+ .write_emulated = emulator_write_emulated,
+ .cmpxchg_emulated = emulator_cmpxchg_emulated,
+@@ -2217,12 +2273,17 @@ static int pio_copy_data(struct kvm_vcpu
+ gva_t q = vcpu->arch.pio.guest_gva;
+ unsigned bytes;
+ int ret;
++ u32 error_code;
+
+ bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
+ if (vcpu->arch.pio.in)
+- ret = kvm_write_guest_virt(q, p, bytes, vcpu);
++ ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
+ else
+- ret = kvm_read_guest_virt(q, p, bytes, vcpu);
++ ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
++
++ if (ret == X86EMUL_PROPAGATE_FAULT)
++ kvm_inject_page_fault(vcpu, q, error_code);
++
+ return ret;
+ }
+
+@@ -2243,7 +2304,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
+ r = pio_copy_data(vcpu);
+ if (r) {
+ kvm_x86_ops->cache_regs(vcpu);
+- return r;
++ goto out;
+ }
+ }
+
+@@ -2266,7 +2327,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
+ }
+
+ kvm_x86_ops->decache_regs(vcpu);
+-
++ out:
+ io->count -= io->cur_count;
+ io->cur_count = 0;
+
+@@ -2411,10 +2472,8 @@ int kvm_emulate_pio_string(struct kvm_vc
+ if (!vcpu->arch.pio.in) {
+ /* string PIO write */
+ ret = pio_copy_data(vcpu);
+- if (ret == X86EMUL_PROPAGATE_FAULT) {
+- kvm_inject_gp(vcpu, 0);
++ if (ret == X86EMUL_PROPAGATE_FAULT)
+ return 1;
+- }
+ if (ret == 0 && pio_dev) {
+ pio_string_write(pio_dev, vcpu);
+ complete_pio(vcpu);
+@@ -3220,7 +3279,7 @@ static int load_guest_segment_descriptor
+ kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+ return 1;
+ }
+- return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
++ return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+ }
+
+ /* allowed just for 8 bytes segments */
+@@ -3234,10 +3293,22 @@ static int save_guest_segment_descriptor
+
+ if (dtable.limit < index * 8 + 7)
+ return 1;
+- return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
++ return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
++}
++
++static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
++ struct desc_struct *seg_desc)
++{
++ u32 base_addr;
++
++ base_addr = seg_desc->base0;
++ base_addr |= (seg_desc->base1 << 16);
++ base_addr |= (seg_desc->base2 << 24);
++
++ return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
+ }
+
+-static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
++static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+ struct desc_struct *seg_desc)
+ {
+ u32 base_addr;
+@@ -3246,7 +3317,7 @@ static u32 get_tss_base_addr(struct kvm_
+ base_addr |= (seg_desc->base1 << 16);
+ base_addr |= (seg_desc->base2 << 24);
+
+- return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
++ return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
+ }
+
+ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+@@ -3422,7 +3493,7 @@ int kvm_task_switch_16(struct kvm_vcpu *
+ sizeof tss_segment_16))
+ goto out;
+
+- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
++ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
+ &tss_segment_16, sizeof tss_segment_16))
+ goto out;
+
+@@ -3451,7 +3522,7 @@ int kvm_task_switch_32(struct kvm_vcpu *
+ sizeof tss_segment_32))
+ goto out;
+
+- if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
++ if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
+ &tss_segment_32, sizeof tss_segment_32))
+ goto out;
+
+@@ -3472,7 +3543,7 @@ int kvm_task_switch(struct kvm_vcpu *vcp
+ u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+ u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+
+- old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
++ old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
+
+ /* FIXME: Handle errors. Failure to read either TSS or their
+ * descriptors should generate a pagefault.
+@@ -3666,7 +3737,7 @@ int kvm_arch_vcpu_ioctl_translate(struct
+
+ vcpu_load(vcpu);
+ down_read(&vcpu->kvm->slots_lock);
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
++ gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
+ up_read(&vcpu->kvm->slots_lock);
+ tr->physical_address = gpa;
+ tr->valid = gpa != UNMAPPED_GVA;
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c linux-source-2.6.26/arch/x86/kvm/x86_emulate.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86_emulate.c 2010-02-01 23:54:26.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86_emulate.c 2010-02-04 21:52:09.000000000 -0700
+@@ -528,7 +528,7 @@ static int do_fetch_insn_byte(struct x86
+
+ if (linear < fc->start || linear >= fc->end) {
+ size = min(15UL, PAGE_SIZE - offset_in_page(linear));
+- rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
++ rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
+ if (rc)
+ return rc;
+ fc->start = linear;
+@@ -583,11 +583,11 @@ static int read_descriptor(struct x86_em
+ op_bytes = 3;
+ *address = 0;
+ rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
+- ctxt->vcpu);
++ ctxt->vcpu, NULL);
+ if (rc)
+ return rc;
+ rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
+- ctxt->vcpu);
++ ctxt->vcpu, NULL);
+ return rc;
+ }
+
+@@ -1137,7 +1137,7 @@ static inline int emulate_grp1a(struct x
+
+ rc = ops->read_std(register_address(c, ctxt->ss_base,
+ c->regs[VCPU_REGS_RSP]),
+- &c->dst.val, c->dst.bytes, ctxt->vcpu);
++ &c->dst.val, c->dst.bytes, ctxt->vcpu, NULL);
+ if (rc != 0)
+ return rc;
+
+@@ -1463,7 +1463,7 @@ special_insn:
+ pop_instruction:
+ if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
+ c->regs[VCPU_REGS_RSP]), c->dst.ptr,
+- c->op_bytes, ctxt->vcpu)) != 0)
++ c->op_bytes, ctxt->vcpu, NULL)) != 0)
+ goto done;
+
+ register_address_increment(c, &c->regs[VCPU_REGS_RSP],
+diff -urpN linux-source-2.6.26.orig/include/asm-x86/kvm_host.h linux-source-2.6.26/include/asm-x86/kvm_host.h
+--- linux-source-2.6.26.orig/include/asm-x86/kvm_host.h 2010-02-04 21:39:17.000000000 -0700
++++ linux-source-2.6.26/include/asm-x86/kvm_host.h 2010-02-04 21:52:49.000000000 -0700
+@@ -201,7 +201,8 @@ struct kvm_mmu {
+ void (*new_cr3)(struct kvm_vcpu *vcpu);
+ int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+ void (*free)(struct kvm_vcpu *vcpu);
+- gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
++ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
++ u32 *error);
+ void (*prefetch_page)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *page);
+ hpa_t root_hpa;
+@@ -532,6 +533,11 @@ void __kvm_mmu_free_some_pages(struct kv
+ int kvm_mmu_load(struct kvm_vcpu *vcpu);
+ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+
++gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
++gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
++gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
++gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
++
+ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+
+ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
+diff -urpN linux-source-2.6.26.orig/include/asm-x86/kvm_x86_emulate.h linux-source-2.6.26/include/asm-x86/kvm_x86_emulate.h
+--- linux-source-2.6.26.orig/include/asm-x86/kvm_x86_emulate.h 2010-02-01 23:54:26.000000000 -0700
++++ linux-source-2.6.26/include/asm-x86/kvm_x86_emulate.h 2010-02-04 21:52:09.000000000 -0700
+@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
+ struct x86_emulate_ops {
+ /*
+ * read_std: Read bytes of standard (non-emulated/special) memory.
+- * Used for instruction fetch, stack operations, and others.
++ * Used for descriptor reading.
+ * @addr: [IN ] Linear address from which to read.
+ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
+ * @bytes: [IN ] Number of bytes to read from memory.
+ */
+ int (*read_std)(unsigned long addr, void *val,
+- unsigned int bytes, struct kvm_vcpu *vcpu);
++ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
++
++ /*
++ * fetch: Read bytes of standard (non-emulated/special) memory.
++ * Used for instruction fetch.
++ * @addr: [IN ] Linear address from which to read.
++ * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
++ * @bytes: [IN ] Number of bytes to read from memory.
++ */
++ int (*fetch)(unsigned long addr, void *val,
++ unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+ /*
+ * read_emulated: Read bytes from emulated/special memory area.
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-remove-vmap-usage.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-remove-vmap-usage.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,152 @@
+Subject: [KVM 5.5/5.4.z Embargoed 2/7 v2] KVM: remove the vmap
+ usage
+
+From: Izik Eidus <ieidus at redhat.com>
+
+vmap() on guest pages hides those pages from the Linux mm for an extended
+(userspace determined) amount of time. Get rid of it.
+
+Signed-off-by: Izik Eidus <ieidus at redhat.com>
+Signed-off-by: Avi Kivity <avi at redhat.com>
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/kvm/x86.c | 62 +++++++++-----------------------------------
+ include/linux/kvm_types.h | 3 +-
+ 2 files changed, 14 insertions(+), 51 deletions(-)
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86.c linux-source-2.6.26/arch/x86/kvm/x86.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86.c 2010-02-04 21:39:17.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86.c 2010-02-04 21:45:21.000000000 -0700
+@@ -2211,40 +2211,19 @@ int emulate_instruction(struct kvm_vcpu
+ }
+ EXPORT_SYMBOL_GPL(emulate_instruction);
+
+-static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
+-{
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
+- if (vcpu->arch.pio.guest_pages[i]) {
+- kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
+- vcpu->arch.pio.guest_pages[i] = NULL;
+- }
+-}
+-
+ static int pio_copy_data(struct kvm_vcpu *vcpu)
+ {
+ void *p = vcpu->arch.pio_data;
+- void *q;
++ gva_t q = vcpu->arch.pio.guest_gva;
+ unsigned bytes;
+- int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
++ int ret;
+
+- q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
+- PAGE_KERNEL);
+- if (!q) {
+- free_pio_guest_pages(vcpu);
+- return -ENOMEM;
+- }
+- q += vcpu->arch.pio.guest_page_offset;
+ bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
+ if (vcpu->arch.pio.in)
+- memcpy(q, p, bytes);
++ ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+ else
+- memcpy(p, q, bytes);
+- q -= vcpu->arch.pio.guest_page_offset;
+- vunmap(q);
+- free_pio_guest_pages(vcpu);
+- return 0;
++ ret = kvm_read_guest_virt(q, p, bytes, vcpu);
++ return ret;
+ }
+
+ int complete_pio(struct kvm_vcpu *vcpu)
+@@ -2349,7 +2328,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcp
+ vcpu->arch.pio.in = in;
+ vcpu->arch.pio.string = 0;
+ vcpu->arch.pio.down = 0;
+- vcpu->arch.pio.guest_page_offset = 0;
+ vcpu->arch.pio.rep = 0;
+
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+@@ -2380,9 +2358,7 @@ int kvm_emulate_pio_string(struct kvm_vc
+ gva_t address, int rep, unsigned port)
+ {
+ unsigned now, in_page;
+- int i, ret = 0;
+- int nr_pages = 1;
+- struct page *page;
++ int ret = 0;
+ struct kvm_io_device *pio_dev;
+
+ vcpu->run->exit_reason = KVM_EXIT_IO;
+@@ -2394,7 +2370,6 @@ int kvm_emulate_pio_string(struct kvm_vc
+ vcpu->arch.pio.in = in;
+ vcpu->arch.pio.string = 1;
+ vcpu->arch.pio.down = down;
+- vcpu->arch.pio.guest_page_offset = offset_in_page(address);
+ vcpu->arch.pio.rep = rep;
+
+ if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+@@ -2414,15 +2389,8 @@ int kvm_emulate_pio_string(struct kvm_vc
+ else
+ in_page = offset_in_page(address) + size;
+ now = min(count, (unsigned long)in_page / size);
+- if (!now) {
+- /*
+- * String I/O straddles page boundary. Pin two guest pages
+- * so that we satisfy atomicity constraints. Do just one
+- * transaction to avoid complexity.
+- */
+- nr_pages = 2;
++ if (!now)
+ now = 1;
+- }
+ if (down) {
+ /*
+ * String I/O in reverse. Yuck. Kill the guest, fix later.
+@@ -2437,21 +2405,17 @@ int kvm_emulate_pio_string(struct kvm_vc
+ if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
+ kvm_x86_ops->skip_emulated_instruction(vcpu);
+
+- for (i = 0; i < nr_pages; ++i) {
+- page = gva_to_page(vcpu, address + i * PAGE_SIZE);
+- vcpu->arch.pio.guest_pages[i] = page;
+- if (!page) {
+- kvm_inject_gp(vcpu, 0);
+- free_pio_guest_pages(vcpu);
+- return 1;
+- }
+- }
++ vcpu->arch.pio.guest_gva = address;
+
+ pio_dev = vcpu_find_pio_dev(vcpu, port);
+ if (!vcpu->arch.pio.in) {
+ /* string PIO write */
+ ret = pio_copy_data(vcpu);
+- if (ret >= 0 && pio_dev) {
++ if (ret == X86EMUL_PROPAGATE_FAULT) {
++ kvm_inject_gp(vcpu, 0);
++ return 1;
++ }
++ if (ret == 0 && pio_dev) {
+ pio_string_write(pio_dev, vcpu);
+ complete_pio(vcpu);
+ if (vcpu->arch.pio.count == 0)
+diff -urpN linux-source-2.6.26.orig/include/linux/kvm_types.h linux-source-2.6.26/include/linux/kvm_types.h
+--- linux-source-2.6.26.orig/include/linux/kvm_types.h 2008-07-13 15:51:29.000000000 -0600
++++ linux-source-2.6.26/include/linux/kvm_types.h 2010-02-04 21:45:22.000000000 -0700
+@@ -43,8 +43,7 @@ typedef hfn_t pfn_t;
+ struct kvm_pio_request {
+ unsigned long count;
+ int cur_count;
+- struct page *guest_pages[2];
+- unsigned guest_page_offset;
++ gva_t guest_gva;
+ int in;
+ int port;
+ int size;
Added: dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-use-kvm_rw_guest_virt-for-segment-descriptors.patch
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ dists/lenny-security/linux-2.6/debian/patches/bugfix/x86/kvm-use-kvm_rw_guest_virt-for-segment-descriptors.patch Tue Feb 9 15:26:36 2010 (r15134)
@@ -0,0 +1,57 @@
+Subject: [KVM 5.5/5.4.z Embargoed 3/7 v2] KVM: Use kvm_{read,
+ write}_guest_virt() to read and write segment descriptors
+
+From: Mikhail Ershov <Mike.Ershov at gmail.com>
+
+Segment descriptors tables can be placed on two non-contiguous pages.
+This patch makes reading segment descriptors by linear address.
+
+Signed-off-by: Mikhail Ershov <Mike.Ershov at gmail.com>
+Signed-off-by: Avi Kivity <avi at redhat.com>
+Signed-off-by: Gleb Natapov <gleb at redhat.com>
+---
+ arch/x86/kvm/x86.c | 10 ++--------
+ 1 files changed, 2 insertions(+), 8 deletions(-)
+
+Adjusted to apply to Debian's 2.6.26 by dann frazier <dannf at debian.org>
+
+diff -urpN linux-source-2.6.26.orig/arch/x86/kvm/x86.c linux-source-2.6.26/arch/x86/kvm/x86.c
+--- linux-source-2.6.26.orig/arch/x86/kvm/x86.c 2010-02-04 21:39:17.000000000 -0700
++++ linux-source-2.6.26/arch/x86/kvm/x86.c 2010-02-04 21:49:22.000000000 -0700
+@@ -3247,7 +3211,6 @@ static void get_segment_descritptor_dtab
+ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+ {
+- gpa_t gpa;
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+@@ -3257,16 +3220,13 @@ static int load_guest_segment_descriptor
+ kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+ return 1;
+ }
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+- gpa += index * 8;
+- return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
++ return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ }
+
+ /* allowed just for 8 bytes segments */
+ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+ struct desc_struct *seg_desc)
+ {
+- gpa_t gpa;
+ struct descriptor_table dtable;
+ u16 index = selector >> 3;
+
+@@ -3274,9 +3234,7 @@ static int save_guest_segment_descriptor
+
+ if (dtable.limit < index * 8 + 7)
+ return 1;
+- gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+- gpa += index * 8;
+- return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
++ return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+ }
+
+ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
Modified: dists/lenny-security/linux-2.6/debian/patches/series/21lenny3
==============================================================================
--- dists/lenny-security/linux-2.6/debian/patches/series/21lenny3 Mon Feb 8 23:39:22 2010 (r15133)
+++ dists/lenny-security/linux-2.6/debian/patches/series/21lenny3 Tue Feb 9 15:26:36 2010 (r15134)
@@ -5,3 +5,11 @@
+ bugfix/all/connector-delete-buggy-notification-code.patch
+ bugfix/all/fix-potential-crash-with-sys_move_pages.patch
+ bugfix/all/untangle-the-do_mremap-mess-ppc64-fix.patch
++ bugfix/x86/kvm-add-kvm_rw_guest_virt.patch
++ bugfix/x86/kvm-remove-vmap-usage.patch
++ bugfix/x86/kvm-use-kvm_rw_guest_virt-for-segment-descriptors.patch
++ bugfix/x86/kvm-fix-memory-access-during-x86-emulation.patch
++ bugfix/x86/kvm-check-IOPL-level-during-io-instruction-emulation.patch
++ bugfix/x86/kvm-emulator-fix-popf-emulation.patch
++ bugfix/x86/fix-popf-emulation.patch
++ bugfix/x86/check-cpl-level-during-priv-instruction-emulation.patch
More information about the Kernel-svn-changes
mailing list