[linux] 03/09: Add fix for CVE-2016-5412

debian-kernel at lists.debian.org
Thu Dec 1 00:23:47 UTC 2016


This is an automated email from the git hooks/post-receive script.

benh pushed a commit to branch jessie-security
in repository linux.

commit b983d3f88d11bcc51bd3196a15fb661176646db4
Author: Ben Hutchings <ben at decadent.org.uk>
Date:   Wed Nov 30 20:31:23 2016 +0000

    Add fix for CVE-2016-5412
---
 debian/changelog                                   |   3 +
 ...l-out-tm-state-save-restore-into-separate.patch | 504 +++++++++++++++++++++
 ...book3s-hv-save-restore-tm-state-in-h_cede.patch |  65 +++
 debian/patches/series                              |   2 +
 4 files changed, 574 insertions(+)

diff --git a/debian/changelog b/debian/changelog
index 24b50c6..377634c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -6,6 +6,9 @@ linux (3.16.36-1+deb8u3) UNRELEASED; urgency=medium
   * fs: Avoid premature clearing of capabilities (CVE-2015-1350)
     (Closes: #770492)
   * [arm64] perf: reject groups spanning multiple HW PMUs (CVE-2015-8955)
+  * [powerpc*] KVM: Book3S HV: Pull out TM state save/restore into separate
+    procedures
+  * [powerpc*] KVM: Book3S HV: Save/restore TM state in H_CEDE (CVE-2016-5412)
 
  -- Ben Hutchings <ben at decadent.org.uk>  Wed, 30 Nov 2016 04:06:41 +0000
 
diff --git a/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate.patch b/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate.patch
new file mode 100644
index 0000000..442b90f
--- /dev/null
+++ b/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate.patch
@@ -0,0 +1,504 @@
+From: Paul Mackerras <paulus at ozlabs.org>
+Date: Wed, 22 Jun 2016 14:21:59 +1000
+Subject: KVM: PPC: Book3S HV: Pull out TM state save/restore into separate
+ procedures
+Origin: https://git.kernel.org/linus/f024ee098476a3e620232e4a78cfac505f121245
+
+This moves the transactional memory state save and restore sequences
+out of the guest entry/exit paths into separate procedures.  This is
+so that these sequences can be used in going into and out of nap
+in a subsequent patch.
+
+The only code changes here are (a) saving and restoring LR on the
+stack, since these new procedures get called with a bl instruction,
+(b) explicitly saving r1 into the PACA instead of assuming that
+HSTATE_HOST_R1(r13) is already set, and (c) removing an unnecessary
+and redundant setting of MSR[TM] that should have been removed by
+commit 9d4d0bdd9e0a ("KVM: PPC: Book3S HV: Add transactional memory
+support", 2013-09-24) but wasn't.
+
+Signed-off-by: Paul Mackerras <paulus at ozlabs.org>
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 449 +++++++++++++++++---------------
+ 1 file changed, 237 insertions(+), 212 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -625,112 +625,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	skip_tm
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-
+-	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+-	mfmsr	r5
+-	li	r6, MSR_TM >> 32
+-	sldi	r6, r6, 32
+-	or	r5, r5, r6
+-	ori	r5, r5, MSR_FP
+-	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+-	mtmsrd	r5
+-
+-	/*
+-	 * The user may change these outside of a transaction, so they must
+-	 * always be context switched.
+-	 */
+-	ld	r5, VCPU_TFHAR(r4)
+-	ld	r6, VCPU_TFIAR(r4)
+-	ld	r7, VCPU_TEXASR(r4)
+-	mtspr	SPRN_TFHAR, r5
+-	mtspr	SPRN_TFIAR, r6
+-	mtspr	SPRN_TEXASR, r7
+-
+-	ld	r5, VCPU_MSR(r4)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	skip_tm	/* TM not active in guest */
+-
+-	/* Make sure the failure summary is set, otherwise we'll program check
+-	 * when we trechkpt.  It's possible that this might have been not set
+-	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+-	 * host.
+-	 */
+-	oris	r7, r7, (TEXASR_FS)@h
+-	mtspr	SPRN_TEXASR, r7
+-
+-	/*
+-	 * We need to load up the checkpointed state for the guest.
+-	 * We need to do this early as it will blow away any GPRs, VSRs and
+-	 * some SPRs.
+-	 */
+-
+-	mr	r31, r4
+-	addi	r3, r31, VCPU_FPRS_TM
+-	bl	load_fp_state
+-	addi	r3, r31, VCPU_VRS_TM
+-	bl	load_vr_state
+-	mr	r4, r31
+-	lwz	r7, VCPU_VRSAVE_TM(r4)
+-	mtspr	SPRN_VRSAVE, r7
+-
+-	ld	r5, VCPU_LR_TM(r4)
+-	lwz	r6, VCPU_CR_TM(r4)
+-	ld	r7, VCPU_CTR_TM(r4)
+-	ld	r8, VCPU_AMR_TM(r4)
+-	ld	r9, VCPU_TAR_TM(r4)
+-	mtlr	r5
+-	mtcr	r6
+-	mtctr	r7
+-	mtspr	SPRN_AMR, r8
+-	mtspr	SPRN_TAR, r9
+-
+-	/*
+-	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+-	 * till the last moment to avoid running with userspace PPR and DSCR for
+-	 * too long.
+-	 */
+-	ld	r29, VCPU_DSCR_TM(r4)
+-	ld	r30, VCPU_PPR_TM(r4)
+-
+-	std	r2, PACATMSCRATCH(r13) /* Save TOC */
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* Load GPRs r0-r28 */
+-	reg = 0
+-	.rept	29
+-	ld	reg, VCPU_GPRS_TM(reg)(r31)
+-	reg = reg + 1
+-	.endr
+-
+-	mtspr	SPRN_DSCR, r29
+-	mtspr	SPRN_PPR, r30
+-
+-	/* Load final GPRs */
+-	ld	29, VCPU_GPRS_TM(29)(r31)
+-	ld	30, VCPU_GPRS_TM(30)(r31)
+-	ld	31, VCPU_GPRS_TM(31)(r31)
+-
+-	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
+-	TRECHKPT
+-
+-	/* Now let's get back the state we need. */
+-	HMT_MEDIUM
+-	GET_PACA(r13)
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-	ld	r4, HSTATE_KVM_VCPU(r13)
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATMSCRATCH(r13)
+-
+-	/* Set the MSR RI since we have our registers back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-skip_tm:
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Load guest PMU registers */
+@@ -821,12 +717,6 @@ BEGIN_FTR_SECTION
+ 	/* Skip next section on POWER7 or PPC970 */
+ 	b	8f
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+ 	/* Load up POWER8-specific registers */
+ 	ld	r5, VCPU_IAMR(r4)
+ 	lwz	r6, VCPU_PSPB(r4)
+@@ -1347,106 +1237,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	2f
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-	/* Turn on TM. */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+-	ld	r5, VCPU_MSR(r9)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	1f	/* TM not active in guest. */
+-
+-	li	r3, TM_CAUSE_KVM_RESCHED
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* All GPRs are volatile at this point. */
+-	TRECLAIM(R3)
+-
+-	/* Temporarily store r13 and r9 so we have some regs to play with */
+-	SET_SCRATCH0(r13)
+-	GET_PACA(r13)
+-	std	r9, PACATMSCRATCH(r13)
+-	ld	r9, HSTATE_KVM_VCPU(r13)
+-
+-	/* Get a few more GPRs free. */
+-	std	r29, VCPU_GPRS_TM(29)(r9)
+-	std	r30, VCPU_GPRS_TM(30)(r9)
+-	std	r31, VCPU_GPRS_TM(31)(r9)
+-
+-	/* Save away PPR and DSCR soon so don't run with user values. */
+-	mfspr	r31, SPRN_PPR
+-	HMT_MEDIUM
+-	mfspr	r30, SPRN_DSCR
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-
+-	/* Save all but r9, r13 & r29-r31 */
+-	reg = 0
+-	.rept	29
+-	.if (reg != 9) && (reg != 13)
+-	std	reg, VCPU_GPRS_TM(reg)(r9)
+-	.endif
+-	reg = reg + 1
+-	.endr
+-	/* ... now save r13 */
+-	GET_SCRATCH0(r4)
+-	std	r4, VCPU_GPRS_TM(13)(r9)
+-	/* ... and save r9 */
+-	ld	r4, PACATMSCRATCH(r13)
+-	std	r4, VCPU_GPRS_TM(9)(r9)
+-
+-	/* Reload stack pointer and TOC. */
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATOC(r13)
+-
+-	/* Set MSR RI now we have r1 and r13 back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-
+-	/* Save away checkpinted SPRs. */
+-	std	r31, VCPU_PPR_TM(r9)
+-	std	r30, VCPU_DSCR_TM(r9)
+-	mflr	r5
+-	mfcr	r6
+-	mfctr	r7
+-	mfspr	r8, SPRN_AMR
+-	mfspr	r10, SPRN_TAR
+-	std	r5, VCPU_LR_TM(r9)
+-	stw	r6, VCPU_CR_TM(r9)
+-	std	r7, VCPU_CTR_TM(r9)
+-	std	r8, VCPU_AMR_TM(r9)
+-	std	r10, VCPU_TAR_TM(r9)
+-
+-	/* Restore r12 as trap number. */
+-	lwz	r12, VCPU_TRAP(r9)
+-
+-	/* Save FP/VSX. */
+-	addi	r3, r9, VCPU_FPRS_TM
+-	bl	store_fp_state
+-	addi	r3, r9, VCPU_VRS_TM
+-	bl	store_vr_state
+-	mfspr	r6, SPRN_VRSAVE
+-	stw	r6, VCPU_VRSAVE_TM(r9)
+-1:
+-	/*
+-	 * We need to save these SPRs after the treclaim so that the software
+-	 * error code is recorded correctly in the TEXASR.  Also the user may
+-	 * change these outside of a transaction, so they must always be
+-	 * context switched.
+-	 */
+-	mfspr	r5, SPRN_TFHAR
+-	mfspr	r6, SPRN_TFIAR
+-	mfspr	r7, SPRN_TEXASR
+-	std	r5, VCPU_TFHAR(r9)
+-	std	r6, VCPU_TFIAR(r9)
+-	std	r7, VCPU_TEXASR(r9)
+-2:
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Increment yield count if they have a VPA */
+@@ -2492,6 +2284,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ 	mr	r4,r31
+ 	blr
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++/*
++ * Save transactional state and TM-related registers.
++ * Called with r9 pointing to the vcpu struct.
++ * This can modify all checkpointed registers, but
++ * restores r1, r2 and r9 (vcpu pointer) before exit.
++ */
++kvmppc_save_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM. */
++	mfmsr	r8
++	li	r0, 1
++	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
++	mtmsrd	r8
++
++	ld	r5, VCPU_MSR(r9)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beq	1f	/* TM not active in guest. */
++
++	std	r1, HSTATE_HOST_R1(r13)
++	li	r3, TM_CAUSE_KVM_RESCHED
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* All GPRs are volatile at this point. */
++	TRECLAIM(R3)
++
++	/* Temporarily store r13 and r9 so we have some regs to play with */
++	SET_SCRATCH0(r13)
++	GET_PACA(r13)
++	std	r9, PACATMSCRATCH(r13)
++	ld	r9, HSTATE_KVM_VCPU(r13)
++
++	/* Get a few more GPRs free. */
++	std	r29, VCPU_GPRS_TM(29)(r9)
++	std	r30, VCPU_GPRS_TM(30)(r9)
++	std	r31, VCPU_GPRS_TM(31)(r9)
++
++	/* Save away PPR and DSCR soon so don't run with user values. */
++	mfspr	r31, SPRN_PPR
++	HMT_MEDIUM
++	mfspr	r30, SPRN_DSCR
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++
++	/* Save all but r9, r13 & r29-r31 */
++	reg = 0
++	.rept	29
++	.if (reg != 9) && (reg != 13)
++	std	reg, VCPU_GPRS_TM(reg)(r9)
++	.endif
++	reg = reg + 1
++	.endr
++	/* ... now save r13 */
++	GET_SCRATCH0(r4)
++	std	r4, VCPU_GPRS_TM(13)(r9)
++	/* ... and save r9 */
++	ld	r4, PACATMSCRATCH(r13)
++	std	r4, VCPU_GPRS_TM(9)(r9)
++
++	/* Reload stack pointer and TOC. */
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATOC(r13)
++
++	/* Set MSR RI now we have r1 and r13 back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	/* Save away checkpointed SPRs. */
++	std	r31, VCPU_PPR_TM(r9)
++	std	r30, VCPU_DSCR_TM(r9)
++	mflr	r5
++	mfcr	r6
++	mfctr	r7
++	mfspr	r8, SPRN_AMR
++	mfspr	r10, SPRN_TAR
++	std	r5, VCPU_LR_TM(r9)
++	stw	r6, VCPU_CR_TM(r9)
++	std	r7, VCPU_CTR_TM(r9)
++	std	r8, VCPU_AMR_TM(r9)
++	std	r10, VCPU_TAR_TM(r9)
++
++	/* Restore r12 as trap number. */
++	lwz	r12, VCPU_TRAP(r9)
++
++	/* Save FP/VSX. */
++	addi	r3, r9, VCPU_FPRS_TM
++	bl	store_fp_state
++	addi	r3, r9, VCPU_VRS_TM
++	bl	store_vr_state
++	mfspr	r6, SPRN_VRSAVE
++	stw	r6, VCPU_VRSAVE_TM(r9)
++1:
++	/*
++	 * We need to save these SPRs after the treclaim so that the software
++	 * error code is recorded correctly in the TEXASR.  Also the user may
++	 * change these outside of a transaction, so they must always be
++	 * context switched.
++	 */
++	mfspr	r5, SPRN_TFHAR
++	mfspr	r6, SPRN_TFIAR
++	mfspr	r7, SPRN_TEXASR
++	std	r5, VCPU_TFHAR(r9)
++	std	r6, VCPU_TFIAR(r9)
++	std	r7, VCPU_TEXASR(r9)
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++
++/*
++ * Restore transactional state and TM-related registers.
++ * Called with r4 pointing to the vcpu struct.
++ * This potentially modifies all checkpointed registers.
++ * It restores r1, r2, r4 from the PACA.
++ */
++kvmppc_restore_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM/FP/VSX/VMX so we can restore them. */
++	mfmsr	r5
++	li	r6, MSR_TM >> 32
++	sldi	r6, r6, 32
++	or	r5, r5, r6
++	ori	r5, r5, MSR_FP
++	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
++	mtmsrd	r5
++
++	/*
++	 * The user may change these outside of a transaction, so they must
++	 * always be context switched.
++	 */
++	ld	r5, VCPU_TFHAR(r4)
++	ld	r6, VCPU_TFIAR(r4)
++	ld	r7, VCPU_TEXASR(r4)
++	mtspr	SPRN_TFHAR, r5
++	mtspr	SPRN_TFIAR, r6
++	mtspr	SPRN_TEXASR, r7
++
++	ld	r5, VCPU_MSR(r4)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beqlr		/* TM not active in guest */
++	std	r1, HSTATE_HOST_R1(r13)
++
++	/* Make sure the failure summary is set, otherwise we'll program check
++	 * when we trechkpt.  It's possible that this might not have been set
++	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
++	 * host.
++	 */
++	oris	r7, r7, (TEXASR_FS)@h
++	mtspr	SPRN_TEXASR, r7
++
++	/*
++	 * We need to load up the checkpointed state for the guest.
++	 * We need to do this early as it will blow away any GPRs, VSRs and
++	 * some SPRs.
++	 */
++
++	mr	r31, r4
++	addi	r3, r31, VCPU_FPRS_TM
++	bl	load_fp_state
++	addi	r3, r31, VCPU_VRS_TM
++	bl	load_vr_state
++	mr	r4, r31
++	lwz	r7, VCPU_VRSAVE_TM(r4)
++	mtspr	SPRN_VRSAVE, r7
++
++	ld	r5, VCPU_LR_TM(r4)
++	lwz	r6, VCPU_CR_TM(r4)
++	ld	r7, VCPU_CTR_TM(r4)
++	ld	r8, VCPU_AMR_TM(r4)
++	ld	r9, VCPU_TAR_TM(r4)
++	mtlr	r5
++	mtcr	r6
++	mtctr	r7
++	mtspr	SPRN_AMR, r8
++	mtspr	SPRN_TAR, r9
++
++	/*
++	 * Load up PPR and DSCR values but don't put them in the actual SPRs
++	 * till the last moment to avoid running with userspace PPR and DSCR for
++	 * too long.
++	 */
++	ld	r29, VCPU_DSCR_TM(r4)
++	ld	r30, VCPU_PPR_TM(r4)
++
++	std	r2, PACATMSCRATCH(r13) /* Save TOC */
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* Load GPRs r0-r28 */
++	reg = 0
++	.rept	29
++	ld	reg, VCPU_GPRS_TM(reg)(r31)
++	reg = reg + 1
++	.endr
++
++	mtspr	SPRN_DSCR, r29
++	mtspr	SPRN_PPR, r30
++
++	/* Load final GPRs */
++	ld	29, VCPU_GPRS_TM(29)(r31)
++	ld	30, VCPU_GPRS_TM(30)(r31)
++	ld	31, VCPU_GPRS_TM(31)(r31)
++
++	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
++	TRECHKPT
++
++	/* Now let's get back the state we need. */
++	HMT_MEDIUM
++	GET_PACA(r13)
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++	ld	r4, HSTATE_KVM_VCPU(r13)
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATMSCRATCH(r13)
++
++	/* Set the MSR RI since we have our registers back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++#endif
++
+ /*
+  * We come here if we get any exception or interrupt while we are
+  * executing host real mode code while in guest MMU context.
diff --git a/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch b/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch
new file mode 100644
index 0000000..2b05cf3
--- /dev/null
+++ b/debian/patches/bugfix/powerpc/kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch
@@ -0,0 +1,65 @@
+From: Paul Mackerras <paulus at ozlabs.org>
+Date: Wed, 22 Jun 2016 15:52:55 +1000
+Subject: KVM: PPC: Book3S HV: Save/restore TM state in H_CEDE
+Origin: https://git.kernel.org/linus/93d17397e4e2182fdaad503e2f9da46202c0f1c3
+
+It turns out that if the guest does a H_CEDE while the CPU is in
+a transactional state, and the H_CEDE does a nap, and the nap
+loses the architected state of the CPU (which it is allowed to do),
+then we lose the checkpointed state of the virtual CPU.  In addition,
+the transactional-memory state recorded in the MSR gets reset back
+to non-transactional, and when we try to return to the guest, we take
+a TM bad thing type of program interrupt because we are trying to
+transition from non-transactional to transactional with a hrfid
+instruction, which is not permitted.
+
+The result of the program interrupt occurring at that point is that
+the host CPU will hang in an infinite loop with interrupts disabled.
+Thus this is a denial of service vulnerability in the host which can
+be triggered by any guest (and depending on the guest kernel, it can
+potentially be triggered by unprivileged userspace in the guest).
+
+This vulnerability has been assigned the ID CVE-2016-5412.
+
+To fix this, we save the TM state before napping and restore it
+on exit from the nap, when handling a H_CEDE in real mode.  The
+case where H_CEDE exits to host virtual mode is already OK (as are
+other hcalls which exit to host virtual mode) because the exit
+path saves the TM state.
+
+Signed-off-by: Paul Mackerras <paulus at ozlabs.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben at decadent.org.uk>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1965,6 +1965,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+ 	/* save FP state */
+ 	bl	kvmppc_save_fp
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	ld	r9, HSTATE_KVM_VCPU(r13)
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/*
+ 	 * Take a nap until a decrementer or external or doobell interrupt
+ 	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+@@ -2004,6 +2011,12 @@ kvm_end_cede:
+ 	/* Woken by external or decrementer interrupt */
+ 	ld	r1, HSTATE_HOST_R1(r13)
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/* load up FP state */
+ 	bl	kvmppc_load_fp
+ 
diff --git a/debian/patches/series b/debian/patches/series
index 16fdf50..68a0b7c 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -687,6 +687,8 @@ bugfix/all/fuse-propagate-dentry-down-to-inode_change_ok.patch
 bugfix/all/fs-give-dentry-to-inode_change_ok-instead-of-inode.patch
 bugfix/all/fs-avoid-premature-clearing-of-capabilities.patch
 bugfix/arm64/arm64-perf-reject-groups-spanning-multiple-hw-pmus.patch
+bugfix/powerpc/kvm-ppc-book3s-hv-pull-out-tm-state-save-restore-into-separate.patch
+bugfix/powerpc/kvm-ppc-book3s-hv-save-restore-tm-state-in-h_cede.patch
 
 # Fix ABI changes
 debian/of-fix-abi-changes.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/kernel/linux.git


