path: root/include
author     Borislav Petkov <bp@suse.de>     2017-09-07 11:38:37 +0200
committer  Ingo Molnar <mingo@kernel.org>   2017-09-07 11:53:11 +0200
commit     21d9bb4a05bac50fb4f850517af4030baecd00f6 (patch)
tree       729adf81c36b0c7d2745226f225b6b64fc655eb9 /include
parent     1c9fe4409ce3e9c78b1ed96ee8ed699d4f03bf33 (diff)
x86/mm: Make the SME mask a u64
The SME encryption mask is for masking 64-bit pagetable entries. It being an unsigned long works fine on X86_64, but on 32-bit builds it truncates bits, leading to Xen guests crashing very early.

And regardless, the whole SME mask handling shouldn't have leaked into 32-bit because SME is an X86_64-only feature. So, first make the mask a u64. Then, add trivial 32-bit versions of the __sme_* macros so that nothing happens there.

Reported-and-tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tested-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Tom Lendacky <Thomas.Lendacky@amd.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas <Thomas.Lendacky@amd.com>
Fixes: 21729f81ce8a ("x86/mm: Provide general kernel support for memory encryption")
Link: http://lkml.kernel.org/r/20170907093837.76zojtkgebwtqc74@pd.tnic
Signed-off-by: Ingo Molnar <mingo@kernel.org>
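To make the failure mode concrete, here is a small userspace sketch (not part of the patch) of the truncation the message describes. uint32_t stands in for a 32-bit build's unsigned long, and the pagetable entry value is made up purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Old macro shape, with the narrowing cast this patch removes. */
#define OLD_SME_SET(x)	((uint32_t)(x) | (uint32_t)0)

/* New 32-bit macro shape: a plain identity, nothing is lost. */
#define NEW_SME_SET(x)	(x)

int main(void)
{
	/* A 64-bit (PAE) pagetable entry whose frame sits above 4 GiB. */
	uint64_t pte = 0x100000067ULL;

	/* The cast chops off the upper 32 bits: only 0x67 survives. */
	printf("old: %#" PRIx64 "\n", (uint64_t)OLD_SME_SET(pte));

	/* The entry passes through intact. */
	printf("new: %#" PRIx64 "\n", (uint64_t)NEW_SME_SET(pte));
	return 0;
}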
Diffstat (limited to 'include')
-rw-r--r--   include/linux/mem_encrypt.h   13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 1255f09..265a9cd 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -21,7 +21,7 @@
 
 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL
 
 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
@@ -30,18 +30,23 @@ static inline bool sme_active(void)
 	return !!sme_me_mask;
 }
 
-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
 }
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
  * the encryption mask from a value (e.g. when dealing with pagetable
  * entries).
  */
-#define __sme_set(x)		((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x)		((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x)		((x) | sme_me_mask)
+#define __sme_clr(x)		((x) & ~sme_me_mask)
+#else
+#define __sme_set(x)	(x)
+#define __sme_clr(x)	(x)
+#endif
 
 #endif	/* __ASSEMBLY__ */
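For context, a hedged sketch of how a caller might use these macros when building a pagetable entry; the helper name make_encrypted_pte() is hypothetical and not taken from the kernel tree:

#include <linux/types.h>
#include <linux/mem_encrypt.h>

/* Hypothetical helper: OR the SME mask into a PTE-sized value.  On X86_64
 * with SME active, __sme_set() sets the encryption bit; on 32-bit builds
 * and on kernels without CONFIG_AMD_MEM_ENCRYPT it now compiles to a no-op. */
static inline u64 make_encrypted_pte(u64 paddr, u64 prot)
{
	return __sme_set(paddr | prot);
}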