path: root/arch/s390/mm
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2016-04-18 13:24:52 +0200
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-06-20 09:54:51 +0200
commitfd8d4e3ab6993e194287a59c4d3a6a43da86b8dc (patch)
tree7b604322fdb22cda63e86edf7a81775df321b2e9 /arch/s390/mm
parent5b062bd4940f81e0bd26b0d75f56d7abebf0309f (diff)
downloadop-kernel-dev-fd8d4e3ab6993e194287a59c4d3a6a43da86b8dc.zip
op-kernel-dev-fd8d4e3ab6993e194287a59c4d3a6a43da86b8dc.tar.gz
s390/mm: support EDAT1 for gmap shadows
If the guest is enabled for EDAT1, we can easily create shadows for guest2 -> guest3 provided tables that make use of EDAT1. If guest2 references a 1MB page, this memory looks consecutive for guest2, but it might not be so for us. Therefore we have to create fake page tables.

We can easily add that to our existing infrastructure. The invalidation mechanism will make sure that fake page tables are removed when the parent table (sgt table entry) is changed.

As EDAT1 also introduced protection on all page table levels, we have to shadow these bits correctly as well. We don't have to care about:
- ACCF-Validity Control in STE
- Access-Control Bits in STE
- Fetch-Protection Bit in STE
- Common-Segment Bit in STE
as all of these bits might be dropped and there is no guarantee that they are active ("unpredictable whether the CPU uses these bits", "may be used"). Without using EDAT1 in the shadow ourselves (STE-format control == 0), simply shadowing these bits would not be enough: they would be ignored.

Please note that we are using the "fake" flag to keep this consistent with further changes (EDAT2, real-space designation support) and to keep the shadow functions from handling fc=1 STEs. In the future, with huge pages in the host, gmap_shadow_pgt() could simply try to map a huge host page if "fake" is set and indicate via the return value that no lower fake tables / shadow ptes are required.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
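To make the intended use of the new "fake" flag concrete, here is a minimal caller-side sketch, assuming the gmap_shadow_pgt_lookup() interface as changed below and the existing gmap_shadow_page() helper; shadow_fault_sketch() is a hypothetical name and the pte handling is simplified for illustration, it is not part of this patch:

/*
 * Illustrative sketch (not from this patch): consuming the "fake" flag.
 * When *fake is set, "pgt" is the guest-absolute origin of a contiguous
 * 1 MB block, not a page table, so the shadow pte can be computed
 * directly instead of being read from a guest page table.
 */
static int shadow_fault_sketch(struct gmap *sg, unsigned long saddr)
{
	unsigned long pgt, pteval;
	int dat_protection, fake, rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
	if (rc)
		return rc;		/* e.g. -EAGAIN: shadow tables incomplete */
	if (fake) {
		/* EDAT1 1 MB page: 4 KB page index = address bits 12..19 */
		pteval = pgt + (saddr & 0xff000UL);
	} else {
		/* regular case: read the guest pte at pgt + 8 * page index */
		pteval = 0;		/* guest memory access omitted */
	}
	if (dat_protection)
		pteval |= _PAGE_PROTECT;
	return gmap_shadow_page(sg, saddr, __pte(pteval));
}

This is the same convention the last paragraph above anticipates for host huge pages: whenever "fake" is set, no guest page table at the pte level has to be touched.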
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/gmap.c | 29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index f0b2a53..de7ad7b 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -20,6 +20,8 @@
#include <asm/gmap.h>
#include <asm/tlb.h>
+#define GMAP_SHADOW_FAKE_TABLE 1ULL
+
/**
* gmap_alloc - allocate and initialize a guest address space
* @mm: pointer to the parent mm_struct
@@ -1521,6 +1523,8 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
/* mark as invalid as long as the parent table is not protected */
*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= (r2t & _REGION_ENTRY_PROTECT);
list_add(&page->lru, &sg->crst_list);
spin_unlock(&sg->guest_table_lock);
/* Make r2t read-only in parent gmap page table */
@@ -1592,6 +1596,8 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
/* mark as invalid as long as the parent table is not protected */
*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= (r3t & _REGION_ENTRY_PROTECT);
list_add(&page->lru, &sg->crst_list);
spin_unlock(&sg->guest_table_lock);
/* Make r3t read-only in parent gmap page table */
@@ -1664,6 +1670,8 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
/* mark as invalid as long as the parent table is not protected */
*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
+ if (sg->edat_level >= 1)
+ *table |= sgt & _REGION_ENTRY_PROTECT;
list_add(&page->lru, &sg->crst_list);
spin_unlock(&sg->guest_table_lock);
/* Make sgt read-only in parent gmap page table */
@@ -1698,6 +1706,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
* @saddr: the address in the shadow guest address space
* @pgt: parent gmap address of the page table to get shadowed
* @dat_protection: if the pgtable is marked as protected by dat
+ * @fake: pgt references contiguous guest memory block, not a pgtable
*
* Returns 0 if the shadow page table was found and -EAGAIN if the page
* table was not found.
@@ -1705,7 +1714,8 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
* Called with sg->mm->mmap_sem in read.
*/
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
- unsigned long *pgt, int *dat_protection)
+ unsigned long *pgt, int *dat_protection,
+ int *fake)
{
unsigned long *table;
struct page *page;
@@ -1717,8 +1727,9 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
/* Shadow page tables are full pages (pte+pgste) */
page = pfn_to_page(*table >> PAGE_SHIFT);
- *pgt = page->index;
+ *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
+ *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
rc = 0;
} else {
rc = -EAGAIN;
@@ -1734,6 +1745,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
* @pgt: parent gmap address of the page table to get shadowed
+ * @fake: pgt references contiguous guest memory block, not a pgtable
*
* Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
* shadow table structure is incomplete, -ENOMEM if out of memory,
@@ -1741,19 +1753,22 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
*
* Called with gmap->mm->mmap_sem in read
*/
-int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
+int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
+ int fake)
{
unsigned long raddr, origin;
unsigned long *s_pgt, *table;
struct page *page;
int rc;
- BUG_ON(!gmap_is_shadow(sg));
+ BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
/* Allocate a shadow page table */
page = page_table_alloc_pgste(sg->mm);
if (!page)
return -ENOMEM;
page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
+ if (fake)
+ page->index |= GMAP_SHADOW_FAKE_TABLE;
s_pgt = (unsigned long *) page_to_phys(page);
/* Install shadow page table */
spin_lock(&sg->guest_table_lock);
@@ -1773,6 +1788,12 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
(pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
list_add(&page->lru, &sg->pt_list);
+ if (fake) {
+ /* nothing to protect for fake tables */
+ *table &= ~_SEGMENT_ENTRY_INVALID;
+ spin_unlock(&sg->guest_table_lock);
+ return 0;
+ }
spin_unlock(&sg->guest_table_lock);
/* Make pgt read-only in parent gmap page table (not the pgste) */
raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;