author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-02-09 18:24:35 +0100
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-02-09 18:24:40 +0100
commit    146e4b3c8b92071b18f0b2e6f47165bad4f9e825 (patch)
tree      7e9db61cacca0f55ce34db089f27fc22a56ebbdd /arch/s390/mm/pgtable.c
parent    0c1f1dcd8c7792aeff6ef62e9508b0041928ab87 (diff)
[S390] 1K/2K page table pages.

This patch implements 1K/2K page table pages for s390.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm/pgtable.c')
-rw-r--r--  arch/s390/mm/pgtable.c | 102
1 file changed, 81 insertions(+), 21 deletions(-)
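The idea of the patch: on 64-bit a page table needs only 256 entries (2K), and on 31-bit only 1K, so a 4K page can hold TABLES_PER_PAGE tables, with the low bits of page->flags recording which fragments are in use. Below is a minimal userspace sketch of that fragment bookkeeping; the names (frag_page, frag_alloc, frag_free) are hypothetical, and the real code additionally handles locking, the per-mm pgtable_list, and the noexec shadow tables that claim two bits per allocation.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define TABLES_PER_PAGE 2UL     /* 64-bit case: two 2K page tables per 4K page */
#define FRAG_MASK       3UL     /* low flags bits used as in-use mask */
#define FRAG_SIZE       (PAGE_SIZE / TABLES_PER_PAGE)

/* Hypothetical stand-in for struct page plus its backing memory. */
struct frag_page {
        unsigned long flags;
        unsigned char mem[PAGE_SIZE];
};

/* Hand out the first free fragment, or NULL if the page is full. */
static void *frag_alloc(struct frag_page *p)
{
        unsigned long bit = 1UL;
        unsigned char *table = p->mem;

        if ((p->flags & FRAG_MASK) == (1UL << TABLES_PER_PAGE) - 1)
                return NULL;            /* all fragments in use */
        while (p->flags & bit) {        /* skip taken fragments */
                table += FRAG_SIZE;
                bit <<= 1;
        }
        p->flags |= bit;
        return table;
}

/* Clear the in-use bit derived from the fragment's offset in the page. */
static void frag_free(struct frag_page *p, void *table)
{
        unsigned long off = (unsigned char *) table - p->mem;

        p->flags ^= 1UL << (off / FRAG_SIZE);
}

int main(void)
{
        struct frag_page p = { 0 };
        void *a = frag_alloc(&p);
        void *b = frag_alloc(&p);

        printf("a=%p b=%p flags=%#lx full=%s\n",
               a, b, p.flags, frag_alloc(&p) ? "no" : "yes");
        frag_free(&p, a);
        printf("after freeing a: flags=%#lx\n", p.flags);
        return 0;
}

page_table_alloc() in the patch does the same scan under mm->page_table_lock, and moves a fully used page to the tail of mm->context.pgtable_list so the next allocation finds a page with free fragments at the front of the list.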
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019f518..809e7789 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -26,8 +26,14 @@
 
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER 1
+#define TABLES_PER_PAGE 4
+#define FRAG_MASK 15UL
+#define SECOND_HALVES 10UL
 #else
 #define ALLOC_ORDER 2
+#define TABLES_PER_PAGE 2
+#define FRAG_MASK 3UL
+#define SECOND_HALVES 2UL
 #endif
 
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -45,13 +51,20 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
                 }
                 page->index = page_to_phys(shadow);
         }
+        spin_lock(&mm->page_table_lock);
+        list_add(&page->lru, &mm->context.crst_list);
+        spin_unlock(&mm->page_table_lock);
         return (unsigned long *) page_to_phys(page);
 }
 
-void crst_table_free(unsigned long *table)
+void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
         unsigned long *shadow = get_shadow_table(table);
+        struct page *page = virt_to_page(table);
 
+        spin_lock(&mm->page_table_lock);
+        list_del(&page->lru);
+        spin_unlock(&mm->page_table_lock);
         if (shadow)
                 free_pages((unsigned long) shadow, ALLOC_ORDER);
         free_pages((unsigned long) table, ALLOC_ORDER);
@@ -60,37 +73,84 @@ void crst_table_free(unsigned long *table)
 /*
  * page table entry allocation/free routines.
  */
-unsigned long *page_table_alloc(int noexec)
+unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-        struct page *page = alloc_page(GFP_KERNEL);
+        struct page *page;
         unsigned long *table;
+        unsigned long bits;
 
-        if (!page)
-                return NULL;
-        page->index = 0;
-        if (noexec) {
-                struct page *shadow = alloc_page(GFP_KERNEL);
-                if (!shadow) {
-                        __free_page(page);
+        bits = mm->context.noexec ? 3UL : 1UL;
+        spin_lock(&mm->page_table_lock);
+        page = NULL;
+        if (!list_empty(&mm->context.pgtable_list)) {
+                page = list_first_entry(&mm->context.pgtable_list,
+                                        struct page, lru);
+                if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+                        page = NULL;
+        }
+        if (!page) {
+                spin_unlock(&mm->page_table_lock);
+                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+                if (!page)
                         return NULL;
-                }
-                table = (unsigned long *) page_to_phys(shadow);
+                pgtable_page_ctor(page);
+                page->flags &= ~FRAG_MASK;
+                table = (unsigned long *) page_to_phys(page);
                 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
-                page->index = (addr_t) table;
+                spin_lock(&mm->page_table_lock);
+                list_add(&page->lru, &mm->context.pgtable_list);
         }
-        pgtable_page_ctor(page);
         table = (unsigned long *) page_to_phys(page);
-        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+        while (page->flags & bits) {
+                table += 256;
+                bits <<= 1;
+        }
+        page->flags |= bits;
+        if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
+                list_move_tail(&page->lru, &mm->context.pgtable_list);
+        spin_unlock(&mm->page_table_lock);
         return table;
 }
 
-void page_table_free(unsigned long *table)
+void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
-        unsigned long *shadow = get_shadow_pte(table);
+        struct page *page;
+        unsigned long bits;
 
-        pgtable_page_dtor(virt_to_page(table));
-        if (shadow)
-                free_page((unsigned long) shadow);
-        free_page((unsigned long) table);
+        bits = mm->context.noexec ? 3UL : 1UL;
+        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
+        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+        spin_lock(&mm->page_table_lock);
+        page->flags ^= bits;
+        if (page->flags & FRAG_MASK) {
+                /* Page now has some free pgtable fragments. */
+                list_move(&page->lru, &mm->context.pgtable_list);
+                page = NULL;
+        } else
+                /* All fragments of the 4K page have been freed. */
+                list_del(&page->lru);
+        spin_unlock(&mm->page_table_lock);
+        if (page) {
+                pgtable_page_dtor(page);
+                __free_page(page);
+        }
+}
+void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
+{
+        struct page *page;
+
+        spin_lock(&mm->page_table_lock);
+        /* Free shadow region and segment tables. */
+        list_for_each_entry(page, &mm->context.crst_list, lru)
+                if (page->index) {
+                        free_pages((unsigned long) page->index, ALLOC_ORDER);
+                        page->index = 0;
+                }
+        /* "Free" second halves of page tables. */
+        list_for_each_entry(page, &mm->context.pgtable_list, lru)
+                page->flags &= ~SECOND_HALVES;
+        spin_unlock(&mm->page_table_lock);
+        mm->context.noexec = 0;
+        update_mm(mm, tsk);
 }
 
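On the free path, page_table_free() recovers the fragment bit purely from the table address: a fragment holds 256 entries of sizeof(unsigned long) bytes each, so the offset within the 4K page divided by 256 * sizeof(unsigned long) is the fragment index. A standalone demo of that arithmetic with made-up physical addresses, assuming a 64-bit host where sizeof(unsigned long) == 8 and fragments are 2K (matching TABLES_PER_PAGE == 2):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        /* Made-up physical addresses of two page tables in one 4K page. */
        unsigned long pa[] = { 0x12340000UL, 0x12340800UL };

        for (int i = 0; i < 2; i++) {
                /* Same computation as page_table_free():
                 * offset-in-page / (256 entries * sizeof(unsigned long)). */
                unsigned long idx = (pa[i] & (PAGE_SIZE - 1)) / 256
                                    / sizeof(unsigned long);

                printf("table at %#lx -> fragment %lu (flag bit %#lx)\n",
                       pa[i], idx, 1UL << idx);
        }
        return 0;
}

With noexec enabled the starting mask is 3UL rather than 1UL, so the shift marks both the table and its shadow in the second half of the page; disable_noexec() can then "free" all second halves at once by clearing SECOND_HALVES from page->flags.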