author	Arjan van de Ven <arjan@infradead.org>	2008-01-30 13:34:06 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 13:34:06 +0100
commit	75cbade8ea3127a84e3da7c2c15808e54f0df7e8 (patch)
tree	b91fd5e8f9f5f8da0da55a7783c70fc3149cbf2d
parent	e81d5dc41b67349c06e80658227c9156738f0df1 (diff)
x86: a new API for drivers/etc to control cache and other page attributes
Right now, if drivers or other code want to change, say, a cache attribute of
a page, the only API they have is change_page_attr(). c-p-a is a really bad
API for this, because it forces the caller to know *ALL* the attributes it
wants for the page, not just the one thing it wants to change. So code that
wants to set a page uncacheable also needs to be aware of the NX status, and
so on.

This patch introduces a set of new APIs for this, set_pages_<attr> and
set_memory_<attr>, that offer a logical change to the user, and leave all
attributes not implied by the requested logical change alone.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/mm/pageattr.c	197
-rw-r--r--	include/asm-x86/cacheflush.h	15
2 files changed, 212 insertions(+), 0 deletions(-)
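
As a usage illustration (not part of the patch): a driver that previously had
to construct a complete pgprot_t for change_page_attr() can now request only
the logical change it cares about. The sketch below is hypothetical and
assumes a low-memory buffer obtained from __get_free_pages(); the buffer and
function names are made up.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

#define DESC_ORDER 2			/* 4 pages, purely illustrative */

static unsigned long desc_buf;

static int example_setup(void)
{
	desc_buf = __get_free_pages(GFP_KERNEL, DESC_ORDER);
	if (!desc_buf)
		return -ENOMEM;

	/* Only the caching attribute changes; RW/NX bits stay as they are. */
	set_memory_uc(desc_buf, 1 << DESC_ORDER);
	global_flush_tlb();
	return 0;
}

static void example_teardown(void)
{
	/* Restore write-back caching before handing the pages back. */
	set_memory_wb(desc_buf, 1 << DESC_ORDER);
	global_flush_tlb();
	free_pages(desc_buf, DESC_ORDER);
}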
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4208571..f3c9510b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -211,6 +211,8 @@ repeat:
* mem_map entry (pfn_valid() is false).
*
* See change_page_attr() documentation for more details.
+ *
+ * Modules and drivers should use the set_memory_* APIs instead.
*/
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
@@ -273,6 +275,8 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
* (e.g. in user space)
* This function only deals with the kernel linear map.
*
* For MMIO areas without mem_map use change_page_attr_addr() instead.
+ *
+ * Modules and drivers should use the set_pages_* APIs instead.
*/
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
@@ -282,6 +286,199 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}
EXPORT_SYMBOL(change_page_attr);
+/**
+ * change_page_attr_set - Change page table attributes in the linear mapping.
+ * @addr: Virtual address in linear mapping.
+ * @numpages: Number of pages to change
+ * @prot: Protection/caching type bits to set (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space)
+ * This function only deals with the kernel linear map.
+ *
+ * This function is different from change_page_attr() in that only selected bits
+ * are impacted; all other bits remain as is.
+ */
+int change_page_attr_set(unsigned long addr, int numpages, pgprot_t prot)
+{
+ pgprot_t current_prot;
+ int level;
+ pte_t *pte;
+
+ pte = lookup_address(addr, &level);
+ if (pte)
+ current_prot = pte_pgprot(*pte);
+ else
+ pgprot_val(current_prot) = 0;
+
+ pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+
+ return change_page_attr_addr(addr, numpages, prot);
+}
+
+/**
+ * change_page_attr_clear - Change page table attributes in the linear mapping.
+ * @addr: Virtual address in linear mapping.
+ * @numpages: Number of pages to change
+ * @prot: Protection/caching type bits to clear (PAGE_*)
+ *
+ * Returns 0 on success, otherwise a negated errno.
+ *
+ * This should be used when a page is mapped with a different caching policy
+ * than write-back somewhere - some CPUs do not like it when mappings with
+ * different caching policies exist. This changes the page attributes of the
+ * in kernel linear mapping too.
+ *
+ * Caller must call global_flush_tlb() later to make the changes active.
+ *
+ * The caller needs to ensure that there are no conflicting mappings elsewhere
+ * (e.g. in user space)
+ * This function only deals with the kernel linear map.
+ *
+ * This function is different from change_page_attr() in that only selected bits
+ * are impacted; all other bits remain as is.
+ */
+int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
+{
+ pgprot_t current_prot;
+ int level;
+ pte_t *pte;
+
+ pte = lookup_address(addr, &level);
+ if (pte)
+ current_prot = pte_pgprot(*pte);
+ else
+ pgprot_val(current_prot) = 0;
+
+ pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);
+
+ return change_page_attr_addr(addr, numpages, prot);
+}
+
+
+
+int set_memory_uc(unsigned long addr, int numpages)
+{
+ pgprot_t uncached;
+
+ pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
+ return change_page_attr_set(addr, numpages, uncached);
+}
+EXPORT_SYMBOL(set_memory_uc);
+
+int set_memory_wb(unsigned long addr, int numpages)
+{
+ pgprot_t uncached;
+
+ pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
+ return change_page_attr_clear(addr, numpages, uncached);
+}
+EXPORT_SYMBOL(set_memory_wb);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ pgprot_t nx;
+
+ pgprot_val(nx) = _PAGE_NX;
+ return change_page_attr_clear(addr, numpages, nx);
+}
+EXPORT_SYMBOL(set_memory_x);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ pgprot_t nx;
+
+ pgprot_val(nx) = _PAGE_NX;
+ return change_page_attr_set(addr, numpages, nx);
+}
+EXPORT_SYMBOL(set_memory_nx);
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ pgprot_t rw;
+
+ pgprot_val(rw) = _PAGE_RW;
+ return change_page_attr_clear(addr, numpages, rw);
+}
+EXPORT_SYMBOL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ pgprot_t rw;
+
+ pgprot_val(rw) = _PAGE_RW;
+ return change_page_attr_set(addr, numpages, rw);
+}
+EXPORT_SYMBOL(set_memory_rw);
+
+int set_pages_uc(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t uncached;
+
+ pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
+ return change_page_attr_set(addr, numpages, uncached);
+}
+EXPORT_SYMBOL(set_pages_uc);
+
+int set_pages_wb(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t uncached;
+
+ pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
+ return change_page_attr_clear(addr, numpages, uncached);
+}
+EXPORT_SYMBOL(set_pages_wb);
+
+int set_pages_x(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t nx;
+
+ pgprot_val(nx) = _PAGE_NX;
+ return change_page_attr_clear(addr, numpages, nx);
+}
+EXPORT_SYMBOL(set_pages_x);
+
+int set_pages_nx(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t nx;
+
+ pgprot_val(nx) = _PAGE_NX;
+ return change_page_attr_set(addr, numpages, nx);
+}
+EXPORT_SYMBOL(set_pages_nx);
+
+int set_pages_ro(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t rw;
+
+ pgprot_val(rw) = _PAGE_RW;
+ return change_page_attr_clear(addr, numpages, rw);
+}
+EXPORT_SYMBOL(set_pages_ro);
+
+int set_pages_rw(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+ pgprot_t rw;
+
+ pgprot_val(rw) = _PAGE_RW;
+ return change_page_attr_set(addr, numpages, rw);
+}
+EXPORT_SYMBOL(set_pages_rw);
+
+
void clflush_cache_range(void *addr, int size)
{
int i;
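
To make the set/clear split concrete: set_pages_rw()/set_pages_ro() above only
OR in or mask out _PAGE_RW via change_page_attr_set()/change_page_attr_clear(),
so the caching and NX attributes of the page are left untouched. A hedged
sketch of how a caller might use that pair to briefly lift write protection
follows; the page pointer and the patching step are hypothetical.

#include <linux/mm.h>
#include <asm/cacheflush.h>

static void example_patch_page(struct page *pg)
{
	/* Set only _PAGE_RW; caching and NX attributes are preserved. */
	set_pages_rw(pg, 1);
	global_flush_tlb();

	/* ... modify the (normally read-only) page contents here ... */

	/* Clear _PAGE_RW again to restore the protection. */
	set_pages_ro(pg, 1);
	global_flush_tlb();
}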
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index fccb563..7b928d5 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -27,6 +27,21 @@
void global_flush_tlb(void);
int change_page_attr(struct page *page, int numpages, pgprot_t prot);
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot);
+
+int set_pages_uc(struct page *page, int numpages);
+int set_pages_wb(struct page *page, int numpages);
+int set_pages_x(struct page *page, int numpages);
+int set_pages_nx(struct page *page, int numpages);
+int set_pages_ro(struct page *page, int numpages);
+int set_pages_rw(struct page *page, int numpages);
+
+int set_memory_uc(unsigned long addr, int numpages);
+int set_memory_wb(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+
void clflush_cache_range(void *addr, int size);
#ifdef CONFIG_DEBUG_RODATA
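
The prototypes above are what callers pick up via <asm/cacheflush.h>. As one
more hedged example of the single-attribute semantics, the sketch below marks
a page of generated code executable with set_memory_x() and restores NX before
freeing it; the trampoline name and the code-emission step are hypothetical.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/cacheflush.h>

static unsigned long trampoline;

static int example_make_executable(void)
{
	trampoline = __get_free_page(GFP_KERNEL);
	if (!trampoline)
		return -ENOMEM;

	/* ... emit code into the page here ... */

	/* Clears only _PAGE_NX; caching and RW bits are untouched. */
	set_memory_x(trampoline, 1);
	global_flush_tlb();
	return 0;
}

static void example_free_trampoline(void)
{
	/* Re-set _PAGE_NX before returning the page to the allocator. */
	set_memory_nx(trampoline, 1);
	global_flush_tlb();
	free_page(trampoline);
}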