From 41e94a851304f7acac840adec4004f8aeee53ad4 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Aug 2015 16:00:35 +0200
Subject: add devm_memremap_pages

This behaves like devm_memremap except that it ensures we have page
structures available that can back the region.

Signed-off-by: Christoph Hellwig
[djbw: catch attempts to remap RAM, drop flags]
Signed-off-by: Dan Williams
---
 include/linux/io.h | 20 ++++++++++++++++++++
 kernel/memremap.c  | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 73 insertions(+)

diff --git a/include/linux/io.h b/include/linux/io.h
index d8d749a..de64c1e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -20,10 +20,13 @@
 
 #include <linux/types.h>
 #include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/err.h>
 #include <asm/io.h>
 #include <asm/page.h>
 
 struct device;
+struct resource;
 
 __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -84,6 +87,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
 		size_t size, unsigned long flags);
 void devm_memunmap(struct device *dev, void *addr);
 
+void *__devm_memremap_pages(struct device *dev, struct resource *res);
+
+#ifdef CONFIG_ZONE_DEVICE
+void *devm_memremap_pages(struct device *dev, struct resource *res);
+#else
+static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	/*
+	 * Fail attempts to call devm_memremap_pages() without
+	 * ZONE_DEVICE support enabled, this requires callers to fall
+	 * back to plain devm_memremap() based on config
+	 */
+	WARN_ON_ONCE(1);
+	return ERR_PTR(-ENXIO);
+}
+#endif
+
 /*
  * Some systems do not have legacy ISA devices.
  * /dev/port is not a valid interface on these systems.
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5c9b55e..72b0c66 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/memory_hotplug.h>
 
 #ifndef ioremap_cache
 /* temporary while we convert existing ioremap_cache users to memremap */
@@ -135,3 +136,55 @@ void devm_memunmap(struct device *dev, void *addr)
 	memunmap(addr);
 }
 EXPORT_SYMBOL(devm_memunmap);
+
+#ifdef CONFIG_ZONE_DEVICE
+struct page_map {
+	struct resource res;
+};
+
+static void devm_memremap_pages_release(struct device *dev, void *res)
+{
+	struct page_map *page_map = res;
+
+	/* pages are dead and unused, undo the arch mapping */
+	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+}
+
+void *devm_memremap_pages(struct device *dev, struct resource *res)
+{
+	int is_ram = region_intersects(res->start, resource_size(res),
+			"System RAM");
+	struct page_map *page_map;
+	int error, nid;
+
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
+				__func__, res);
+		return ERR_PTR(-ENXIO);
+	}
+
+	if (is_ram == REGION_INTERSECTS)
+		return __va(res->start);
+
+	page_map = devres_alloc(devm_memremap_pages_release,
+			sizeof(*page_map), GFP_KERNEL);
+	if (!page_map)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(&page_map->res, res, sizeof(*res));
+
+	nid = dev_to_node(dev);
+	if (nid < 0)
+		nid = 0;
+
+	error = arch_add_memory(nid, res->start, resource_size(res), true);
+	if (error) {
+		devres_free(page_map);
+		return ERR_PTR(error);
+	}
+
+	devres_add(dev, page_map);
+	return __va(res->start);
+}
+EXPORT_SYMBOL(devm_memremap_pages);
+#endif /* CONFIG_ZONE_DEVICE */
-- 
cgit v1.1
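
As a usage illustration only, not part of the commit: a minimal sketch of how a
driver's probe routine might consume the interface added above. The
example_probe() function, the use of the device's first MEM resource, and the
MEMREMAP_WB fallback flag are assumptions made for the sketch, not anything
mandated by this patch.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical probe: back a device memory range with page structures. */
static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *addr;

	/* Assumption: the range to map is the device's first MEM resource. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/* Map the range and create struct pages that can back it. */
	addr = devm_memremap_pages(&pdev->dev, res);
	if (IS_ERR(addr)) {
		/*
		 * Without CONFIG_ZONE_DEVICE the stub returns
		 * ERR_PTR(-ENXIO); a caller may fall back to a plain
		 * mapping with no page structures, as the stub's
		 * comment suggests.
		 */
		addr = devm_memremap(&pdev->dev, res->start,
				resource_size(res), MEMREMAP_WB);
		if (IS_ERR(addr))
			return PTR_ERR(addr);
	}

	dev_info(&pdev->dev, "mapped %pR at %p\n", res, addr);
	return 0;
}

Because the mapping is registered as a device-managed resource, the arch
mapping set up by arch_add_memory() is undone automatically through
devm_memremap_pages_release() when the device is unbound, so the sketch needs
no explicit cleanup path.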