Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                      |  89
-rw-r--r--  drivers/base/Makefile                     |   1
-rw-r--r--  drivers/base/bus.c                        |   3
-rw-r--r--  drivers/base/core.c                       |  58
-rw-r--r--  drivers/base/cpu.c                        |   4
-rw-r--r--  drivers/base/devres.c                     |  35
-rw-r--r--  drivers/base/devtmpfs.c                   |   6
-rw-r--r--  drivers/base/dma-buf.c                    |  12
-rw-r--r--  drivers/base/dma-coherent.c               |  42
-rw-r--r--  drivers/base/dma-contiguous.c             | 401
-rw-r--r--  drivers/base/driver.c                     |   2
-rw-r--r--  drivers/base/power/domain.c               | 176
-rw-r--r--  drivers/base/power/domain_governor.c      | 166
-rw-r--r--  drivers/base/power/main.c                 |  10
-rw-r--r--  drivers/base/power/qos.c                  |  19
-rw-r--r--  drivers/base/power/runtime.c              | 103
-rw-r--r--  drivers/base/power/sysfs.c                |  54
-rw-r--r--  drivers/base/power/wakeup.c               | 174
-rw-r--r--  drivers/base/regmap/Kconfig               |   4
-rw-r--r--  drivers/base/regmap/Makefile              |   1
-rw-r--r--  drivers/base/regmap/internal.h            |  26
-rw-r--r--  drivers/base/regmap/regcache-lzo.c        |  11
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c     |  44
-rw-r--r--  drivers/base/regmap/regcache.c            |  34
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c      |  18
-rw-r--r--  drivers/base/regmap/regmap-i2c.c          |  13
-rw-r--r--  drivers/base/regmap/regmap-irq.c          | 184
-rw-r--r--  drivers/base/regmap/regmap-mmio.c         | 224
-rw-r--r--  drivers/base/regmap/regmap-spi.c          |  13
-rw-r--r--  drivers/base/regmap/regmap.c              | 276
30 files changed, 1758 insertions(+), 445 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618a..9b21469 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
drivers.
+config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+ hardware components that support neither I/O mapping nor scatter-gather.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 16
+ help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 10
+ help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 9
+ default 8
+ help
+ The DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a waste of memory. With this parameter
+ you can specify the maximum PAGE_SIZE order for contiguous buffers.
+ Larger buffers will be aligned only to this specified order. The order
+ is expressed as a power of two multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+config CMA_AREAS
+ int "Maximum count of the CMA device-private areas"
+ default 7
+ help
+ CMA allows creating CMA areas for particular devices. This parameter
+ sets the maximum number of such device-private CMA areas in the
+ system.
+
+ If unsure, leave the default value "7".
+
+endif
+
endmenu
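
For a concrete sense of the CMA_ALIGNMENT cap described above, here is a
standalone sketch (not kernel code; the page size and order cap below are
assumed values matching the defaults):

#include <stdio.h>

/* Sketch of the alignment rule from the CMA_ALIGNMENT help text.
 * Assumes 4 KiB pages and CONFIG_CMA_ALIGNMENT=8 (the default). */
#define PAGE_SIZE_BYTES		4096UL
#define CMA_ALIGNMENT_ORDER	8

static unsigned long cma_alignment_bytes(unsigned long size)
{
	unsigned long order = 0;

	/* Smallest PAGE_SIZE order >= the requested size... */
	while ((PAGE_SIZE_BYTES << order) < size)
		order++;

	/* ...capped at the configured maximum order. */
	if (order > CMA_ALIGNMENT_ORDER)
		order = CMA_ALIGNMENT_ORDER;

	return PAGE_SIZE_BYTES << order;
}

int main(void)
{
	/* A 5 MiB buffer is aligned to 1 MiB instead of 8 MiB. */
	printf("%lu\n", cma_alignment_bytes(5UL << 20)); /* 1048576 */
	return 0;
}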
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c..5aa2d70 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 26a06b8..2bcef65 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -21,8 +21,7 @@
#include "power/power.h"
/* /sys/devices/system */
-/* FIXME: make static after drivers/base/sys.c is deleted */
-struct kset *system_kset;
+static struct kset *system_kset;
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e28ce98..346be8b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -25,6 +25,7 @@
#include <linux/mutex.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <linux/netdevice.h>
#include "base.h"
#include "power/power.h"
@@ -65,7 +66,7 @@ static inline int device_is_not_partition(struct device *dev)
* @dev: struct device to get the name of
*
* Will return the device's driver's name if it is bound to a device. If
- * the device is not bound to a device, it will return the name of the bus
+ * the device is not bound to a driver, it will return the name of the bus
* it is attached to. If it is not attached to a bus either, an empty
* string will be returned.
*/
@@ -878,8 +879,8 @@ EXPORT_SYMBOL_GPL(dev_set_name);
* to NULL prevents an entry from being created. class->dev_kobj must
* be set (or cleared) before any devices are registered to the class
* otherwise device_create_sys_dev_entry() and
- * device_remove_sys_dev_entry() will disagree about the the presence
- * of the link.
+ * device_remove_sys_dev_entry() will disagree about the presence of
+ * the link.
*/
static struct kobject *device_to_dev_kobj(struct device *dev)
{
@@ -1843,15 +1844,60 @@ void device_shutdown(void)
*/
#ifdef CONFIG_PRINTK
-
int __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
+ char dict[128];
+ size_t dictlen = 0;
+ const char *subsys;
+
if (!dev)
return printk("%s(NULL device *): %pV", level, vaf);
- return printk("%s%s %s: %pV",
- level, dev_driver_string(dev), dev_name(dev), vaf);
+ if (dev->class)
+ subsys = dev->class->name;
+ else if (dev->bus)
+ subsys = dev->bus->name;
+ else
+ goto skip;
+
+ dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
+ "SUBSYSTEM=%s", subsys);
+
+ /*
+ * Add device identifier DEVICE=:
+ * b12:8 block dev_t
+ * c127:3 char dev_t
+ * n8 netdev ifindex
+ * +sound:card0 subsystem:devname
+ */
+ if (MAJOR(dev->devt)) {
+ char c;
+
+ if (strcmp(subsys, "block") == 0)
+ c = 'b';
+ else
+ c = 'c';
+ dictlen++;
+ dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
+ "DEVICE=%c%u:%u",
+ c, MAJOR(dev->devt), MINOR(dev->devt));
+ } else if (strcmp(subsys, "net") == 0) {
+ struct net_device *net = to_net_dev(dev);
+
+ dictlen++;
+ dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
+ "DEVICE=n%u", net->ifindex);
+ } else {
+ dictlen++;
+ dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
+ "DEVICE=+%s:%s", subsys, dev_name(dev));
+ }
+skip:
+ return printk_emit(0, level[1] - '0',
+ dictlen ? dict : NULL, dictlen,
+ "%s %s: %pV",
+ dev_driver_string(dev), dev_name(dev), vaf);
}
EXPORT_SYMBOL(__dev_printk);
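
The snprintf/dictlen++ pattern above builds NUL-separated key=value records
("SUBSYSTEM=...", "DEVICE=...") which printk_emit() attaches to the message
as a dictionary. A userspace sketch of the same record layout, with invented
device numbers, purely for illustration:

#include <stdio.h>
#include <string.h>

/* Illustration only: mimics the dictionary layout built above.
 * "block" and 8:0 are made-up example values. */
int main(void)
{
	char dict[128];
	size_t n;

	n = snprintf(dict, sizeof(dict), "SUBSYSTEM=%s", "block");
	n++;	/* keep the '\0' as the record separator */
	snprintf(dict + n, sizeof(dict) - n, "DEVICE=%c%u:%u", 'b', 8, 0);

	printf("%s\n%s\n", dict, dict + strlen(dict) + 1);
	/* Prints: SUBSYSTEM=block and DEVICE=b8:0 */
	return 0;
}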
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index adf937b..6345294 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -330,8 +330,4 @@ void __init cpu_dev_init(void)
panic("Failed to register CPU subsystem");
cpu_dev_register_generic();
-
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
-#endif
}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 524bf96..2360adb 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -309,6 +309,10 @@ EXPORT_SYMBOL_GPL(devres_remove);
* which @match returns 1. If @match is NULL, it's considered to
* match all. If found, the resource is removed atomically and freed.
*
+ * Note that the release function for the resource will not be called,
+ * only the devres-allocated data will be freed. The caller becomes
+ * responsible for freeing any other data.
+ *
* RETURNS:
* 0 if devres is found and freed, -ENOENT if not found.
*/
@@ -326,6 +330,37 @@ int devres_destroy(struct device *dev, dr_release_t release,
}
EXPORT_SYMBOL_GPL(devres_destroy);
+
+/**
+ * devres_release - Find a device resource and destroy it, calling release
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1. If @match is NULL, it's considered to
+ * match all. If found, the resource is removed atomically, the
+ * release function called and the resource freed.
+ *
+ * RETURNS:
+ * 0 if devres is found and freed, -ENOENT if not found.
+ */
+int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ void *res;
+
+ res = devres_remove(dev, release, match, match_data);
+ if (unlikely(!res))
+ return -ENOENT;
+
+ (*release)(dev, res);
+ devres_free(res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devres_release);
+
static int remove_nodes(struct device *dev,
struct list_head *first, struct list_head *end,
struct list_head *todo)
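
The difference between devres_destroy() and the new devres_release() in a
hypothetical driver fragment (all foo_* names are invented; a clock is just
one example of a wrapped resource):

/* Release callback and match function for a wrapped clock. */
static void foo_clock_release(struct device *dev, void *res)
{
	clk_disable_unprepare(*(struct clk **)res);
}

static int foo_clock_match(struct device *dev, void *res, void *data)
{
	return *(struct clk **)res == data;
}

static void foo_drop_clock(struct device *dev, struct clk *clk)
{
	/*
	 * devres_release(): the entry is removed, foo_clock_release()
	 * is called and the entry is freed - nothing else to do.
	 * devres_destroy() would only remove and free the entry, so the
	 * caller would have to disable the clock itself, as the new
	 * kerneldoc note above spells out.
	 */
	WARN_ON(devres_release(dev, foo_clock_release,
			       foo_clock_match, clk));
}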
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8493536..765c3a2 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -7,9 +7,9 @@
* devtmpfs, a tmpfs-based filesystem is created. Every driver-core
* device which requests a device node, will add a node in this
* filesystem.
- * By default, all devices are named after the the name of the
- * device, owned by root and have a default mode of 0600. Subsystems
- * can overwrite the default setting if needed.
+ * By default, all devices are named after the name of the device,
+ * owned by root and have a default mode of 0600. Subsystems can
+ * overwrite the default setting if needed.
*/
#include <linux/kernel.h>
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 20258e1..24e88fe 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
* cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
- * @dma_buf: [in] buffer to prepare cpu access for.
+ * @dmabuf: [in] buffer to prepare cpu access for.
* @start: [in] start of range for cpu access.
* @len: [in] length of range for cpu access.
* @direction: [in] direction of cpu access.
@@ -339,7 +339,7 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
- * @dma_buf: [in] buffer to complete cpu access for.
+ * @dmabuf: [in] buffer to complete cpu access for.
* @start: [in] start of range for cpu access.
* @len: [in] length of range for cpu access.
* @direction: [in] direction of cpu access.
@@ -359,7 +359,7 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
/**
* dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
* space. The same restrictions as for kmap_atomic and friends apply.
- * @dma_buf: [in] buffer to map page from.
+ * @dmabuf: [in] buffer to map page from.
* @page_num: [in] page in PAGE_SIZE units to map.
*
* This call must always succeed, any necessary preparations that might fail
@@ -375,7 +375,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
/**
* dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dma_buf: [in] buffer to unmap page from.
+ * @dmabuf: [in] buffer to unmap page from.
* @page_num: [in] page in PAGE_SIZE units to unmap.
* @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
*
@@ -394,7 +394,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
/**
* dma_buf_kmap - Map a page of the buffer object into kernel address space. The
* same restrictions as for kmap and friends apply.
- * @dma_buf: [in] buffer to map page from.
+ * @dmabuf: [in] buffer to map page from.
* @page_num: [in] page in PAGE_SIZE units to map.
*
* This call must always succeed, any necessary preparations that might fail
@@ -410,7 +410,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kmap);
/**
* dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
- * @dma_buf: [in] buffer to unmap page from.
+ * @dmabuf: [in] buffer to unmap page from.
* @page_num: [in] page in PAGE_SIZE units to unmap.
* @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
*
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c..1b85949 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
+ phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr + size <=
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = size >> PAGE_SHIFT;
+
+ *ret = -ENXIO;
+ if (off < count && user_count <= count - off) {
+ unsigned pfn = mem->pfn_base + start + off;
+ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
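
A hedged sketch of how an architecture's dma_map_ops mmap path might use the
new helper (loosely modelled on the ARM code elsewhere in this series; the
fallback shown is illustrative, not a drop-in implementation):

static int foo_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	/* The per-device coherent pool takes precedence... */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* ...otherwise remap the generic allocation. */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(virt_to_phys(cpu_addr)) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}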
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 0000000..78efb03
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their
+ * system should use the cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
+ return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+ struct memblock_region *reg;
+ unsigned long total_pages = 0;
+
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ total_pages += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+ unsigned long selected_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ selected_size / SZ_1M);
+
+ dma_declare_contiguous(NULL, selected_size, 0, limit);
+ }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct cma *cma;
+ int ret = -ENOMEM;
+
+ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+ cma = kmalloc(sizeof *cma, GFP_KERNEL);
+ if (!cma)
+ return ERR_PTR(-ENOMEM);
+
+ cma->base_pfn = base_pfn;
+ cma->count = count;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ goto no_mem;
+
+ ret = cma_activate_area(base_pfn, count);
+ if (ret)
+ goto error;
+
+ pr_debug("%s: returned %p\n", __func__, (void *)cma);
+ return cma;
+
+error:
+ kfree(cma->bitmap);
+no_mem:
+ kfree(cma);
+ return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+ phys_addr_t start;
+ unsigned long size;
+ struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma_reserved *r = cma_reserved;
+ unsigned i = cma_reserved_count;
+
+ pr_debug("%s()\n", __func__);
+
+ for (; i; --i, ++r) {
+ struct cma *cma;
+ cma = cma_create_area(PFN_DOWN(r->start),
+ r->size >> PAGE_SHIFT);
+ if (!IS_ERR(cma))
+ dev_set_cma_area(r->dev, cma);
+ }
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ unsigned long alignment;
+
+ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+ (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit);
+
+ /* Sanity checks */
+ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size)
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+
+ /* Reserve memory */
+ if (base) {
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0) {
+ base = -EBUSY;
+ goto err;
+ }
+ } else {
+ /*
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ if (!addr) {
+ base = -ENOMEM;
+ goto err;
+ } else if (addr + size > ~(unsigned long)0) {
+ memblock_free(addr, size);
+ base = -EINVAL;
+ goto err;
+ } else {
+ base = addr;
+ }
+ }
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ r->start = base;
+ r->size = size;
+ r->dev = dev;
+ cma_reserved_count++;
+ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ (unsigned long)base);
+
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(base, size);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+{
+ unsigned long mask, pfn, pageno, start = 0;
+ struct cma *cma = dev_get_cma_area(dev);
+ int ret;
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ count, align);
+
+ if (!count)
+ return NULL;
+
+ mask = (1 << align) - 1;
+
+ mutex_lock(&cma_mutex);
+
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+ start, count, mask);
+ if (pageno >= cma->count) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pfn = cma->base_pfn + pageno;
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, count);
+ break;
+ } else if (ret != -EBUSY) {
+ goto error;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a bit different memory target */
+ start = pageno + mask + 1;
+ }
+
+ mutex_unlock(&cma_mutex);
+
+ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+ return pfn_to_page(pfn);
+error:
+ mutex_unlock(&cma_mutex);
+ return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ struct cma *cma = dev_get_cma_area(dev);
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ mutex_lock(&cma_mutex);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ free_contig_range(pfn, count);
+ mutex_unlock(&cma_mutex);
+
+ return true;
+}
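
Putting the new API together, a hypothetical board file and driver might use
it as below (foo_device, the buffer size and the alignment order are invented
for the example):

/* Board/arch init, while memblock is still active: reserve a 32 MiB
 * private CMA area for foo_device, anywhere in memory (limit 0). */
void __init foo_board_reserve(void)
{
	dma_declare_contiguous(&foo_device.dev, 32 * SZ_1M, 0, 0);
}

/* Driver, at runtime: a 4 MiB buffer is 1024 pages; align to
 * 2^8 pages, i.e. 1 MiB with 4 KiB pages. */
static struct page *foo_alloc_frame_buffer(struct device *dev)
{
	return dma_alloc_from_contiguous(dev, 1024, 8);
}

static void foo_free_frame_buffer(struct device *dev, struct page *pages)
{
	dma_release_from_contiguous(dev, pages, 1024);
}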
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 3ec3896..207c27d 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -80,7 +80,7 @@ struct device *driver_find_device(struct device_driver *drv,
struct klist_iter i;
struct device *dev;
- if (!drv)
+ if (!drv || !drv->p)
return NULL;
klist_iter_init_node(&drv->p->klist_devices, &i,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 73ce9fb..83aa694 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
@@ -38,11 +39,13 @@
ktime_t __start = ktime_get(); \
type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
- struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
- if (__elapsed > __gpd_data->td.field) { \
- __gpd_data->td.field = __elapsed; \
+ struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
+ if (!__retval && __elapsed > __td->field) { \
+ __td->field = __elapsed; \
dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
__elapsed); \
+ genpd->max_off_time_changed = true; \
+ __td->constraint_changed = true; \
} \
__retval; \
})
@@ -211,6 +214,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > genpd->power_on_latency_ns) {
genpd->power_on_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
if (genpd->name)
pr_warning("%s: Power-on latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -247,6 +251,53 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_RUNTIME
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct device *dev;
+
+ gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+
+ mutex_lock(&gpd_data->lock);
+ dev = gpd_data->base.dev;
+ if (!dev) {
+ mutex_unlock(&gpd_data->lock);
+ return NOTIFY_DONE;
+ }
+ mutex_unlock(&gpd_data->lock);
+
+ for (;;) {
+ struct generic_pm_domain *genpd;
+ struct pm_domain_data *pdd;
+
+ spin_lock_irq(&dev->power.lock);
+
+ pdd = dev->power.subsys_data ?
+ dev->power.subsys_data->domain_data : NULL;
+ if (pdd) {
+ to_gpd_data(pdd)->td.constraint_changed = true;
+ genpd = dev_to_genpd(dev);
+ } else {
+ genpd = ERR_PTR(-ENODATA);
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!IS_ERR(genpd)) {
+ mutex_lock(&genpd->lock);
+ genpd->max_off_time_changed = true;
+ mutex_unlock(&genpd->lock);
+ }
+
+ dev = dev->parent;
+ if (!dev || dev->power.ignore_children)
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
/**
* __pm_genpd_save_device - Save the pre-suspend state of a device.
* @pdd: Domain data of the device to save the state of.
@@ -435,6 +486,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > genpd->power_off_latency_ns) {
genpd->power_off_latency_ns = elapsed_ns;
+ genpd->max_off_time_changed = true;
if (genpd->name)
pr_warning("%s: Power-off latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -443,17 +495,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
genpd->status = GPD_STATE_POWER_OFF;
- genpd->power_off_time = ktime_get();
-
- /* Update PM QoS information for devices in the domain. */
- list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
- struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
-
- pm_runtime_update_max_time_suspended(pdd->dev,
- td->start_latency_ns +
- td->restore_state_latency_ns +
- genpd->power_on_latency_ns);
- }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -514,9 +555,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (ret)
return ret;
- pm_runtime_update_max_time_suspended(dev,
- dev_gpd_data(dev)->td.start_latency_ns);
-
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -613,6 +651,12 @@ void pm_genpd_poweroff_unused(void)
#else
+static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ return NOTIFY_DONE;
+}
+
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#define pm_genpd_runtime_suspend NULL
@@ -1209,12 +1253,15 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- genpd_acquire_lock(genpd);
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data)
+ return -ENOMEM;
- if (genpd->status == GPD_STATE_POWER_OFF) {
- ret = -EINVAL;
- goto out;
- }
+ mutex_init(&gpd_data->lock);
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+ genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
ret = -EAGAIN;
@@ -1227,26 +1274,35 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data) {
- ret = -ENOMEM;
- goto out;
- }
-
genpd->device_count++;
+ genpd->max_off_time_changed = true;
- dev->pm_domain = &genpd->domain;
dev_pm_get_subsys_data(dev);
+
+ mutex_lock(&gpd_data->lock);
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = &genpd->domain;
dev->power.subsys_data->domain_data = &gpd_data->base;
gpd_data->base.dev = dev;
- gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
if (td)
gpd_data->td = *td;
+ gpd_data->td.constraint_changed = true;
+ gpd_data->td.effective_constraint_ns = -1;
+ spin_unlock_irq(&dev->power.lock);
+ mutex_unlock(&gpd_data->lock);
+
+ genpd_release_lock(genpd);
+
+ return 0;
+
out:
genpd_release_lock(genpd);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
return ret;
}
@@ -1290,12 +1346,15 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
+ struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
- int ret = -EINVAL;
+ int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
+ || IS_ERR_OR_NULL(dev->pm_domain)
+ || pd_to_genpd(dev->pm_domain) != genpd)
return -EINVAL;
genpd_acquire_lock(genpd);
@@ -1305,21 +1364,27 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- if (pdd->dev != dev)
- continue;
+ genpd->device_count--;
+ genpd->max_off_time_changed = true;
- list_del_init(&pdd->list_node);
- pdd->dev = NULL;
- dev_pm_put_subsys_data(dev);
- dev->pm_domain = NULL;
- kfree(to_gpd_data(pdd));
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ pdd = dev->power.subsys_data->domain_data;
+ list_del_init(&pdd->list_node);
+ dev->power.subsys_data->domain_data = NULL;
+ spin_unlock_irq(&dev->power.lock);
- genpd->device_count--;
+ gpd_data = to_gpd_data(pdd);
+ mutex_lock(&gpd_data->lock);
+ pdd->dev = NULL;
+ mutex_unlock(&gpd_data->lock);
- ret = 0;
- break;
- }
+ genpd_release_lock(genpd);
+
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
+ dev_pm_put_subsys_data(dev);
+ return 0;
out:
genpd_release_lock(genpd);
@@ -1348,6 +1413,26 @@ void pm_genpd_dev_always_on(struct device *dev, bool val)
EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
/**
+ * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "need restore" flag.
+ */
+void pm_genpd_dev_need_restore(struct device *dev, bool val)
+{
+ struct pm_subsys_data *psd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ psd = dev_to_psd(dev);
+ if (psd && psd->domain_data)
+ to_gpd_data(psd->domain_data)->need_restore = val;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
+
+/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
@@ -1378,7 +1463,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out;
}
- list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave == subdomain && link->master == genpd) {
ret = -EINVAL;
goto out;
@@ -1690,6 +1775,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = true;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
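
The new pm_genpd_dev_need_restore() export lets platform code override the
"need restore" flag that __pm_genpd_add_device() now initializes from the
domain state. A hypothetical use (foo_reload_registers() is an invented
helper):

/* Platform code that restores device state by hand can clear the
 * "need restore" flag so genpd skips its own restore step. */
static int foo_resume_by_hand(struct device *dev)
{
	int ret = foo_reload_registers(dev);

	if (!ret)
		pm_genpd_dev_need_restore(dev, false);
	return ret;
}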
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 66a265b..28dee30 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,6 +14,31 @@
#ifdef CONFIG_PM_RUNTIME
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+ s64 *constraint_ns_p = data;
+ s32 constraint_ns = -1;
+
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
+ constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
+
+ if (constraint_ns < 0) {
+ constraint_ns = dev_pm_qos_read_value(dev);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+ if (constraint_ns == 0)
+ return 0;
+
+ /*
+ * constraint_ns cannot be negative here, because the device has been
+ * suspended.
+ */
+ if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ *constraint_ns_p = constraint_ns;
+
+ return 0;
+}
+
/**
* default_stop_ok - Default PM domain governor routine for stopping devices.
* @dev: Device to check.
@@ -21,14 +46,52 @@
bool default_stop_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ unsigned long flags;
+ s64 constraint_ns;
dev_dbg(dev, "%s()\n", __func__);
- if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
- return true;
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!td->constraint_changed) {
+ bool ret = td->cached_stop_ok;
- return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
- && td->break_even_ns < dev->power.max_time_suspended_ns;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return ret;
+ }
+ td->constraint_changed = false;
+ td->cached_stop_ok = false;
+ td->effective_constraint_ns = -1;
+ constraint_ns = __dev_pm_qos_read_value(dev);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (constraint_ns < 0)
+ return false;
+
+ constraint_ns *= NSEC_PER_USEC;
+ /*
+ * We can walk the children without any additional locking, because
+ * they all have been suspended at this point and their
+ * effective_constraint_ns fields won't be modified in parallel with us.
+ */
+ if (!dev->power.ignore_children)
+ device_for_each_child(dev, &constraint_ns,
+ dev_update_qos_constraint);
+
+ if (constraint_ns > 0) {
+ constraint_ns -= td->start_latency_ns;
+ if (constraint_ns == 0)
+ return false;
+ }
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
+ constraint_ns == 0;
+ /*
+ * The children have been suspended already, so we don't need to take
+ * their stop latencies into account here.
+ */
+ return td->cached_stop_ok;
}
/**
@@ -42,9 +105,27 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
struct pm_domain_data *pdd;
- s64 min_dev_off_time_ns;
+ s64 min_off_time_ns;
s64 off_on_time_ns;
- ktime_t time_now = ktime_get();
+
+ if (genpd->max_off_time_changed) {
+ struct gpd_link *link;
+
+ /*
+ * We have to invalidate the cached results for the masters, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any master until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->slave_links, slave_node)
+ link->master->max_off_time_changed = true;
+
+ genpd->max_off_time_changed = false;
+ genpd->cached_power_down_ok = false;
+ genpd->max_off_time_ns = -1;
+ } else {
+ return genpd->cached_power_down_ok;
+ }
off_on_time_ns = genpd->power_off_latency_ns +
genpd->power_on_latency_ns;
@@ -61,6 +142,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
to_gpd_data(pdd)->td.save_state_latency_ns;
}
+ min_off_time_ns = -1;
/*
* Check if subdomains can be off for enough time.
*
@@ -73,8 +155,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
if (sd_max_off_ns < 0)
continue;
- sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
- sd->power_off_time));
/*
* Check if the subdomain is allowed to be off long enough for
* the current domain to turn off and on (that's how much time
@@ -82,60 +162,64 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
*/
if (sd_max_off_ns <= off_on_time_ns)
return false;
+
+ if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+ min_off_time_ns = sd_max_off_ns;
}
/*
* Check if the devices in the domain can be off enough time.
*/
- min_dev_off_time_ns = -1;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
struct gpd_timing_data *td;
- struct device *dev = pdd->dev;
- s64 dev_off_time_ns;
+ s64 constraint_ns;
- if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ if (!pdd->dev->driver)
continue;
+ /*
+ * Check if the device is allowed to be off long enough for the
+ * domain to turn off and on (that's how much time it will
+ * have to wait worst case).
+ */
td = &to_gpd_data(pdd)->td;
- dev_off_time_ns = dev->power.max_time_suspended_ns -
- (td->start_latency_ns + td->restore_state_latency_ns +
- ktime_to_ns(ktime_sub(time_now,
- dev->power.suspend_time)));
- if (dev_off_time_ns <= off_on_time_ns)
- return false;
-
- if (min_dev_off_time_ns > dev_off_time_ns
- || min_dev_off_time_ns < 0)
- min_dev_off_time_ns = dev_off_time_ns;
- }
+ constraint_ns = td->effective_constraint_ns;
+ /* default_stop_ok() need not be called before us. */
+ if (constraint_ns < 0) {
+ constraint_ns = dev_pm_qos_read_value(pdd->dev);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+ if (constraint_ns == 0)
+ continue;
- if (min_dev_off_time_ns < 0) {
/*
- * There are no latency constraints, so the domain can spend
- * arbitrary time in the "off" state.
+ * constraint_ns cannot be negative here, because the device has
+ * been suspended.
*/
- genpd->max_off_time_ns = -1;
- return true;
+ constraint_ns -= td->restore_state_latency_ns;
+ if (constraint_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+ min_off_time_ns = constraint_ns;
}
+ genpd->cached_power_down_ok = true;
+
/*
- * The difference between the computed minimum delta and the time needed
- * to turn the domain on is the maximum theoretical time this domain can
- * spend in the "off" state.
+ * If the computed minimum device off time is negative, there are no
+ * latency constraints, so the domain can spend arbitrary time in the
+ * "off" state.
*/
- min_dev_off_time_ns -= genpd->power_on_latency_ns;
+ if (min_off_time_ns < 0)
+ return true;
/*
- * If the difference between the computed minimum delta and the time
- * needed to turn the domain off and back on on is smaller than the
- * domain's power break even time, removing power from the domain is not
- * worth it.
+ * The difference between the computed minimum subdomain or device off
+ * time and the time needed to turn the domain on is the maximum
+ * theoretical time this domain can spend in the "off" state.
*/
- if (genpd->break_even_ns >
- min_dev_off_time_ns - genpd->power_off_latency_ns)
- return false;
-
- genpd->max_off_time_ns = min_dev_off_time_ns;
+ genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
return true;
}
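
To see the new caching in default_stop_ok() at work, here is a simplified
standalone sketch of the decision (the struct and the numbers are stand-ins,
not kernel types; values are in nanoseconds):

struct td_sketch {
	long long stop_latency_ns;
	long long start_latency_ns;
	long long effective_constraint_ns;
};

/* qos_us: aggregate device PM QoS constraint in microseconds. */
static int stop_ok_sketch(struct td_sketch *td, long long qos_us)
{
	long long constraint_ns = qos_us * 1000;	/* NSEC_PER_USEC */

	if (constraint_ns < 0)
		return 0;		/* negative means "never suspend" */

	if (constraint_ns > 0) {
		constraint_ns -= td->start_latency_ns;
		if (constraint_ns == 0)
			return 0;
	}
	td->effective_constraint_ns = constraint_ns;

	/* 0 means "no constraint at all", which also permits stopping.
	 * E.g. a 100 us constraint with 20 us start latency and 30 us
	 * stop latency gives 80000 > 30000, so stopping is OK; the
	 * result stays cached until a QoS notifier invalidates it. */
	return constraint_ns > td->stop_latency_ns || constraint_ns == 0;
}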
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b462c0e..e0fb5b0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -889,6 +889,11 @@ static int dpm_suspend_noirq(pm_message_t state)
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
+
+ if (pm_wakeup_pending()) {
+ error = -EBUSY;
+ break;
+ }
}
mutex_unlock(&dpm_list_mtx);
if (error)
@@ -962,6 +967,11 @@ static int dpm_suspend_late(pm_message_t state)
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
+
+ if (pm_wakeup_pending()) {
+ error = -EBUSY;
+ break;
+ }
}
mutex_unlock(&dpm_list_mtx);
if (error)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7185557..fd849a2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -352,21 +352,26 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
*
* Will register the notifier into a notification chain that gets called
* upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or error code will be returned if that fails).
*/
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
- int retval = 0;
+ int ret = 0;
mutex_lock(&dev_pm_qos_mtx);
- /* Silently return if the constraints object is not present. */
- if (dev->power.constraints)
- retval = blocking_notifier_chain_register(
- dev->power.constraints->notifiers,
- notifier);
+ if (!dev->power.constraints)
+ ret = dev->power.power_state.event != PM_EVENT_INVALID ?
+ dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+
+ if (!ret)
+ ret = blocking_notifier_chain_register(
+ dev->power.constraints->notifiers, notifier);
mutex_unlock(&dev_pm_qos_mtx);
- return retval;
+ return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index bd0f394..5989487 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -282,47 +282,6 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
-struct rpm_qos_data {
- ktime_t time_now;
- s64 constraint_ns;
-};
-
-/**
- * rpm_update_qos_constraint - Update a given PM QoS constraint data.
- * @dev: Device whose timing data to use.
- * @data: PM QoS constraint data to update.
- *
- * Use the suspend timing data of @dev to update PM QoS constraint data pointed
- * to by @data.
- */
-static int rpm_update_qos_constraint(struct device *dev, void *data)
-{
- struct rpm_qos_data *qos = data;
- unsigned long flags;
- s64 delta_ns;
- int ret = 0;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (dev->power.max_time_suspended_ns < 0)
- goto out;
-
- delta_ns = dev->power.max_time_suspended_ns -
- ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
- if (delta_ns <= 0) {
- ret = -EBUSY;
- goto out;
- }
-
- if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
- qos->constraint_ns = delta_ns;
-
- out:
- spin_unlock_irqrestore(&dev->power.lock, flags);
-
- return ret;
-}
-
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -349,7 +308,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
- struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -445,38 +403,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
- qos.constraint_ns = __dev_pm_qos_read_value(dev);
- if (qos.constraint_ns < 0) {
- /* Negative constraint means "never suspend". */
+ if (__dev_pm_qos_read_value(dev) < 0) {
+ /* Negative PM QoS constraint means "never suspend". */
retval = -EPERM;
goto out;
}
- qos.constraint_ns *= NSEC_PER_USEC;
- qos.time_now = ktime_get();
__update_runtime_status(dev, RPM_SUSPENDING);
- if (!dev->power.ignore_children) {
- if (dev->power.irq_safe)
- spin_unlock(&dev->power.lock);
- else
- spin_unlock_irq(&dev->power.lock);
-
- retval = device_for_each_child(dev, &qos,
- rpm_update_qos_constraint);
-
- if (dev->power.irq_safe)
- spin_lock(&dev->power.lock);
- else
- spin_lock_irq(&dev->power.lock);
-
- if (retval)
- goto fail;
- }
-
- dev->power.suspend_time = qos.time_now;
- dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
-
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -529,8 +463,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
fail:
__update_runtime_status(dev, RPM_ACTIVE);
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -704,9 +636,6 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
-
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -1369,9 +1298,6 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
- dev->power.suspend_time = ktime_set(0, 0);
- dev->power.max_time_suspended_ns = -1;
-
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1389,28 +1315,3 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
-
-/**
- * pm_runtime_update_max_time_suspended - Update device's suspend time data.
- * @dev: Device to handle.
- * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
- *
- * Update the device's power.max_time_suspended_ns field by subtracting
- * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
- * never negative.
- */
-void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
- if (dev->power.max_time_suspended_ns > delta_ns)
- dev->power.max_time_suspended_ns -= delta_ns;
- else
- dev->power.max_time_suspended_ns = 0;
- }
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95c12f6..48be2ad 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -314,22 +314,41 @@ static ssize_t wakeup_active_count_show(struct device *dev,
static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
-static ssize_t wakeup_hit_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_abort_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
- count = dev->power.wakeup->hit_count;
+ count = dev->power.wakeup->expire_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL);
+static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
static ssize_t wakeup_active_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -398,6 +417,27 @@ static ssize_t wakeup_last_time_show(struct device *dev,
}
static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec = 0;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+ return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
+ wakeup_prevent_sleep_time_show, NULL);
+#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
@@ -486,11 +526,15 @@ static struct attribute *wakeup_attrs[] = {
&dev_attr_wakeup.attr,
&dev_attr_wakeup_count.attr,
&dev_attr_wakeup_active_count.attr,
- &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_abort_count.attr,
+ &dev_attr_wakeup_expire_count.attr,
&dev_attr_wakeup_active.attr,
&dev_attr_wakeup_total_time_ms.attr,
&dev_attr_wakeup_max_time_ms.attr,
&dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+ &dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
#endif
NULL,
};
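
The renamed and new attributes are ordinary sysfs files. A userspace sketch
reading one of them (the device path is an invented example):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/devices/foo/power/wakeup_expire_count", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("expired wakeup events: %s", buf);
	fclose(f);
	return 0;
}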
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2a3e581..cbb463b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -14,16 +14,15 @@
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <trace/events/power.h>
#include "power.h"
-#define TIMEOUT 100
-
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
*/
-bool events_check_enabled;
+bool events_check_enabled __read_mostly;
/*
* Combined counters of registered wakeup events and wakeup events in progress.
@@ -52,6 +51,8 @@ static void pm_wakeup_timer_fn(unsigned long data);
static LIST_HEAD(wakeup_sources);
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
/**
* wakeup_source_prepare - Prepare a new wakeup source for initialization.
* @ws: Wakeup source to prepare.
@@ -132,6 +133,7 @@ void wakeup_source_add(struct wakeup_source *ws)
spin_lock_init(&ws->lock);
setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
ws->active = false;
+ ws->last_time = ktime_get();
spin_lock_irq(&events_lock);
list_add_rcu(&ws->entry, &wakeup_sources);
@@ -374,12 +376,33 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
*/
static void wakeup_source_activate(struct wakeup_source *ws)
{
+ unsigned int cec;
+
ws->active = true;
ws->active_count++;
ws->last_time = ktime_get();
+ if (ws->autosleep_enabled)
+ ws->start_prevent_time = ws->last_time;
/* Increment the counter of events in progress. */
- atomic_inc(&combined_event_count);
+ cec = atomic_inc_return(&combined_event_count);
+
+ trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws)
+{
+ ws->event_count++;
+ /* This is racy, but the counter is approximate anyway. */
+ if (events_check_enabled)
+ ws->wakeup_count++;
+
+ if (!ws->active)
+ wakeup_source_activate(ws);
}
/**
@@ -397,10 +420,7 @@ void __pm_stay_awake(struct wakeup_source *ws)
spin_lock_irqsave(&ws->lock, flags);
- ws->event_count++;
- if (!ws->active)
- wakeup_source_activate(ws);
-
+ wakeup_source_report_event(ws);
del_timer(&ws->timer);
ws->timer_expires = 0;
@@ -432,6 +452,17 @@ void pm_stay_awake(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_stay_awake);
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+ ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+ ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+ ktime_t now) {}
+#endif
+
/**
* wakeup_source_deactivate - Mark given wakeup source as inactive.
* @ws: Wakeup source to handle.
@@ -442,6 +473,7 @@ EXPORT_SYMBOL_GPL(pm_stay_awake);
*/
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
+ unsigned int cnt, inpr, cec;
ktime_t duration;
ktime_t now;
@@ -468,14 +500,23 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
ws->max_time = duration;
+ ws->last_time = now;
del_timer(&ws->timer);
ws->timer_expires = 0;
+ if (ws->autosleep_enabled)
+ update_prevent_sleep_time(ws, now);
+
/*
* Increment the counter of registered wakeup events and decrement the
* counter of wakeup events in progress simultaneously.
*/
- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
+ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+ trace_wakeup_source_deactivate(ws->name, cec);
+
+ split_counters(&cnt, &inpr);
+ if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+ wake_up(&wakeup_count_wait_queue);
}
/**
@@ -536,8 +577,10 @@ static void pm_wakeup_timer_fn(unsigned long data)
spin_lock_irqsave(&ws->lock, flags);
if (ws->active && ws->timer_expires
- && time_after_eq(jiffies, ws->timer_expires))
+ && time_after_eq(jiffies, ws->timer_expires)) {
wakeup_source_deactivate(ws);
+ ws->expire_count++;
+ }
spin_unlock_irqrestore(&ws->lock, flags);
}
@@ -564,9 +607,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
spin_lock_irqsave(&ws->lock, flags);
- ws->event_count++;
- if (!ws->active)
- wakeup_source_activate(ws);
+ wakeup_source_report_event(ws);
if (!msec) {
wakeup_source_deactivate(ws);
@@ -609,24 +650,6 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
EXPORT_SYMBOL_GPL(pm_wakeup_event);
/**
- * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
- */
-static void pm_wakeup_update_hit_counts(void)
-{
- unsigned long flags;
- struct wakeup_source *ws;
-
- rcu_read_lock();
- list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
- spin_lock_irqsave(&ws->lock, flags);
- if (ws->active)
- ws->hit_count++;
- spin_unlock_irqrestore(&ws->lock, flags);
- }
- rcu_read_unlock();
-}
-
-/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
* Compare the current number of registered wakeup events with its preserved
@@ -648,32 +671,38 @@ bool pm_wakeup_pending(void)
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
- if (ret)
- pm_wakeup_update_hit_counts();
return ret;
}
/**
* pm_get_wakeup_count - Read the number of registered wakeup events.
* @count: Address to store the value at.
+ * @block: Whether or not to block.
*
- * Store the number of registered wakeup events at the address in @count. Block
- * if the current number of wakeup events being processed is nonzero.
+ * Store the number of registered wakeup events at the address in @count. If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
*
- * Return 'false' if the wait for the number of wakeup events being processed to
- * drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return 'true'.
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero. Otherwise return 'true'.
*/
-bool pm_get_wakeup_count(unsigned int *count)
+bool pm_get_wakeup_count(unsigned int *count, bool block)
{
unsigned int cnt, inpr;
- for (;;) {
- split_counters(&cnt, &inpr);
- if (inpr == 0 || signal_pending(current))
- break;
- pm_wakeup_update_hit_counts();
- schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
+ if (block) {
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&wakeup_count_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
+
+ schedule();
+ }
+ finish_wait(&wakeup_count_wait_queue, &wait);
}
split_counters(&cnt, &inpr);
@@ -703,11 +732,37 @@ bool pm_save_wakeup_count(unsigned int count)
events_check_enabled = true;
}
spin_unlock_irq(&events_lock);
- if (!events_check_enabled)
- pm_wakeup_update_hit_counts();
return events_check_enabled;
}
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+ struct wakeup_source *ws;
+ ktime_t now = ktime_get();
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ spin_lock_irq(&ws->lock);
+ if (ws->autosleep_enabled != set) {
+ ws->autosleep_enabled = set;
+ if (ws->active) {
+ if (set)
+ ws->start_prevent_time = now;
+ else
+ update_prevent_sleep_time(ws, now);
+ }
+ }
+ spin_unlock_irq(&ws->lock);
+ }
+ rcu_read_unlock();
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
static struct dentry *wakeup_sources_stats_dentry;
/**
@@ -723,27 +778,37 @@ static int print_wakeup_source_stats(struct seq_file *m,
ktime_t max_time;
unsigned long active_count;
ktime_t active_time;
+ ktime_t prevent_sleep_time;
int ret;
spin_lock_irqsave(&ws->lock, flags);
total_time = ws->total_time;
max_time = ws->max_time;
+ prevent_sleep_time = ws->prevent_sleep_time;
active_count = ws->active_count;
if (ws->active) {
- active_time = ktime_sub(ktime_get(), ws->last_time);
+ ktime_t now = ktime_get();
+
+ active_time = ktime_sub(now, ws->last_time);
total_time = ktime_add(total_time, active_time);
if (active_time.tv64 > max_time.tv64)
max_time = active_time;
+
+ if (ws->autosleep_enabled)
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(now, ws->start_prevent_time));
} else {
active_time = ktime_set(0, 0);
}
- ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t"
- "%lld\t\t%lld\t\t%lld\t\t%lld\n",
- ws->name, active_count, ws->event_count, ws->hit_count,
+ ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t"
+ "%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count,
+ ws->wakeup_count, ws->expire_count,
ktime_to_ms(active_time), ktime_to_ms(total_time),
- ktime_to_ms(max_time), ktime_to_ms(ws->last_time));
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+ ktime_to_ms(prevent_sleep_time));
spin_unlock_irqrestore(&ws->lock, flags);
@@ -758,8 +823,9 @@ static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
- seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t"
- "active_since\ttotal_time\tmax_time\tlast_change\n");
+ seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+ "expire_count\tactive_since\ttotal_time\tmax_time\t"
+ "last_change\tprevent_suspend_time\n");
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
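For context, a minimal sketch of how the reworked API pairs up in a suspend path: pm_get_wakeup_count() now blocks on the new wait queue instead of polling, and pm_save_wakeup_count() arms the check. The calling function and error codes here are illustrative, not part of this patch.

	unsigned int cnt;

	/* Block until no wakeup events are in flight, then snapshot. */
	if (!pm_get_wakeup_count(&cnt, true))
		return -EINTR;	/* wait was interrupted by a signal */

	/* Arm events_check_enabled; a later wakeup event aborts suspend. */
	if (!pm_save_wakeup_count(cnt))
		return -EBUSY;	/* new events arrived since the snapshot */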
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0f6c7fb..6be390b 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,6 +6,7 @@ config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI)
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select IRQ_DOMAIN if REGMAP_IRQ
bool
config REGMAP_I2C
@@ -14,5 +15,8 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+config REGMAP_MMIO
+ tristate
+
config REGMAP_IRQ
bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index defd579..5e75d1b 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -3,4 +3,5 @@ obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index fcafc5b..b986b86 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -26,21 +26,30 @@ struct regmap_format {
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
- void (*format_reg)(void *buf, unsigned int reg);
- void (*format_val)(void *buf, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+ void (*format_val)(void *buf, unsigned int val, unsigned int shift);
unsigned int (*parse_val)(void *buf);
};
+typedef void (*regmap_lock)(struct regmap *map);
+typedef void (*regmap_unlock)(struct regmap *map);
+
struct regmap {
- struct mutex lock;
+ struct mutex mutex;
+ spinlock_t spinlock;
+ regmap_lock lock;
+ regmap_unlock unlock;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
struct regmap_format format; /* Buffer format */
const struct regmap_bus *bus;
+ void *bus_context;
+ const char *name;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
+ const char *debugfs_name;
#endif
unsigned int max_register;
@@ -52,6 +61,10 @@ struct regmap {
u8 read_flag_mask;
u8 write_flag_mask;
+ /* number of bits to (left) shift the reg value when formatting */
+ int reg_shift;
+ int reg_stride;
+
/* regcache specific members */
const struct regcache_ops *cache_ops;
enum regcache_type cache_type;
@@ -79,6 +92,9 @@ struct regmap {
struct reg_default *patch;
int patch_regs;
+
+ /* if set, converts bulk rw to single rw */
+ bool use_single_rw;
};
struct regcache_ops {
@@ -101,11 +117,11 @@ int _regmap_write(struct regmap *map, unsigned int reg,
#ifdef CONFIG_DEBUG_FS
extern void regmap_debugfs_initcall(void);
-extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
#else
static inline void regmap_debugfs_initcall(void) { }
-static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
#endif
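To illustrate the new fields, a hypothetical regmap_config for a device whose registers sit on 4-byte boundaries and which cannot take bulk transfers; the field names are the ones regmap_init() reads in this series, while the device values are made up.

static const struct regmap_config foo_regmap_config = {
	.name		= "foo",	/* debugfs dir becomes "<dev>-foo" */
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,		/* only every fourth address is valid */
	.use_single_rw	= true,		/* split bulk I/O into single ops */
	.max_register	= 0x100,
};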
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 483b06d..afd6aa9 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -108,7 +108,7 @@ static int regcache_lzo_decompress_cache_block(struct regmap *map,
static inline int regcache_lzo_get_blkindex(struct regmap *map,
unsigned int reg)
{
- return (reg * map->cache_word_size) /
+ return ((reg / map->reg_stride) * map->cache_word_size) /
DIV_ROUND_UP(map->cache_size_raw,
regcache_lzo_block_count(map));
}
@@ -116,9 +116,10 @@ static inline int regcache_lzo_get_blkindex(struct regmap *map,
static inline int regcache_lzo_get_blkpos(struct regmap *map,
unsigned int reg)
{
- return reg % (DIV_ROUND_UP(map->cache_size_raw,
- regcache_lzo_block_count(map)) /
- map->cache_word_size);
+ return (reg / map->reg_stride) %
+ (DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map)) /
+ map->cache_word_size);
}
static inline int regcache_lzo_get_blksize(struct regmap *map)
@@ -322,7 +323,7 @@ static int regcache_lzo_write(struct regmap *map,
}
/* set the bit so we know we have to sync this register */
- set_bit(reg, lzo_block->sync_bmp);
+ set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
kfree(tmp_dst);
kfree(lzo_block->src);
return 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 92b779e..e6732cf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -39,11 +39,12 @@ struct regcache_rbtree_ctx {
};
static inline void regcache_rbtree_get_base_top_reg(
+ struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int *base, unsigned int *top)
{
*base = rbnode->base_reg;
- *top = rbnode->base_reg + rbnode->blklen - 1;
+ *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
static unsigned int regcache_rbtree_get_register(
@@ -70,7 +71,8 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
rbnode = rbtree_ctx->cached_rbnode;
if (rbnode) {
- regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
if (reg >= base_reg && reg <= top_reg)
return rbnode;
}
@@ -78,7 +80,8 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
node = rbtree_ctx->root.rb_node;
while (node) {
rbnode = container_of(node, struct regcache_rbtree_node, node);
- regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
if (reg >= base_reg && reg <= top_reg) {
rbtree_ctx->cached_rbnode = rbnode;
return rbnode;
@@ -92,7 +95,7 @@ static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
return NULL;
}
-static int regcache_rbtree_insert(struct rb_root *root,
+static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
struct regcache_rbtree_node *rbnode)
{
struct rb_node **new, *parent;
@@ -106,7 +109,7 @@ static int regcache_rbtree_insert(struct rb_root *root,
rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
node);
/* base and top registers of the current rbnode */
- regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
+ regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
&top_reg_tmp);
/* base register of the rbnode to be added */
base_reg = rbnode->base_reg;
@@ -138,19 +141,20 @@ static int rbtree_show(struct seq_file *s, void *ignored)
unsigned int base, top;
int nodes = 0;
int registers = 0;
- int average;
+ int this_registers, average;
- mutex_lock(&map->lock);
+ map->lock(map);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = container_of(node, struct regcache_rbtree_node, node);
- regcache_rbtree_get_base_top_reg(n, &base, &top);
- seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+ regcache_rbtree_get_base_top_reg(map, n, &base, &top);
+ this_registers = ((top - base) / map->reg_stride) + 1;
+ seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
nodes++;
- registers += top - base + 1;
+ registers += this_registers;
}
if (nodes)
@@ -161,7 +165,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
seq_printf(s, "%d nodes, %d registers, average %d registers\n",
nodes, registers, average);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return 0;
}
@@ -255,7 +259,7 @@ static int regcache_rbtree_read(struct regmap *map,
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
- reg_tmp = reg - rbnode->base_reg;
+ reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
*value = regcache_rbtree_get_register(rbnode, reg_tmp,
map->cache_word_size);
} else {
@@ -310,7 +314,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
*/
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
- reg_tmp = reg - rbnode->base_reg;
+ reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
val = regcache_rbtree_get_register(rbnode, reg_tmp,
map->cache_word_size);
if (val == value)
@@ -321,13 +325,15 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
/* look for an adjacent register to the one we are about to add */
for (node = rb_first(&rbtree_ctx->root); node;
node = rb_next(node)) {
- rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node);
+ rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
+ node);
for (i = 0; i < rbnode_tmp->blklen; i++) {
- reg_tmp = rbnode_tmp->base_reg + i;
- if (abs(reg_tmp - reg) != 1)
+ reg_tmp = rbnode_tmp->base_reg +
+ (i * map->reg_stride);
+ if (abs(reg_tmp - reg) != map->reg_stride)
continue;
/* decide where in the block to place our register */
- if (reg_tmp + 1 == reg)
+ if (reg_tmp + map->reg_stride == reg)
pos = i + 1;
else
pos = i;
@@ -357,7 +363,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
return -ENOMEM;
}
regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
- regcache_rbtree_insert(&rbtree_ctx->root, rbnode);
+ regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
@@ -397,7 +403,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
end = rbnode->blklen;
for (i = base; i < end; i++) {
- regtmp = rbnode->base_reg + i;
+ regtmp = rbnode->base_reg + (i * map->reg_stride);
val = regcache_rbtree_get_register(rbnode, i,
map->cache_word_size);
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 74b6909..835883b 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -59,7 +59,7 @@ static int regcache_hw_init(struct regmap *map)
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
val = regcache_get_val(map->reg_defaults_raw,
i, map->cache_word_size);
- if (regmap_volatile(map, i))
+ if (regmap_volatile(map, i * map->reg_stride))
continue;
count++;
}
@@ -76,9 +76,9 @@ static int regcache_hw_init(struct regmap *map)
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
val = regcache_get_val(map->reg_defaults_raw,
i, map->cache_word_size);
- if (regmap_volatile(map, i))
+ if (regmap_volatile(map, i * map->reg_stride))
continue;
- map->reg_defaults[j].reg = i;
+ map->reg_defaults[j].reg = i * map->reg_stride;
map->reg_defaults[j].def = val;
j++;
}
@@ -98,6 +98,10 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
int i;
void *tmp_buf;
+ for (i = 0; i < config->num_reg_defaults; i++)
+ if (config->reg_defaults[i].reg % map->reg_stride)
+ return -EINVAL;
+
if (map->cache_type == REGCACHE_NONE) {
map->cache_bypass = true;
return 0;
@@ -264,7 +268,7 @@ int regcache_sync(struct regmap *map)
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- mutex_lock(&map->lock);
+ map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
@@ -278,6 +282,10 @@ int regcache_sync(struct regmap *map)
/* Apply any patch first */
map->cache_bypass = 1;
for (i = 0; i < map->patch_regs; i++) {
+ if (map->patch[i].reg % map->reg_stride) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -296,7 +304,7 @@ out:
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -323,7 +331,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- mutex_lock(&map->lock);
+ map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
@@ -342,7 +350,7 @@ out:
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -362,11 +370,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
- mutex_lock(&map->lock);
+ map->lock(map);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -381,9 +389,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
- mutex_lock(&map->lock);
+ map->lock(map);
map->cache_dirty = true;
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@@ -400,11 +408,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
- mutex_lock(&map->lock);
+ map->lock(map);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 251eb70..bb1ff17 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -80,7 +80,7 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
val_len = 2 * map->format.val_bytes;
tot_len = reg_len + val_len + 3; /* : \n */
- for (i = 0; i < map->max_register + 1; i++) {
+ for (i = 0; i <= map->max_register; i += map->reg_stride) {
if (!regmap_readable(map, i))
continue;
@@ -197,7 +197,7 @@ static ssize_t regmap_access_read_file(struct file *file,
reg_len = regmap_calc_reg_len(map->max_register, buf, count);
tot_len = reg_len + 10; /* ': R W V P\n' */
- for (i = 0; i < map->max_register + 1; i++) {
+ for (i = 0; i <= map->max_register; i += map->reg_stride) {
/* Ignore registers which are neither readable nor writable */
if (!regmap_readable(map, i) && !regmap_writeable(map, i))
continue;
@@ -242,10 +242,17 @@ static const struct file_operations regmap_access_fops = {
.llseek = default_llseek,
};
-void regmap_debugfs_init(struct regmap *map)
+void regmap_debugfs_init(struct regmap *map, const char *name)
{
- map->debugfs = debugfs_create_dir(dev_name(map->dev),
- regmap_debugfs_root);
+ if (name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ dev_name(map->dev), name);
+ name = map->debugfs_name;
+ } else {
+ name = dev_name(map->dev);
+ }
+
+ map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
if (!map->debugfs) {
dev_warn(map->dev, "Failed to create debugfs directory\n");
return;
@@ -274,6 +281,7 @@ void regmap_debugfs_init(struct regmap *map)
void regmap_debugfs_exit(struct regmap *map)
{
debugfs_remove_recursive(map->debugfs);
+ kfree(map->debugfs_name);
}
void regmap_debugfs_initcall(void)
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 9a3a8c5..5f6b247 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -15,8 +15,9 @@
#include <linux/module.h>
#include <linux/init.h>
-static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
+static int regmap_i2c_write(void *context, const void *data, size_t count)
{
+ struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
int ret;
@@ -29,10 +30,11 @@ static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
return -EIO;
}
-static int regmap_i2c_gather_write(struct device *dev,
+static int regmap_i2c_gather_write(void *context,
const void *reg, size_t reg_size,
const void *val, size_t val_size)
{
+ struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct i2c_msg xfer[2];
int ret;
@@ -62,10 +64,11 @@ static int regmap_i2c_gather_write(struct device *dev,
return -EIO;
}
-static int regmap_i2c_read(struct device *dev,
+static int regmap_i2c_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
+ struct device *dev = context;
struct i2c_client *i2c = to_i2c_client(dev);
struct i2c_msg xfer[2];
int ret;
@@ -107,7 +110,7 @@ static struct regmap_bus regmap_i2c = {
struct regmap *regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config)
{
- return regmap_init(&i2c->dev, &regmap_i2c, config);
+ return regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
}
EXPORT_SYMBOL_GPL(regmap_init_i2c);
@@ -124,7 +127,7 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c);
struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config)
{
- return devm_regmap_init(&i2c->dev, &regmap_i2c, config);
+ return devm_regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1befaa7..4fac4b9 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -15,6 +15,7 @@
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/slab.h>
#include "internal.h"
@@ -26,18 +27,20 @@ struct regmap_irq_chip_data {
struct regmap_irq_chip *chip;
int irq_base;
+ struct irq_domain *domain;
- void *status_reg_buf;
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
+
+ unsigned int irq_reg_stride;
};
static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
int irq)
{
- return &data->chip->irqs[irq - data->irq_base];
+ return &data->chip->irqs[irq];
}
static void regmap_irq_lock(struct irq_data *data)
@@ -50,6 +53,7 @@ static void regmap_irq_lock(struct irq_data *data)
static void regmap_irq_sync_unlock(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
int i, ret;
/*
@@ -58,11 +62,13 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
* suppress pointless writes.
*/
for (i = 0; i < d->chip->num_regs; i++) {
- ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+ ret = regmap_update_bits(d->map, d->chip->mask_base +
+ (i * map->reg_stride *
+ d->irq_reg_stride),
d->mask_buf_def[i], d->mask_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync masks in %x\n",
- d->chip->mask_base + i);
+ d->chip->mask_base + (i * map->reg_stride *
+ d->irq_reg_stride));
}
mutex_unlock(&d->lock);
@@ -71,17 +77,19 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
static void regmap_irq_enable(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
- const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+ d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}
static void regmap_irq_disable(struct irq_data *data)
{
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
- const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+ d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
static struct irq_chip regmap_irq_chip = {
@@ -98,18 +106,8 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
- u8 *buf8 = data->status_reg_buf;
- u16 *buf16 = data->status_reg_buf;
- u32 *buf32 = data->status_reg_buf;
bool handled = false;
- ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
- chip->num_regs);
- if (ret != 0) {
- dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
- return IRQ_NONE;
- }
-
/*
* Ignore masked IRQs and ack if we need to; we ack early so
 * there is no race between handling and acknowledging the
@@ -118,36 +116,34 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
* doing a write per register.
*/
for (i = 0; i < data->chip->num_regs; i++) {
- switch (map->format.val_bytes) {
- case 1:
- data->status_buf[i] = buf8[i];
- break;
- case 2:
- data->status_buf[i] = buf16[i];
- break;
- case 4:
- data->status_buf[i] = buf32[i];
- break;
- default:
- BUG();
+ ret = regmap_read(map, chip->status_base + (i * map->reg_stride
+ * data->irq_reg_stride),
+ &data->status_buf[i]);
+
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
return IRQ_NONE;
}
data->status_buf[i] &= ~data->mask_buf[i];
if (data->status_buf[i] && chip->ack_base) {
- ret = regmap_write(map, chip->ack_base + i,
+ ret = regmap_write(map, chip->ack_base +
+ (i * map->reg_stride *
+ data->irq_reg_stride),
data->status_buf[i]);
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
- chip->ack_base + i, ret);
+ chip->ack_base + (i * map->reg_stride *
+ data->irq_reg_stride), ret);
}
}
for (i = 0; i < chip->num_irqs; i++) {
- if (data->status_buf[chip->irqs[i].reg_offset] &
- chip->irqs[i].mask) {
- handle_nested_irq(data->irq_base + i);
+ if (data->status_buf[chip->irqs[i].reg_offset /
+ map->reg_stride] & chip->irqs[i].mask) {
+ handle_nested_irq(irq_find_mapping(data->domain, i));
handled = true;
}
}
@@ -158,6 +154,31 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
return IRQ_NONE;
}
+static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct regmap_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip_and_handler(virq, &regmap_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops regmap_domain_ops = {
+ .map = regmap_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
/**
* regmap_add_irq_chip(): Use standard regmap IRQ controller handling
*
@@ -178,30 +199,37 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
- int cur_irq, i;
+ int i;
int ret = -ENOMEM;
- irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
- if (irq_base < 0) {
- dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
- irq_base);
- return irq_base;
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (chip->irqs[i].reg_offset % map->reg_stride)
+ return -EINVAL;
+ if (chip->irqs[i].reg_offset / map->reg_stride >=
+ chip->num_regs)
+ return -EINVAL;
+ }
+
+ if (irq_base) {
+ irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+ if (irq_base < 0) {
+ dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ return irq_base;
+ }
}
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
+ *data = d;
+
d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->status_buf)
goto err_alloc;
- d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
- GFP_KERNEL);
- if (!d->status_reg_buf)
- goto err_alloc;
-
d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
GFP_KERNEL);
if (!d->mask_buf)
@@ -215,54 +243,59 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
d->map = map;
d->chip = chip;
d->irq_base = irq_base;
+
+ if (chip->irq_reg_stride)
+ d->irq_reg_stride = chip->irq_reg_stride;
+ else
+ d->irq_reg_stride = 1;
+
mutex_init(&d->lock);
for (i = 0; i < chip->num_irqs; i++)
- d->mask_buf_def[chip->irqs[i].reg_offset]
+ d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
|= chip->irqs[i].mask;
/* Mask all the interrupts by default */
for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i];
- ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+ ret = regmap_write(map, chip->mask_base + (i * map->reg_stride
+ * d->irq_reg_stride),
+ d->mask_buf[i]);
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
- chip->mask_base + i, ret);
+ chip->mask_base + (i * map->reg_stride *
+ d->irq_reg_stride), ret);
goto err_alloc;
}
}
- /* Register them with genirq */
- for (cur_irq = irq_base;
- cur_irq < chip->num_irqs + irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, d);
- irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ if (irq_base)
+ d->domain = irq_domain_add_legacy(map->dev->of_node,
+ chip->num_irqs, irq_base, 0,
+ &regmap_domain_ops, d);
+ else
+ d->domain = irq_domain_add_linear(map->dev->of_node,
+ chip->num_irqs,
+ &regmap_domain_ops, d);
+ if (!d->domain) {
+ dev_err(map->dev, "Failed to create IRQ domain\n");
+ ret = -ENOMEM;
+ goto err_alloc;
}
ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
chip->name, d);
if (ret != 0) {
dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
- goto err_alloc;
+ goto err_domain;
}
return 0;
+err_domain:
+ /* Should really dispose of the domain but... */
err_alloc:
kfree(d->mask_buf_def);
kfree(d->mask_buf);
- kfree(d->status_reg_buf);
kfree(d->status_buf);
kfree(d);
return ret;
@@ -281,9 +314,9 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
return;
free_irq(irq, d);
+ /* We should unmap the domain but... */
kfree(d->mask_buf_def);
kfree(d->mask_buf);
- kfree(d->status_reg_buf);
kfree(d->status_buf);
kfree(d);
}
@@ -298,6 +331,21 @@ EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
*/
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
+ WARN_ON(!data->irq_base);
return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
+
+/**
+ * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs
+ */
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
+{
+ return irq_create_mapping(data->domain, irq);
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
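A sketch of how a chip driver might use the new domain support: passing irq_base == 0 selects a linear domain, and consumers then translate chip-relative IRQ indices with regmap_irq_get_virq(). The chip descriptor and IRQ index are hypothetical.

	struct regmap_irq_chip_data *irq_data;
	int virq, ret;

	/* irq_base == 0: use a linear IRQ domain, no preallocated descs */
	ret = regmap_add_irq_chip(map, i2c->irq, IRQF_ONESHOT, 0,
				  &foo_irq_chip, &irq_data);
	if (ret)
		return ret;

	/* Map a chip-relative IRQ index to a Linux virq for request_irq() */
	virq = regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM);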
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
new file mode 100644
index 0000000..febd6de
--- /dev/null
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -0,0 +1,224 @@
+/*
+ * Register map access API - MMIO support
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct regmap_mmio_context {
+ void __iomem *regs;
+ unsigned val_bytes;
+};
+
+static int regmap_mmio_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct regmap_mmio_context *ctx = context;
+ u32 offset;
+
+ BUG_ON(reg_size != 4);
+
+ offset = be32_to_cpup(reg);
+
+ while (val_size) {
+ switch (ctx->val_bytes) {
+ case 1:
+ writeb(*(u8 *)val, ctx->regs + offset);
+ break;
+ case 2:
+ writew(be16_to_cpup(val), ctx->regs + offset);
+ break;
+ case 4:
+ writel(be32_to_cpup(val), ctx->regs + offset);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writeq(be64_to_cpup(val), ctx->regs + offset);
+ break;
+#endif
+ default:
+ /* Should be caught by regmap_mmio_gen_context() */
+ BUG();
+ }
+ val_size -= ctx->val_bytes;
+ val += ctx->val_bytes;
+ offset += ctx->val_bytes;
+ }
+
+ return 0;
+}
+
+static int regmap_mmio_write(void *context, const void *data, size_t count)
+{
+ BUG_ON(count < 4);
+
+ return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+}
+
+static int regmap_mmio_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct regmap_mmio_context *ctx = context;
+ u32 offset;
+
+ BUG_ON(reg_size != 4);
+
+ offset = be32_to_cpup(reg);
+
+ while (val_size) {
+ switch (ctx->val_bytes) {
+ case 1:
+ *(u8 *)val = readb(ctx->regs + offset);
+ break;
+ case 2:
+ *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
+ break;
+ case 4:
+ *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ *(u64 *)val = cpu_to_be64(readq(ctx->regs + offset));
+ break;
+#endif
+ default:
+ /* Should be caught by regmap_mmio_gen_context() */
+ BUG();
+ }
+ val_size -= ctx->val_bytes;
+ val += ctx->val_bytes;
+ offset += ctx->val_bytes;
+ }
+
+ return 0;
+}
+
+static void regmap_mmio_free_context(void *context)
+{
+ kfree(context);
+}
+
+static struct regmap_bus regmap_mmio = {
+ .fast_io = true,
+ .write = regmap_mmio_write,
+ .gather_write = regmap_mmio_gather_write,
+ .read = regmap_mmio_read,
+ .free_context = regmap_mmio_free_context,
+};
+
+static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+ int min_stride;
+
+ if (config->reg_bits != 32)
+ return ERR_PTR(-EINVAL);
+
+ if (config->pad_bits)
+ return ERR_PTR(-EINVAL);
+
+ switch (config->val_bits) {
+ case 8:
+ /* The core treats 0 as 1 */
+ min_stride = 0;
+ break;
+ case 16:
+ min_stride = 2;
+ break;
+ case 32:
+ min_stride = 4;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ min_stride = 8;
+ break;
+#endif
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (config->reg_stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->regs = regs;
+ ctx->val_bytes = config->val_bits / 8;
+
+ return ctx;
+}
+
+/**
+ * regmap_init_mmio(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_mmio(struct device *dev,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return regmap_init(dev, &regmap_mmio, ctx, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_mmio);
+
+/**
+ * devm_regmap_init_mmio(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_mmio(struct device *dev,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return devm_regmap_init(dev, &regmap_mmio, ctx, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 7c0c35a..ffa46a9 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,17 +15,19 @@
#include <linux/init.h>
#include <linux/module.h>
-static int regmap_spi_write(struct device *dev, const void *data, size_t count)
+static int regmap_spi_write(void *context, const void *data, size_t count)
{
+ struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
return spi_write(spi, data, count);
}
-static int regmap_spi_gather_write(struct device *dev,
+static int regmap_spi_gather_write(void *context,
const void *reg, size_t reg_len,
const void *val, size_t val_len)
{
+ struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
struct spi_message m;
struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
@@ -38,10 +40,11 @@ static int regmap_spi_gather_write(struct device *dev,
return spi_sync(spi, &m);
}
-static int regmap_spi_read(struct device *dev,
+static int regmap_spi_read(void *context,
const void *reg, size_t reg_size,
void *val, size_t val_size)
{
+ struct device *dev = context;
struct spi_device *spi = to_spi_device(dev);
return spi_write_then_read(spi, reg, reg_size, val, val_size);
@@ -66,7 +69,7 @@ static struct regmap_bus regmap_spi = {
struct regmap *regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
- return regmap_init(&spi->dev, &regmap_spi, config);
+ return regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
}
EXPORT_SYMBOL_GPL(regmap_init_spi);
@@ -83,7 +86,7 @@ EXPORT_SYMBOL_GPL(regmap_init_spi);
struct regmap *devm_regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config)
{
- return devm_regmap_init(&spi->dev, &regmap_spi, config);
+ return devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config);
}
EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bb80853..0bcda48 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -112,25 +112,36 @@ static void regmap_format_10_14_write(struct regmap *map,
out[0] = reg >> 2;
}
-static void regmap_format_8(void *buf, unsigned int val)
+static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
u8 *b = buf;
- b[0] = val;
+ b[0] = val << shift;
}
-static void regmap_format_16(void *buf, unsigned int val)
+static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
{
__be16 *b = buf;
- b[0] = cpu_to_be16(val);
+ b[0] = cpu_to_be16(val << shift);
}
-static void regmap_format_32(void *buf, unsigned int val)
+static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
+{
+ u8 *b = buf;
+
+ val <<= shift;
+
+ b[0] = val >> 16;
+ b[1] = val >> 8;
+ b[2] = val;
+}
+
+static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
{
__be32 *b = buf;
- b[0] = cpu_to_be32(val);
+ b[0] = cpu_to_be32(val << shift);
}
static unsigned int regmap_parse_8(void *buf)
@@ -149,6 +160,16 @@ static unsigned int regmap_parse_16(void *buf)
return b[0];
}
+static unsigned int regmap_parse_24(void *buf)
+{
+ u8 *b = buf;
+ unsigned int ret = b[2];
+ ret |= ((unsigned int)b[1]) << 8;
+ ret |= ((unsigned int)b[0]) << 16;
+
+ return ret;
+}
+
static unsigned int regmap_parse_32(void *buf)
{
__be32 *b = buf;
@@ -158,11 +179,41 @@ static unsigned int regmap_parse_32(void *buf)
return b[0];
}
+static void regmap_lock_mutex(struct regmap *map)
+{
+ mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(struct regmap *map)
+{
+ mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(struct regmap *map)
+{
+ spin_lock(&map->spinlock);
+}
+
+static void regmap_unlock_spinlock(struct regmap *map)
+{
+ spin_unlock(&map->spinlock);
+}
+
+static void dev_get_regmap_release(struct device *dev, void *res)
+{
+ /*
+ * We don't actually have anything to do here; the goal here
+ * is not to manage the regmap but to provide a simple way to
+ * get the regmap back given a struct device.
+ */
+}
+
/**
* regmap_init(): Initialise register map
*
* @dev: Device that will be interacted with
* @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
@@ -171,9 +222,10 @@ static unsigned int regmap_parse_32(void *buf)
*/
struct regmap *regmap_init(struct device *dev,
const struct regmap_bus *bus,
+ void *bus_context,
const struct regmap_config *config)
{
- struct regmap *map;
+ struct regmap *map, **m;
int ret = -EINVAL;
if (!bus || !config)
@@ -185,20 +237,36 @@ struct regmap *regmap_init(struct device *dev,
goto err;
}
- mutex_init(&map->lock);
+ if (bus->fast_io) {
+ spin_lock_init(&map->spinlock);
+ map->lock = regmap_lock_spinlock;
+ map->unlock = regmap_unlock_spinlock;
+ } else {
+ mutex_init(&map->mutex);
+ map->lock = regmap_lock_mutex;
+ map->unlock = regmap_unlock_mutex;
+ }
map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
map->format.buf_size += map->format.pad_bytes;
+ map->reg_shift = config->pad_bits % 8;
+ if (config->reg_stride)
+ map->reg_stride = config->reg_stride;
+ else
+ map->reg_stride = 1;
+ map->use_single_rw = config->use_single_rw;
map->dev = dev;
map->bus = bus;
+ map->bus_context = bus_context;
map->max_register = config->max_register;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
+ map->name = config->name;
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
@@ -207,7 +275,7 @@ struct regmap *regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- switch (config->reg_bits) {
+ switch (config->reg_bits + map->reg_shift) {
case 2:
switch (config->val_bits) {
case 6:
@@ -273,12 +341,19 @@ struct regmap *regmap_init(struct device *dev,
map->format.format_val = regmap_format_16;
map->format.parse_val = regmap_parse_16;
break;
+ case 24:
+ map->format.format_val = regmap_format_24;
+ map->format.parse_val = regmap_parse_24;
+ break;
case 32:
map->format.format_val = regmap_format_32;
map->format.parse_val = regmap_parse_32;
break;
}
+ if (map->format.format_write)
+ map->use_single_rw = true;
+
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
goto err_map;
@@ -289,14 +364,25 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- regmap_debugfs_init(map);
+ regmap_debugfs_init(map, config->name);
ret = regcache_init(map, config);
if (ret < 0)
goto err_free_workbuf;
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+ *m = map;
+ devres_add(dev, m);
+
return map;
+err_cache:
+ regcache_exit(map);
err_free_workbuf:
kfree(map->work_buf);
err_map:
@@ -316,6 +402,7 @@ static void devm_regmap_release(struct device *dev, void *res)
*
* @dev: Device that will be interacted with
* @bus: Bus-specific callbacks to use with device
+ * @bus_context: Data passed to bus-specific callbacks
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer
@@ -325,6 +412,7 @@ static void devm_regmap_release(struct device *dev, void *res)
*/
struct regmap *devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
+ void *bus_context,
const struct regmap_config *config)
{
struct regmap **ptr, *regmap;
@@ -333,7 +421,7 @@ struct regmap *devm_regmap_init(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- regmap = regmap_init(dev, bus, config);
+ regmap = regmap_init(dev, bus, bus_context, config);
if (!IS_ERR(regmap)) {
*ptr = regmap;
devres_add(dev, ptr);
@@ -360,7 +448,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
int ret;
- mutex_lock(&map->lock);
+ map->lock(map);
regcache_exit(map);
regmap_debugfs_exit(map);
@@ -372,14 +460,14 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- regmap_debugfs_init(map);
+ regmap_debugfs_init(map, config->name);
map->cache_bypass = false;
map->cache_only = false;
ret = regcache_init(map, config);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -391,11 +479,51 @@ void regmap_exit(struct regmap *map)
{
regcache_exit(map);
regmap_debugfs_exit(map);
+ if (map->bus->free_context)
+ map->bus->free_context(map->bus_context);
kfree(map->work_buf);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
+static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+{
+ struct regmap **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ /* If the user didn't specify a name, match any */
+ if (data)
+ return (*r)->name == data;
+ else
+ return 1;
+}
+
+/**
+ * dev_get_regmap(): Obtain the regmap (if any) for a device
+ *
+ * @dev: Device to retrieve the map for
+ * @name: Optional name for the register map, usually NULL.
+ *
+ * Returns the regmap for the device if one is present, or NULL. If
+ * name is specified then it must match the name given when the map
+ * was registered; if it is NULL then the first regmap found will be
+ * used. Devices with multiple register maps are very rare, so
+ * generic code should normally not need to specify a name.
+ */
+struct regmap *dev_get_regmap(struct device *dev, const char *name)
+{
+ struct regmap **r = devres_find(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)name);
+
+ if (!r)
+ return NULL;
+ return *r;
+}
+EXPORT_SYMBOL_GPL(dev_get_regmap);
+
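The intended consumer looks roughly like this: a child driver (an MFD cell, say) can look up its parent's regmap instead of having it passed through platform data. The parent/child arrangement is illustrative.

	struct regmap *map;

	/* NULL name: take the first (usually only) regmap of the parent */
	map = dev_get_regmap(pdev->dev.parent, NULL);
	if (!map)
		return -ENODEV;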
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
@@ -408,7 +536,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
/* Check for unwritable registers before we start */
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!map->writeable_reg(map->dev, reg + i))
+ if (!map->writeable_reg(map->dev,
+ reg + (i * map->reg_stride)))
return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
@@ -417,7 +546,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
for (i = 0; i < val_len / val_bytes; i++) {
memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
ival = map->format.parse_val(map->work_buf);
- ret = regcache_write(map, reg + i, ival);
+ ret = regcache_write(map, reg + (i * map->reg_stride),
+ ival);
if (ret) {
dev_err(map->dev,
"Error in caching of register: %u ret: %d\n",
@@ -431,7 +561,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
}
- map->format.format_reg(map->work_buf, reg);
+ map->format.format_reg(map->work_buf, reg, map->reg_shift);
u8[0] |= map->write_flag_mask;
@@ -444,12 +574,12 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
*/
if (val == (map->work_buf + map->format.pad_bytes +
map->format.reg_bytes))
- ret = map->bus->write(map->dev, map->work_buf,
+ ret = map->bus->write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes +
val_len);
else if (map->bus->gather_write)
- ret = map->bus->gather_write(map->dev, map->work_buf,
+ ret = map->bus->gather_write(map->bus_context, map->work_buf,
map->format.reg_bytes +
map->format.pad_bytes,
val, val_len);
@@ -464,7 +594,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
memcpy(buf, map->work_buf, map->format.reg_bytes);
memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
- ret = map->bus->write(map->dev, buf, len);
+ ret = map->bus->write(map->bus_context, buf, len);
kfree(buf);
}
@@ -498,7 +628,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
trace_regmap_hw_write_start(map->dev, reg, 1);
- ret = map->bus->write(map->dev, map->work_buf,
+ ret = map->bus->write(map->bus_context, map->work_buf,
map->format.buf_size);
trace_regmap_hw_write_done(map->dev, reg, 1);
@@ -506,7 +636,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
return ret;
} else {
map->format.format_val(map->work_buf + map->format.reg_bytes
- + map->format.pad_bytes, val);
+ + map->format.pad_bytes, val, 0);
return _regmap_raw_write(map, reg,
map->work_buf +
map->format.reg_bytes +
@@ -529,11 +659,14 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
- mutex_lock(&map->lock);
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map);
ret = _regmap_write(map, reg, val);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -560,11 +693,16 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
- mutex_lock(&map->lock);
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map);
ret = _regmap_raw_write(map, reg, val, val_len);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -593,8 +731,10 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (!map->format.parse_val)
return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
- mutex_lock(&map->lock);
+ map->lock(map);
/* No formatting is required if val_bytes is 1 */
if (val_bytes == 1) {
@@ -609,13 +749,28 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_val(wval + i);
}
- ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ /*
+ * Some devices do not support bulk write; for them we issue a
+ * series of single write operations instead. The lock is already
+ * held here, so use _regmap_raw_write() on the formatted buffer
+ * and bail out through the normal cleanup path on error.
+ */
+ if (map->use_single_rw) {
+ for (i = 0; i < val_count; i++) {
+ ret = _regmap_raw_write(map,
+ reg + (i * map->reg_stride),
+ wval + (i * val_bytes),
+ val_bytes);
+ if (ret != 0)
+ break;
+ }
+ } else {
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+ }
if (val_bytes != 1)
kfree(wval);
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
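For reference, a caller-side sketch of a bulk write: values are passed in native endianness and the core formats them, and with use_single_rw set the same call is transparently split into per-register writes honouring reg_stride. The register name and data are made up.

	u32 coeffs[4] = { 0x10, 0x20, 0x30, 0x40 };
	int ret;

	ret = regmap_bulk_write(map, FOO_COEF_BASE, coeffs,
				ARRAY_SIZE(coeffs));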
@@ -626,7 +781,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
- map->format.format_reg(map->work_buf, reg);
+ map->format.format_reg(map->work_buf, reg, map->reg_shift);
/*
* Some buses or devices flag reads by setting the high bits in the
@@ -639,7 +794,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
trace_regmap_hw_read_start(map->dev, reg,
val_len / map->format.val_bytes);
- ret = map->bus->read(map->dev, map->work_buf,
+ ret = map->bus->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
@@ -672,6 +827,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
trace_regmap_reg_read(map->dev, reg, *val);
}
+ if (ret == 0 && !map->cache_bypass)
+ regcache_write(map, reg, *val);
+
return ret;
}
@@ -689,11 +847,14 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
int ret;
- mutex_lock(&map->lock);
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map);
ret = _regmap_read(map, reg, val);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -718,7 +879,12 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
- mutex_lock(&map->lock);
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
+
+ map->lock(map);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
@@ -730,16 +896,17 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
* cost as we expect to hit the cache.
*/
for (i = 0; i < val_count; i++) {
- ret = _regmap_read(map, reg + i, &v);
+ ret = _regmap_read(map, reg + (i * map->reg_stride),
+ &v);
if (ret != 0)
goto out;
- map->format.format_val(val + (i * val_bytes), v);
+ map->format.format_val(val + (i * val_bytes), v, 0);
}
}
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -765,18 +932,37 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
if (!map->format.parse_val)
return -EINVAL;
+ if (reg % map->reg_stride)
+ return -EINVAL;
if (vol || map->cache_type == REGCACHE_NONE) {
- ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
- if (ret != 0)
- return ret;
+ /*
+ * Some devices do not support bulk read; for them we issue a
+ * series of single read operations instead.
+ */
+ if (map->use_single_rw) {
+ for (i = 0; i < val_count; i++) {
+ ret = regmap_raw_read(map,
+ reg + (i * map->reg_stride),
+ val + (i * val_bytes),
+ val_bytes);
+ if (ret != 0)
+ return ret;
+ }
+ } else {
+ ret = regmap_raw_read(map, reg, val,
+ val_bytes * val_count);
+ if (ret != 0)
+ return ret;
+ }
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_val(val + i);
} else {
for (i = 0; i < val_count; i++) {
unsigned int ival;
- ret = regmap_read(map, reg + i, &ival);
+ ret = regmap_read(map, reg + (i * map->reg_stride),
+ &ival);
if (ret != 0)
return ret;
memcpy(val + (i * val_bytes), &ival, val_bytes);
@@ -794,7 +980,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
unsigned int tmp, orig;
- mutex_lock(&map->lock);
+ map->lock(map);
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
@@ -811,7 +997,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
}
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
@@ -878,7 +1064,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
if (map->patch)
return -EBUSY;
- mutex_lock(&map->lock);
+ map->lock(map);
bypass = map->cache_bypass;
@@ -906,7 +1092,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
out:
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}