author | Richard Fitzgerald <rf@opensource.wolfsonmicro.com> | 2015-06-23 14:32:55 +0100
---|---|---
committer | Mark Brown <broonie@kernel.org> | 2015-07-20 18:45:00 +0100
commit | d3dc5430d68fb91a62d971648170b34d46ab85bc |
tree | 4b2d4fd06598a3837636e83c5d8b8d286e8ffc5d |
parent | 0642ef6f2992eba46c41abb5ceb7d4fa14ba888e |
regmap: debugfs: Allow writes to cache state settings
Allow the user to write the cache_only and cache_bypass settings.
This can be useful for debugging.
Since this can leave the hardware out of sync with the cache, at least
for the period that the cache state is forced, the kernel is tainted
and the action is recorded in the kernel log.
When cache_only is disabled through debugfs, a cache sync is performed.
Signed-off-by: Richard Fitzgerald <rf@opensource.wolfsonmicro.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
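As a usage note (not part of the patch): for a regmap with a cache, these controls appear alongside the existing regmap debugfs entries, typically under /sys/kernel/debug/regmap/<device-name>/. Below is a minimal user-space sketch of toggling cache_only; the device directory name is hypothetical and debugfs is assumed to be mounted at /sys/kernel/debug.

```c
/*
 * Minimal sketch (not from the patch): toggle the regmap cache_only
 * control from user space. The REGMAP_DIR path is a placeholder for
 * the target device's regmap debugfs directory.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define REGMAP_DIR "/sys/kernel/debug/regmap/dummy-dev"	/* hypothetical device */

static int write_bool(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	/* debugfs bool files accept "Y"/"N" (or "1"/"0") */
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}

int main(void)
{
	/* Force cache-only mode: register writes land only in the cache. */
	write_bool(REGMAP_DIR "/cache_only", "Y");

	/* ... exercise the driver, inspect registers via debugfs ... */

	/* Leaving cache_only triggers a regcache_sync() in the kernel. */
	write_bool(REGMAP_DIR "/cache_only", "N");
	return 0;
}
```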
-rw-r--r-- | drivers/base/regmap/regmap-debugfs.c | 90 |
1 file changed, 86 insertions(+), 4 deletions(-)
```diff
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5799a0b..6a61e4f 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -469,6 +469,87 @@ static const struct file_operations regmap_access_fops = {
 	.llseek = default_llseek,
 };
 
+static ssize_t regmap_cache_only_write_file(struct file *file,
+					    const char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_only);
+	ssize_t result;
+	bool was_enabled, require_sync = false;
+	int err;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_only;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0) {
+		map->unlock(map->lock_arg);
+		return result;
+	}
+
+	if (map->cache_only && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_only && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+		require_sync = true;
+	}
+
+	map->unlock(map->lock_arg);
+
+	if (require_sync) {
+		err = regcache_sync(map);
+		if (err)
+			dev_err(map->dev, "Failed to sync cache %d\n", err);
+	}
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct regmap *map = container_of(file->private_data,
+					  struct regmap, cache_bypass);
+	ssize_t result;
+	bool was_enabled;
+
+	map->lock(map->lock_arg);
+
+	was_enabled = map->cache_bypass;
+
+	result = debugfs_write_file_bool(file, user_buf, count, ppos);
+	if (result < 0)
+		goto out;
+
+	if (map->cache_bypass && !was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	} else if (!map->cache_bypass && was_enabled) {
+		dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+	}
+
+out:
+	map->unlock(map->lock_arg);
+
+	return result;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+	.open = simple_open,
+	.read = debugfs_read_file_bool,
+	.write = regmap_cache_bypass_write_file,
+};
+
 void regmap_debugfs_init(struct regmap *map, const char *name)
 {
 	struct rb_node *next;
@@ -530,12 +611,13 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 	}
 
 	if (map->cache_type) {
-		debugfs_create_bool("cache_only", 0400, map->debugfs,
-				    &map->cache_only);
+		debugfs_create_file("cache_only", 0600, map->debugfs,
+				    &map->cache_only, &regmap_cache_only_fops);
 		debugfs_create_bool("cache_dirty", 0400, map->debugfs,
 				    &map->cache_dirty);
-		debugfs_create_bool("cache_bypass", 0400, map->debugfs,
-				    &map->cache_bypass);
+		debugfs_create_file("cache_bypass", 0600, map->debugfs,
+				    &map->cache_bypass,
+				    &regmap_cache_bypass_fops);
 	}
 
 	next = rb_first(&map->range_tree);
```
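A note on the pattern the two write handlers rely on (illustration only, not part of the patch): debugfs_create_file() is passed a pointer to the bool member (&map->cache_only or &map->cache_bypass), simple_open() stores that pointer in file->private_data, and container_of() walks back from the member pointer to the enclosing struct regmap. The handlers also drop the regmap lock before calling regcache_sync(), since the regcache code takes the lock itself. A standalone sketch of the container_of() recovery step, with made-up structure names:

```c
/* Standalone illustration of the container_of() pattern; names are made up. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_regmap {			/* stand-in for struct regmap */
	int id;
	bool cache_only;
	bool cache_bypass;
};

int main(void)
{
	struct fake_regmap map = { .id = 42 };

	/* What debugfs would hand back in file->private_data. */
	bool *flag = &map.cache_only;

	/* Recover the enclosing structure from the member pointer. */
	struct fake_regmap *owner =
		container_of(flag, struct fake_regmap, cache_only);

	printf("recovered id = %d\n", owner->id);	/* prints 42 */
	return 0;
}
```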