author     Joel Fernandes <joelaf@google.com>    2016-10-20 00:34:00 -0700
committer  Kees Cook <keescook@chromium.org>     2016-11-11 10:35:37 -0800
commit     109704492ef637956265ec2eb72ae7b3b39eb6f4 (patch)
tree       8faf79b5252f5c7b14ce25d58c2d758b83ed7970 /fs/pstore
parent     959217c84c2765434cd2cbf3d7ffdc09e90834f9 (diff)
pstore: Make spinlock per zone instead of global
Currently pstore uses a single global spinlock for all zones. Since the zones are independent and modify different areas of memory, there is no need for a global lock, so use a per-zone lock as introduced here. Also, ramoops's ftrace use-case will later gain a FTRACE_PER_CPU flag, which splits the ftrace memory area into a single zone per CPU and thereby removes the need for locking there entirely. In preparation for this, make the locking optional.

Signed-off-by: Joel Fernandes <joelaf@google.com>
[kees: updated commit message]
Signed-off-by: Kees Cook <keescook@chromium.org>
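Note: the per-zone lock field itself is declared in include/linux/pstore_ram.h, which falls outside the fs/pstore-limited diffstat below. As a minimal sketch of the idea, struct persistent_ram_zone would carry the new member roughly as follows; unrelated fields are abridged and shown only for illustration:

struct persistent_ram_zone {
	phys_addr_t paddr;
	size_t size;
	void *vaddr;
	struct persistent_ram_buffer *buffer;
	size_t buffer_size;
	raw_spinlock_t buffer_lock;	/* protects buffer->start and buffer->size of this zone only */
	/* ... ECC and old-log bookkeeping fields elided ... */
};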
Diffstat (limited to 'fs/pstore')
-rw-r--r--   fs/pstore/ram_core.c   11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 3975dee..cb92055 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -48,8 +48,6 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
return atomic_read(&prz->buffer->start);
}
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
@@ -57,7 +55,7 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
int new;
unsigned long flags;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->start);
new = old + a;
@@ -65,7 +63,7 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
new -= prz->buffer_size;
atomic_set(&prz->buffer->start, new);
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
return old;
}
@@ -77,7 +75,7 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
size_t new;
unsigned long flags;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->size);
if (old == prz->buffer_size)
@@ -89,7 +87,7 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
atomic_set(&prz->buffer->size, new);
exit:
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -493,6 +491,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
prz->buffer->sig = sig;
persistent_ram_zap(prz);
+ prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
return 0;
}
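To see the end result in one place, this is roughly how buffer_start_add() reads with the hunks above applied. It is reconstructed from the diff fragments; the wrap-around loop between the two hunks is not shown in the diff and is filled in from the surrounding kernel source of that era, so exact whitespace and context lines may differ slightly:

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags;

	raw_spin_lock_irqsave(&prz->buffer_lock, flags);	/* per-zone, no longer global */

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

The matching hunk at the bottom of the diff initializes the lock in persistent_ram_post_init() via __RAW_SPIN_LOCK_UNLOCKED(), so each zone's lock is ready before the zone is handed to pstore, and contention is confined to writers of the same zone rather than serialized across all zones.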