summaryrefslogtreecommitdiffstats
path: root/lib/sbitmap.c
diff options
context:
space:
mode:
authorOmar Sandoval <osandov@fb.com>2016-09-17 01:28:22 -0700
committerJens Axboe <axboe@fb.com>2016-09-17 08:39:08 -0600
commit48e28166a7b608e19a6aea3acadd81cdfe660f6b (patch)
treeece76660b963252e9371c8aea0a4d04f8b69c42f /lib/sbitmap.c
parent88459642cba452630326b9cab1c651e09577d4e4 (diff)
downloadop-kernel-dev-48e28166a7b608e19a6aea3acadd81cdfe660f6b.zip
op-kernel-dev-48e28166a7b608e19a6aea3acadd81cdfe660f6b.tar.gz
sbitmap: allocate wait queues on a specific node
The original bt_alloc() we converted from was using kzalloc(), not kzalloc_node(), to allocate the wait queues. This was probably an oversight, so fix it for sbitmap_queue_init_node().

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--lib/sbitmap.c2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index dfc084a..4d8e97e 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -208,7 +208,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->wake_batch = sbq_calc_wake_batch(depth);
atomic_set(&sbq->wake_index, 0);
- sbq->ws = kzalloc(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags);
+ sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
sbitmap_free(&sbq->sb);
return -ENOMEM;
OpenPOWER on IntegriCloud