Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c    |  2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  |  3
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c   |  2
-rw-r--r--  drivers/block/nbd.c                 | 15
-rw-r--r--  drivers/block/osdblk.c              |  2
-rw-r--r--  drivers/block/paride/pd.c           |  2
-rw-r--r--  drivers/block/pktcdvd.c             |  4
-rw-r--r--  drivers/block/zram/zram_drv.c       | 28
8 files changed, 26 insertions, 32 deletions
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d3d73d1..9462d27 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1007,7 +1007,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
 	bm_set_page_unchanged(b->bm_pages[page_nr]);
 
 	if (ctx->flags & BM_AIO_COPY_PAGES) {
-		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
 		copy_highpage(page, b->bm_pages[page_nr]);
 		bm_store_page_idx(page, page_nr);
 	} else
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c097909..b4b5680 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -357,7 +357,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
 	}
 
 	if (has_payload && data_size) {
-		page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT));
+		page = drbd_alloc_pages(peer_device, nr_pages,
+					gfpflags_allow_blocking(gfp_mask));
 		if (!page)
 			goto fail;
 	}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index f504232..a28a562 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
 	struct request *rq;
 
-	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
 	return blk_mq_rq_to_pdu(rq);
 }
 
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1b87623..93b3f99 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -444,9 +444,7 @@ static int nbd_thread_recv(struct nbd_device *nbd)
 	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	if (signal_pending(current)) {
-		siginfo_t info;
-
-		ret = dequeue_signal_lock(current, &current->blocked, &info);
+		ret = kernel_dequeue_signal(NULL);
 		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
 			 task_pid_nr(current), current->comm, ret);
 		mutex_lock(&nbd->tx_lock);
@@ -560,11 +558,8 @@ static int nbd_thread_send(void *data)
 					 !list_empty(&nbd->waiting_queue));
 
 		if (signal_pending(current)) {
-			siginfo_t info;
-			int ret;
+			int ret = kernel_dequeue_signal(NULL);
 
-			ret = dequeue_signal_lock(current, &current->blocked,
-						  &info);
 			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
 				 task_pid_nr(current), current->comm, ret);
 			mutex_lock(&nbd->tx_lock);
@@ -592,10 +587,8 @@ static int nbd_thread_send(void *data)
 	spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 
 	/* Clear maybe pending signals */
-	if (signal_pending(current)) {
-		siginfo_t info;
-		dequeue_signal_lock(current, &current->blocked, &info);
-	}
+	if (signal_pending(current))
+		kernel_dequeue_signal(NULL);
 
 	return 0;
 }
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index e229425..1b709a4 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -271,7 +271,7 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
 			goto err_out;
 
 		tmp->bi_bdev = NULL;
-		gfpmask &= ~__GFP_WAIT;
+		gfpmask &= ~__GFP_DIRECT_RECLAIM;
 		tmp->bi_next = NULL;
 
 		if (!new_chain)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index b9242d7..562b5a4 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
 	struct request *rq;
 	int err = 0;
 
-	rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index cd813f9..2f477d4 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -704,14 +704,14 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	int ret = 0;
 
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-			     WRITE : READ, __GFP_WAIT);
+			     WRITE : READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
 
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-				      __GFP_WAIT);
+				      __GFP_RECLAIM);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9fa15bb..81a557c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -106,7 +106,7 @@ static void zram_set_obj_size(struct zram_meta *meta,
 	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
-static inline int is_partial_io(struct bio_vec *bvec)
+static inline bool is_partial_io(struct bio_vec *bvec)
 {
 	return bvec->bv_len != PAGE_SIZE;
 }
@@ -114,25 +114,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 /*
  * Check if request is within bounds and aligned on zram logical blocks.
  */
-static inline int valid_io_request(struct zram *zram,
+static inline bool valid_io_request(struct zram *zram,
 		sector_t start, unsigned int size)
 {
 	u64 end, bound;
 
 	/* unaligned request */
 	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
-		return 0;
+		return false;
 	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
-		return 0;
+		return false;
 
 	end = start + (size >> SECTOR_SHIFT);
 	bound = zram->disksize >> SECTOR_SHIFT;
 	/* out of range range */
 	if (unlikely(start >= bound || end > bound || start > end))
-		return 0;
+		return false;
 
 	/* I/O request is valid */
-	return 1;
+	return true;
 }
 
 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
@@ -157,7 +157,7 @@ static inline void update_used_max(struct zram *zram,
 	} while (old_max != cur_max);
 }
 
-static int page_zero_filled(void *ptr)
+static bool page_zero_filled(void *ptr)
 {
 	unsigned int pos;
 	unsigned long *page;
@@ -166,10 +166,10 @@ static bool page_zero_filled(void *ptr)
 
 	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
 		if (page[pos])
-			return 0;
+			return false;
 	}
 
-	return 1;
+	return true;
 }
 
 static void handle_zero_page(struct bio_vec *bvec)
@@ -365,6 +365,9 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 	size_t sz;
 
+	if (!zcomp_available_algorithm(buf))
+		return -EINVAL;
+
 	down_write(&zram->init_lock);
 	if (init_done(zram)) {
 		up_write(&zram->init_lock);
@@ -378,9 +381,6 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	if (sz > 0 && zram->compressor[sz - 1] == '\n')
 		zram->compressor[sz - 1] = 0x00;
 
-	if (!zcomp_available_algorithm(zram->compressor))
-		len = -EINVAL;
-
 	up_write(&zram->init_lock);
 	return len;
 }
@@ -726,14 +726,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	alloced_pages = zs_get_total_pages(meta->mem_pool);
+	update_used_max(zram, alloced_pages);
+
 	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
 		zs_free(meta->mem_pool, handle);
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	update_used_max(zram, alloced_pages);
-
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
 
 	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
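For context (not part of the commit itself): the __GFP_WAIT call sites above either become __GFP_RECLAIM, where the caller still passes gfp flags through, or collapse to a plain boolean via gfpflags_allow_blocking(), as in the drbd_receiver.c hunk; osdblk clears only __GFP_DIRECT_RECLAIM because that is the bit that actually permits sleeping. A rough, paraphrased sketch of the relevant include/linux/gfp.h definitions at the time of this series follows; the in-tree macros use the internal ___GFP_* values and __force casts, so treat this as an approximation rather than the exact source.

/* Paraphrased approximation of include/linux/gfp.h; not part of this diff. */
#define __GFP_RECLAIM	(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

/* True when the allocation may sleep to reclaim memory (the old __GFP_WAIT sense). */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

The nbd.c hunks similarly switch to the kernel_dequeue_signal() helper, which dequeues a pending signal for the current task under its sighand lock; passing NULL indicates the caller does not need the siginfo.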