Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c     | 12
-rw-r--r--  block/bio.c             |  4
-rw-r--r--  block/blk-mq-sched.h    |  2
-rw-r--r--  block/blk-mq.c          | 18
-rw-r--r--  block/blk-settings.c    | 46
-rw-r--r--  block/partitions/core.c |  2
-rw-r--r--  block/partitions/ibm.c  |  7
-rw-r--r--  block/scsi_ioctl.c      | 13
8 files changed, 66 insertions(+), 38 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a251b80d8ee5..9e81d1052091 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5900,18 +5900,6 @@ static void bfq_finish_requeue_request(struct request *rq)
 	struct bfq_data *bfqd;
 
 	/*
-	 * Requeue and finish hooks are invoked in blk-mq without
-	 * checking whether the involved request is actually still
-	 * referenced in the scheduler. To handle this fact, the
-	 * following two checks make this function exit in case of
-	 * spurious invocations, for which there is nothing to do.
-	 *
-	 * First, check whether rq has nothing to do with an elevator.
-	 */
-	if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
-		return;
-
-	/*
 	 * rq either is not associated with any icq, or is an already
 	 * requeued request that has not (yet) been re-inserted into
 	 * a bfq_queue.
diff --git a/block/bio.c b/block/bio.c
index b42e046b12eb..640d0fb74a8b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -877,8 +877,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
-			if (bio->bi_iter.bi_size > UINT_MAX - len)
+			if (bio->bi_iter.bi_size > UINT_MAX - len) {
+				*same_page = false;
 				return false;
+			}
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index fe62e7ce0f9e..0476360f05f1 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -63,7 +63,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct elevator_queue *e = q->elevator;
 
-	if (e && e->type->ops.requeue_request)
+	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
 		e->type->ops.requeue_request(rq);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e11ee801ba19..deca157032c2 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1413,6 +1413,11 @@ out:
 
 	hctx->dispatched[queued_to_index(queued)]++;
 
+	/* If we didn't flush the entire list, we could have told the driver
+	 * there was more coming, but that turned out to be a lie.
+	 */
+	if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued)
+		q->mq_ops->commit_rqs(hctx);
 	/*
 	 * Any items that need requeuing? Stuff them into hctx->dispatch,
 	 * that is where we will continue on next queue run.
@@ -1426,14 +1431,6 @@ out:
 
 		blk_mq_release_budgets(q, nr_budgets);
 
-		/*
-		 * If we didn't flush the entire list, we could have told
-		 * the driver there was more coming, but that turned out to
-		 * be a lie.
-		 */
-		if (q->mq_ops->commit_rqs && queued)
-			q->mq_ops->commit_rqs(hctx);
-
 		spin_lock(&hctx->lock);
 		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
@@ -2085,6 +2082,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
 	int queued = 0;
+	int errors = 0;
 
 	while (!list_empty(list)) {
 		blk_status_t ret;
@@ -2101,6 +2099,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 				break;
 			}
 			blk_mq_end_request(rq, ret);
+			errors++;
 		} else
 			queued++;
 	}
@@ -2110,7 +2109,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued)
+	if ((!list_empty(list) || errors) &&
+	    hctx->queue->mq_ops->commit_rqs && queued)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
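The blk-mq hunks above change when a driver's commit_rqs hook fires: it must now also run when some requests were queued but later ones errored out, because the queued requests were submitted with the promise that more were coming. Below is a toy userspace model of that doorbell contract; queue_rq and commit_rqs mirror the mq_ops hook names, but all the types and the request list are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum status { STS_OK, STS_ERROR };

/* Pretend driver: it batches submissions and only rings its doorbell
 * when it sees a request flagged as the last one of the batch. */
static bool doorbell_rung;

static enum status queue_rq(int rq, bool last)
{
	if (rq < 0)
		return STS_ERROR;	/* simulate a failed submission */
	if (last)
		doorbell_rung = true;	/* driver self-commits on "last" */
	printf("queued rq %d (last=%d)\n", rq, (int)last);
	return STS_OK;
}

static void commit_rqs(void)
{
	doorbell_rung = true;
	puts("commit_rqs: ringing the doorbell explicitly");
}

int main(void)
{
	int list[] = { 1, 2, -1 };	/* the final request fails */
	size_t n = sizeof(list) / sizeof(list[0]);
	int queued = 0, errors = 0;

	for (size_t i = 0; i < n; i++) {
		if (queue_rq(list[i], i == n - 1) != STS_OK) {
			errors++;
			continue;
		}
		queued++;
	}

	/*
	 * Mirror of the patched condition: requests were queued, but the
	 * driver never saw "last", so tell it explicitly that no more
	 * are coming. The kernel check also covers the case where the
	 * list was abandoned early (list not empty), not just errors.
	 */
	if (errors && queued && !doorbell_rung)
		commit_rqs();
	return 0;
}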
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 4f6eb4bb1723..9741d1d83e98 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -817,6 +817,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
 
+/**
+ * blk_queue_set_zoned - configure a disk queue zoned model.
+ * @disk:	the gendisk of the queue to configure
+ * @model:	the zoned model to set
+ *
+ * Set the zoned model of the request queue of @disk according to @model.
+ * When @model is BLK_ZONED_HM (host managed), this should be called only
+ * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
+ * If @model specifies BLK_ZONED_HA (host aware), the effective model used
+ * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
+ * on the disk.
+ */
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
+{
+	switch (model) {
+	case BLK_ZONED_HM:
+		/*
+		 * Host managed devices are supported only if
+		 * CONFIG_BLK_DEV_ZONED is enabled.
+		 */
+		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
+		break;
+	case BLK_ZONED_HA:
+		/*
+		 * Host aware devices can be treated either as regular block
+		 * devices (similar to drive managed devices) or as zoned block
+		 * devices to take advantage of the zone command set, similarly
+		 * to host managed devices. We try the latter if there are no
+		 * partitions and zoned block device support is enabled, else
+		 * we do nothing special as far as the block layer is concerned.
+		 */
+		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
+		    disk_has_partitions(disk))
+			model = BLK_ZONED_NONE;
+		break;
+	case BLK_ZONED_NONE:
+	default:
+		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
+			model = BLK_ZONED_NONE;
+		break;
+	}
+
+	disk->queue->limits.zoned = model;
+}
+EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
+
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 5309e0f44ba3..a02e22411594 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -540,7 +540,7 @@ int bdev_del_partition(struct block_device *bdev, int partno)
 
 	bdevp = bdget_disk(bdev->bd_disk, partno);
 	if (!bdevp)
-		return -ENOMEM;
+		return -ENXIO;
 
 	mutex_lock(&bdevp->bd_mutex);
 	mutex_lock_nested(&bdev->bd_mutex, 1);
diff --git a/block/partitions/ibm.c b/block/partitions/ibm.c
index d6e18df9c53c..4b044e620d35 100644
--- a/block/partitions/ibm.c
+++ b/block/partitions/ibm.c
@@ -305,8 +305,6 @@ int ibm_partition(struct parsed_partitions *state)
 	if (!disk->fops->getgeo)
 		goto out_exit;
 	fn = symbol_get(dasd_biodasdinfo);
-	if (!fn)
-		goto out_exit;
 	blocksize = bdev_logical_block_size(bdev);
 	if (blocksize <= 0)
 		goto out_symbol;
@@ -326,7 +324,7 @@ int ibm_partition(struct parsed_partitions *state)
 	geo->start = get_start_sect(bdev);
 	if (disk->fops->getgeo(bdev, geo))
 		goto out_freeall;
-	if (fn(disk, info)) {
+	if (!fn || fn(disk, info)) {
 		kfree(info);
 		info = NULL;
 	}
@@ -370,7 +368,8 @@ out_nolab:
 out_nogeo:
 	kfree(info);
 out_symbol:
-	symbol_put(dasd_biodasdinfo);
+	if (fn)
+		symbol_put(dasd_biodasdinfo);
out_exit:
 	return res;
 }
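The new blk_queue_set_zoned() helper above centralizes the policy for picking the effective zoned model: host-managed is taken at face value (the caller is responsible for having zoned support compiled in), while host-aware silently degrades to a regular device when CONFIG_BLK_DEV_ZONED is off or the disk is partitioned. A small standalone sketch of that decision table follows; the enum and function names are hypothetical stand-ins, and the real function additionally WARNs on invalid input and stores the result in queue->limits.zoned.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirror of enum blk_zoned_model. */
enum zoned_model { ZONED_NONE, ZONED_HA, ZONED_HM };

static enum zoned_model effective_model(enum zoned_model requested,
					bool zoned_support,
					bool has_partitions)
{
	switch (requested) {
	case ZONED_HM:
		/* Host managed: honored as-is. */
		return ZONED_HM;
	case ZONED_HA:
		/* Host aware: zoned only without partitions and with
		 * zoned block device support enabled. */
		if (!zoned_support || has_partitions)
			return ZONED_NONE;
		return ZONED_HA;
	default:
		return ZONED_NONE;
	}
}

int main(void)
{
	/* Partitioned host-aware disk: treated as a regular device. */
	printf("HA + partitions   -> %d\n",
	       effective_model(ZONED_HA, true, true));
	/* Unpartitioned host-aware disk with zoned support: kept zoned. */
	printf("HA, no partitions -> %d\n",
	       effective_model(ZONED_HA, true, false));
	return 0;
}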
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index d4abd1ed5a2d..198bb33481fb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -331,16 +331,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 		struct iov_iter i;
 		struct iovec *iov = NULL;
 
-#ifdef CONFIG_COMPAT
-		if (in_compat_syscall())
-			ret = compat_import_iovec(rq_data_dir(rq),
-				   hdr->dxferp, hdr->iovec_count,
-				   0, &iov, &i);
-		else
-#endif
-			ret = import_iovec(rq_data_dir(rq),
-				   hdr->dxferp, hdr->iovec_count,
-				   0, &iov, &i);
+		ret = import_iovec(rq_data_dir(rq), hdr->dxferp,
+				   hdr->iovec_count, 0, &iov, &i);
 		if (ret < 0)
 			goto out_free_cdb;
 
@@ -649,6 +641,7 @@ struct compat_cdrom_generic_command {
 	compat_int_t	stat;
 	compat_caddr_t	sense;
 	unsigned char	data_direction;
+	unsigned char	pad[3];
 	compat_int_t	quiet;
 	compat_int_t	timeout;
 	compat_caddr_t	reserved[1];
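The pad[3] member added to compat_cdrom_generic_command turns the compiler's implicit alignment hole after data_direction into a named field. A userspace sketch of why that matters follows; the leading fields (cmd/buffer/buflen) and the 32-bit typedefs are reconstructed assumptions, since the hunk only shows the tail of the struct. The point it demonstrates: C zero-initializes every named member omitted from a designated initializer, pad[] included, whereas anonymous padding bytes carry no such guarantee, so copying the old struct out wholesale could expose stale memory.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t  compat_int_t;	/* stand-ins for the kernel typedefs */
typedef uint32_t compat_caddr_t;

struct cgc32 {			/* layout mirroring the patched struct */
	unsigned char	cmd[12];
	compat_caddr_t	buffer;
	compat_int_t	buflen;
	compat_int_t	stat;
	compat_caddr_t	sense;
	unsigned char	data_direction;
	unsigned char	pad[3];	/* previously an unnamed 3-byte hole */
	compat_int_t	quiet;
	compat_int_t	timeout;
	compat_caddr_t	reserved[1];
};

int main(void)
{
	/* pad[] is a named member, so this initializer zeroes it. */
	struct cgc32 cgc = { .data_direction = 1, .quiet = 0 };

	printf("quiet at offset %zu, pad bytes: %u %u %u\n",
	       offsetof(struct cgc32, quiet),
	       cgc.pad[0], cgc.pad[1], cgc.pad[2]);
	return 0;
}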