From e2e1a148bc45855816ae6b4692ce29d0020fa22e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 9 Jun 2010 10:42:09 +0200 Subject: block: add sysfs knob for turning off disk entropy contributions There are two reasons for doing this: - On SSD disks, the completion times aren't as random as they are for rotational drives. So it's questionable whether they should contribute to the random pool in the first place. - Calling add_disk_randomness() has a lot of overhead. This adds /sys/block/<dev>/queue/add_random that will allow you to switch it off on a per-device basis. The default setting is on, so there should be no functional changes from this patch. Signed-off-by: Jens Axboe --- block/blk-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index f0640d7f800f..b4131d29148c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2111,7 +2111,8 @@ static bool blk_update_bidi_request(struct request *rq, int error, blk_update_request(rq->next_rq, error, bidi_bytes)) return true; - add_disk_randomness(rq->rq_disk); + if (blk_queue_add_random(rq->q)) + add_disk_randomness(rq->rq_disk); return false; } -- cgit v1.2.3 From 33659ebbae262228eef4e0fe990f393d1f0ed941 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 7 Aug 2010 18:17:56 +0200 Subject: block: remove wrappers for request type/flags Remove all the trivial wrappers for the cmd_type and cmd_flags fields in struct request. This allows much easier grepping for different request types instead of unwinding through macros. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-barrier.c | 7 ++-- block/blk-core.c | 13 ++++---- block/blk-exec.c | 2 +- block/blk-merge.c | 4 +-- block/blk.h | 6 ++-- block/cfq-iosched.c | 19 ++++----- block/elevator.c | 16 +++++---- drivers/ata/libata-scsi.c | 2 +- drivers/block/cciss.c | 49 ++++++++++++++++----------- drivers/block/hd.c | 2 +- drivers/block/mg_disk.c | 4 +-- drivers/block/nbd.c | 2 +- drivers/block/osdblk.c | 3 +- drivers/block/paride/pd.c | 2 +- drivers/block/ps3disk.c | 2 +- drivers/block/ub.c | 8 ++--- drivers/block/viodasd.c | 2 +- drivers/block/virtio_blk.c | 15 +++++---- drivers/block/xd.c | 2 +- drivers/block/xen-blkfront.c | 4 +-- drivers/block/xsysace.c | 2 +- drivers/cdrom/gdrom.c | 2 +- drivers/cdrom/viocd.c | 2 +- drivers/ide/ide-atapi.c | 17 ++++++---- drivers/ide/ide-cd.c | 66 ++++++++++++++++++++----------------- drivers/ide/ide-disk.c | 2 +- drivers/ide/ide-eh.c | 5 +-- drivers/ide/ide-floppy.c | 25 +++++++++----- drivers/ide/ide-io.c | 8 ++--- drivers/ide/ide-pm.c | 8 ++--- drivers/ide/ide-tape.c | 3 +- drivers/md/dm.c | 10 +++--- drivers/memstick/core/mspro_block.c | 3 +- drivers/message/i2o/i2o_block.c | 2 +- drivers/mmc/card/queue.c | 2 +- drivers/mtd/mtd_blkdevs.c | 4 +-- drivers/scsi/scsi_error.c | 10 +++--- drivers/scsi/scsi_lib.c | 5 +-- drivers/scsi/sd.c | 12 +++---- drivers/scsi/sun3_NCR5380.c | 2 +- drivers/scsi/sun3_scsi.c | 2 +- drivers/scsi/sun3_scsi_vme.c | 2 +- drivers/staging/hv/blkvsc_drv.c | 8 +++-- include/linux/blkdev.h | 41 ++++++++--------------- include/linux/blktrace_api.h | 2 +- include/trace/events/block.h | 15 ++++++--- kernel/trace/blktrace.c | 10 +++--- 47 files changed, 236 insertions(+), 198 deletions(-) (limited to 'block/blk-core.c') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 0d710c9d403b..74e404393172 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -79,7 +79,7 @@ unsigned blk_ordered_req_seq(struct
request *rq) * * http://thread.gmane.org/gmane.linux.kernel/537473 */ - if (!blk_fs_request(rq)) + if (rq->cmd_type != REQ_TYPE_FS) return QUEUE_ORDSEQ_DRAIN; if ((rq->cmd_flags & REQ_ORDERED_COLOR) == @@ -236,7 +236,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) bool blk_do_ordered(struct request_queue *q, struct request **rqp) { struct request *rq = *rqp; - const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); + const int is_barrier = rq->cmd_type == REQ_TYPE_FS && + (rq->cmd_flags & REQ_HARDBARRIER); if (!q->ordseq) { if (!is_barrier) @@ -261,7 +262,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp) */ /* Special requests are not subject to ordering rules. */ - if (!blk_fs_request(rq) && + if (rq->cmd_type != REQ_TYPE_FS && rq != &q->pre_flush_rq && rq != &q->post_flush_rq) return true; diff --git a/block/blk-core.c b/block/blk-core.c index b4131d29148c..dca43a31e725 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); for (bit = 0; bit < BLK_MAX_CDB; bit++) printk("%02x ", rq->cmd[bit]); @@ -1796,7 +1796,7 @@ struct request *blk_peek_request(struct request_queue *q) * sees this request (possibly after * requeueing). Notify IO scheduler. */ - if (blk_sorted_rq(rq)) + if (rq->cmd_flags & REQ_SORTED) elv_activate_rq(q, rq); /* @@ -1984,10 +1984,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) * TODO: tj: This is too subtle. It would be better to let * low level drivers do what they see fit. */ - if (blk_fs_request(req)) + if (req->cmd_type == REQ_TYPE_FS) req->errors = 0; - if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { + if (error && req->cmd_type == REQ_TYPE_FS && + !(req->cmd_flags & REQ_QUIET)) { printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", req->rq_disk ? 
req->rq_disk->disk_name : "?", (unsigned long long)blk_rq_pos(req)); @@ -2074,7 +2075,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ - if (blk_fs_request(req) || blk_discard_rq(req)) + if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) req->__sector += total_bytes >> 9; /* mixed attributes always follow the first bio */ @@ -2127,7 +2128,7 @@ static void blk_finish_request(struct request *req, int error) BUG_ON(blk_queued_rq(req)); - if (unlikely(laptop_mode) && blk_fs_request(req)) + if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) laptop_io_completion(&req->q->backing_dev_info); blk_delete_timer(req); diff --git a/block/blk-exec.c b/block/blk-exec.c index 49557e91f0da..e1672f14840e 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, __elv_add_request(q, rq, where, 1); __generic_unplug_device(q); /* the queue is stopped so it won't be plugged+unplugged */ - if (blk_pm_resume_request(rq)) + if (rq->cmd_type == REQ_TYPE_PM_RESUME) q->request_fn(q); spin_unlock_irq(q->queue_lock); } diff --git a/block/blk-merge.c b/block/blk-merge.c index 5e7dc9973458..87e4fb7d0e98 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -226,7 +226,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, { unsigned short max_sectors; - if (unlikely(blk_pc_request(req))) + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) max_sectors = queue_max_hw_sectors(q); else max_sectors = queue_max_sectors(q); @@ -250,7 +250,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, { unsigned short max_sectors; - if (unlikely(blk_pc_request(req))) + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) max_sectors = queue_max_hw_sectors(q); else max_sectors = queue_max_sectors(q); diff --git a/block/blk.h b/block/blk.h index 5ee3d7e72feb..6e7dc87141e4 100644 --- a/block/blk.h +++ b/block/blk.h @@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu) */ static inline int blk_do_io_stat(struct request *rq) { - return rq->rq_disk && blk_rq_io_stat(rq) && - (blk_fs_request(rq) || blk_discard_rq(rq)); + return rq->rq_disk && + (rq->cmd_flags & REQ_IO_STAT) && + (rq->cmd_type == REQ_TYPE_FS || + (rq->cmd_flags & REQ_DISCARD)); } #endif diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 7982b830db58..d4edeb8fceb8 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, return rq1; else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) return rq2; - if (rq_is_meta(rq1) && !rq_is_meta(rq2)) + if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META)) return rq1; - else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) + else if ((rq2->cmd_flags & REQ_RW_META) && + !(rq1->cmd_flags & REQ_RW_META)) return rq2; s1 = blk_rq_pos(rq1); @@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq) cfqq->cfqd->rq_queued--; cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq), rq_is_sync(rq)); - if (rq_is_meta(rq)) { + if (rq->cmd_flags & REQ_RW_META) { WARN_ON(!cfqq->meta_pending); cfqq->meta_pending--; } @@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, * So both queues are sync. 
Let the new request get disk time if * it's a metadata request and the current queue is doing regular IO. */ - if (rq_is_meta(rq) && !cfqq->meta_pending) + if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending) return true; /* @@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_io_context *cic = RQ_CIC(rq); cfqd->rq_queued++; - if (rq_is_meta(rq)) + if (rq->cmd_flags & REQ_RW_META) cfqq->meta_pending++; cfq_update_io_thinktime(cfqd, cic); @@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) unsigned long now; now = jiffies; - cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq)); + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", + !!(rq->cmd_flags & REQ_NOIDLE)); cfq_update_hw_tag(cfqd); @@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) cfq_slice_expired(cfqd, 1); else if (sync && cfqq_empty && !cfq_close_cooperator(cfqd, cfqq)) { - cfqd->noidle_tree_requires_idle |= !rq_noidle(rq); + cfqd->noidle_tree_requires_idle |= + !(rq->cmd_flags & REQ_NOIDLE); /* * Idling is enabled for SYNC_WORKLOAD. * SYNC_NOIDLE_WORKLOAD idles at the end of the tree - * only if we processed at least one !rq_noidle request + * only if we processed at least one !REQ_NOIDLE request */ if (cfqd->serving_type == SYNC_WORKLOAD || cfqd->noidle_tree_requires_idle diff --git a/block/elevator.c b/block/elevator.c index 923a9139106c..aa99b59c03d6 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -428,7 +428,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); - if (blk_discard_rq(rq) != blk_discard_rq(pos)) + if ((rq->cmd_flags & REQ_DISCARD) != + (pos->cmd_flags & REQ_DISCARD)) break; if (rq_data_dir(rq) != rq_data_dir(pos)) break; @@ -558,7 +559,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) */ if (blk_account_rq(rq)) { q->in_flight[rq_is_sync(rq)]--; - if (blk_sorted_rq(rq)) + if (rq->cmd_flags & REQ_SORTED) elv_deactivate_rq(q, rq); } @@ -644,7 +645,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) break; case ELEVATOR_INSERT_SORT: - BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq)); + BUG_ON(rq->cmd_type != REQ_TYPE_FS && + !(rq->cmd_flags & REQ_DISCARD)); rq->cmd_flags |= REQ_SORTED; q->nr_sorted++; if (rq_mergeable(rq)) { @@ -716,7 +718,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, /* * toggle ordered color */ - if (blk_barrier_rq(rq)) + if (rq->cmd_flags & REQ_HARDBARRIER) q->ordcolor ^= 1; /* @@ -729,7 +731,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, * this request is scheduling boundary, update * end_sector */ - if (blk_fs_request(rq) || blk_discard_rq(rq)) { + if (rq->cmd_type == REQ_TYPE_FS || + (rq->cmd_flags & REQ_DISCARD)) { q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; } @@ -843,7 +846,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq) */ if (blk_account_rq(rq)) { q->in_flight[rq_is_sync(rq)]--; - if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) + if ((rq->cmd_flags & REQ_SORTED) && + e->ops->elevator_completed_req_fn) e->ops->elevator_completed_req_fn(q, rq); } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a54273d2c3c6..a5c08b082edb 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1111,7 +1111,7 @@ static 
void ata_scsi_sdev_config(struct scsi_device *sdev) */ static int atapi_drain_needed(struct request *rq) { - if (likely(!blk_pc_request(rq))) + if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) return 0; if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 10a0268a1f92..11b377762b8e 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1783,7 +1783,7 @@ static void cciss_softirq_done(struct request *rq) #endif /* CCISS_DEBUG */ /* set the residual count for pc requests */ - if (blk_pc_request(rq)) + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) rq->resid_len = cmd->err_info->ResidualCnt; blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); @@ -2983,7 +2983,7 @@ static inline int evaluate_target_status(ctlr_info_t *h, driver_byte = DRIVER_OK; msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ - if (blk_pc_request(cmd->rq)) + if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) host_byte = DID_PASSTHROUGH; else host_byte = DID_OK; @@ -2992,7 +2992,7 @@ static inline int evaluate_target_status(ctlr_info_t *h, host_byte, driver_byte); if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { - if (!blk_pc_request(cmd->rq)) + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) printk(KERN_WARNING "cciss: cmd %p " "has SCSI Status 0x%x\n", cmd, cmd->err_info->ScsiStatus); @@ -3002,15 +3002,17 @@ static inline int evaluate_target_status(ctlr_info_t *h, /* check the sense key */ sense_key = 0xf & cmd->err_info->SenseInfo[2]; /* no status or recovered error */ - if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq)) + if (((sense_key == 0x0) || (sense_key == 0x1)) && + (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) error_value = 0; if (check_for_unit_attention(h, cmd)) { - *retry_cmd = !blk_pc_request(cmd->rq); + *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); return 0; } - if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */ + /* Not SG_IO or similar? */ + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { if (error_value != 0) printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION" " sense key = 0x%x\n", cmd, sense_key); @@ -3052,7 +3054,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, rq->errors = evaluate_target_status(h, cmd, &retry_cmd); break; case CMD_DATA_UNDERRUN: - if (blk_fs_request(cmd->rq)) { + if (cmd->rq->cmd_type == REQ_TYPE_FS) { printk(KERN_WARNING "cciss: cmd %p has" " completed with data underrun " "reported\n", cmd); @@ -3060,7 +3062,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, } break; case CMD_DATA_OVERRUN: - if (blk_fs_request(cmd->rq)) + if (cmd->rq->cmd_type == REQ_TYPE_FS) printk(KERN_WARNING "cciss: cmd %p has" " completed with data overrun " "reported\n", cmd); @@ -3070,42 +3072,48 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "reported invalid\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); break; case CMD_PROTOCOL_ERR: printk(KERN_WARNING "cciss: cmd %p has " "protocol error \n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
+ DID_PASSTHROUGH : DID_ERROR); break; case CMD_HARDWARE_ERR: printk(KERN_WARNING "cciss: cmd %p had " " hardware error\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); break; case CMD_CONNECTION_LOST: printk(KERN_WARNING "cciss: cmd %p had " "connection lost\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); break; case CMD_ABORTED: printk(KERN_WARNING "cciss: cmd %p was " "aborted\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ABORT); break; case CMD_ABORT_FAILED: printk(KERN_WARNING "cciss: cmd %p reports " "abort failed\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); break; case CMD_UNSOLICITED_ABORT: printk(KERN_WARNING "cciss%d: unsolicited " @@ -3121,13 +3129,15 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, "many times\n", h->ctlr, cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ABORT); break; case CMD_TIMEOUT: printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); break; default: printk(KERN_WARNING "cciss: cmd %p returned " @@ -3135,7 +3145,8 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, cmd->err_info->CommandStatus); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + DID_PASSTHROUGH : DID_ERROR); } after_error_processing: @@ -3294,7 +3305,7 @@ static void do_cciss_request(struct request_queue *q) c->Header.SGList = h->max_cmd_sgentries; set_performant_mode(h, c); - if (likely(blk_fs_request(creq))) { + if (likely(creq->cmd_type == REQ_TYPE_FS)) { if(h->cciss_read == CCISS_READ_10) { c->Request.CDB[1] = 0; c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ @@ -3324,7 +3335,7 @@ static void do_cciss_request(struct request_queue *q) c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; c->Request.CDB[14] = c->Request.CDB[15] = 0; } - } else if (blk_pc_request(creq)) { + } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { c->Request.CDBLen = creq->cmd_len; memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); } else { diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 81c78b3ce2df..30ec6b37424e 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -627,7 +627,7 @@ repeat: req_data_dir(req) == READ ? 
"read" : "writ", cyl, head, sec, nsect, req->buffer); #endif - if (blk_fs_request(req)) { + if (req->cmd_type == REQ_TYPE_FS) { switch (rq_data_dir(req)) { case READ: hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 28db925dbdad..b82c5ce5e9df 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q) break; } - if (unlikely(!blk_fs_request(host->req))) { + if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); continue; } @@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q) continue; } - if (unlikely(!blk_fs_request(req))) { + if (unlikely(req->cmd_type != REQ_TYPE_FS)) { mg_end_request_cur(host, -EIO); continue; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 218d091f3c52..2e74e7d475ca 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -448,7 +448,7 @@ static void nbd_clear_que(struct nbd_device *lo) static void nbd_handle_req(struct nbd_device *lo, struct request *req) { - if (!blk_fs_request(req)) + if (req->cmd_type != REQ_TYPE_FS) goto error_out; nbd_cmd(req) = NBD_CMD_READ; diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 6cd8b705b11b..819002ba3433 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q) break; /* filter out block requests we don't understand */ - if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) { + if (rq->cmd_type != REQ_TYPE_FS && + !(rq->cmd_flags & REQ_HARDBARRIER)) { blk_end_request_all(rq, 0); continue; } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index c1e5cd029b23..4e8b9bff3abe 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -439,7 +439,7 @@ static char *pd_buf; /* buffer for request in progress */ static enum action do_pd_io_start(void) { - if (blk_special_request(pd_req)) { + if (pd_req->cmd_type == REQ_TYPE_SPECIAL) { phase = pd_special; return pd_special(); } diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 3b419e3fffa1..5f208c0bf156 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); while ((req = blk_fetch_request(q))) { - if (blk_fs_request(req)) { + if (req->cmd_type == REQ_TYPE_FS) { if (ps3disk_submit_request_sg(dev, req)) break; } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 0536b5b29adc..034b34440ffa 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -648,7 +648,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) return 0; } - if (lun->changed && !blk_pc_request(rq)) { + if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) blk_start_request(rq); ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); return 0; @@ -684,7 +684,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) } urq->nsg = n_elem; - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { ub_cmd_build_packet(sc, lun, cmd, urq); } else { ub_cmd_build_block(sc, lun, cmd, urq); @@ -781,7 +781,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) rq = urq->rq; if (cmd->error == 0) { - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { if (cmd->act_len >= rq->resid_len) rq->resid_len = 0; else @@ -795,7 
+795,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) } } } else { - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); rq->sense_len = UB_SENSE_SIZE; diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 788d93882ab9..5663d3c284c8 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -361,7 +361,7 @@ static void do_viodasd_request(struct request_queue *q) if (req == NULL) return; /* check that request contains a valid command */ - if (!blk_fs_request(req)) { + if (req->cmd_type != REQ_TYPE_FS) { viodasd_end_request(req, -EIO, blk_rq_sectors(req)); continue; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 258bc2ae2885..774144334ece 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -65,13 +65,16 @@ static void blk_done(struct virtqueue *vq) break; } - if (blk_pc_request(vbr->req)) { + switch (vbr->req->cmd_type) { + case REQ_TYPE_BLOCK_PC: vbr->req->resid_len = vbr->in_hdr.residual; vbr->req->sense_len = vbr->in_hdr.sense_len; vbr->req->errors = vbr->in_hdr.errors; - } - if (blk_special_request(vbr->req)) + break; + case REQ_TYPE_SPECIAL: vbr->req->errors = (error != 0); + break; + } __blk_end_request_all(vbr->req, error); list_del(&vbr->list); @@ -123,7 +126,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, BUG(); } - if (blk_barrier_rq(vbr->req)) + if (vbr->req->cmd_flags & REQ_HARDBARRIER) vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); @@ -134,12 +137,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, * block, and before the normal inhdr we put the sense data and the * inhdr with additional status information before the normal inhdr. */ - if (blk_pc_request(vbr->req)) + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); - if (blk_pc_request(vbr->req)) { + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, sizeof(vbr->in_hdr)); diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 18a80ff57ce8..4dc298376098 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -322,7 +322,7 @@ static void do_xd_request (struct request_queue * q) int res = -EIO; int retry; - if (!blk_fs_request(req)) + if (req->cmd_type != REQ_TYPE_FS) goto done; if (block + count > get_capacity(req->rq_disk)) goto done; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 82ed403147c0..495533e66542 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -238,7 +238,7 @@ static int blkif_queue_request(struct request *req) ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ; - if (blk_barrier_rq(req)) + if (req->cmd_flags & REQ_HARDBARRIER) ring_req->operation = BLKIF_OP_WRITE_BARRIER; ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); @@ -309,7 +309,7 @@ static void do_blkif_request(struct request_queue *rq) blk_start_request(req); - if (!blk_fs_request(req)) { + if (req->cmd_type != REQ_TYPE_FS) { __blk_end_request_all(req, -EIO); continue; } diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index a7b83c0a7eb5..ac278ac908d5 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -465,7 +465,7 @@ struct request *ace_get_next_request(struct request_queue * q) struct request *req; while ((req = blk_peek_request(q)) != NULL) { - if (blk_fs_request(req)) + if (req->cmd_type == REQ_TYPE_FS) break; blk_start_request(req); __blk_end_request_all(req, -EIO); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 03c71f7698cb..7c05ddc63ae8 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -643,7 +643,7 @@ static void gdrom_request(struct request_queue *rq) struct request *req; while ((req = blk_fetch_request(rq)) != NULL) { - if (!blk_fs_request(req)) { + if (req->cmd_type != REQ_TYPE_FS) { printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); __blk_end_request_all(req, -EIO); continue; diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 451cd7071b1d..14e420168764 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -298,7 +298,7 @@ static void do_viocd_request(struct request_queue *q) struct request *req; while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { - if (!blk_fs_request(req)) + if (req->cmd_type != REQ_TYPE_FS) __blk_end_request_all(req, -EIO); else if (send_request(req) < 0) { printk(VIOCD_KERN_WARNING diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index f9daffd7d0e3..3117a894d20e 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -190,7 +190,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) BUG_ON(sense_len > sizeof(*sense)); - if (blk_sense_request(rq) || drive->sense_rq_armed) + if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed) return; memset(sense, 0, sizeof(*sense)); @@ -307,13 +307,16 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry); int ide_cd_get_xferlen(struct request *rq) { - if (blk_fs_request(rq)) + switch (rq->cmd_type) { + case REQ_TYPE_FS: return 32768; - else if (blk_sense_request(rq) || blk_pc_request(rq) || - rq->cmd_type == REQ_TYPE_ATA_PC) + case REQ_TYPE_SENSE: + case REQ_TYPE_BLOCK_PC: + case REQ_TYPE_ATA_PC: return blk_rq_bytes(rq); - else + default: return 0; + } } EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); @@ -474,12 +477,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) if (uptodate == 0) drive->failed_pc = NULL; - if (blk_special_request(rq)) { + if (rq->cmd_type == REQ_TYPE_SPECIAL) { rq->errors = 0; error = 0; } else { - if (blk_fs_request(rq) == 0 && uptodate <= 0) { + if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { if (rq->errors == 0) rq->errors = -EIO; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 64207df8da82..26a3688de467 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, if (!sense->valid) break; if (failed_command == NULL || - !blk_fs_request(failed_command)) + failed_command->cmd_type != REQ_TYPE_FS) break; sector = (sense->information[0] << 24) | (sense->information[1] << 16) | @@ -292,7 +292,7 @@ static int
cdrom_decode_status(ide_drive_t *drive, u8 stat) "stat 0x%x", rq->cmd[0], rq->cmd_type, err, stat); - if (blk_sense_request(rq)) { + if (rq->cmd_type == REQ_TYPE_SENSE) { /* * We got an error trying to get sense info from the drive * (probably while trying to recover from a former error). @@ -303,7 +303,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) } /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ - if (blk_pc_request(rq) && !rq->errors) + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) rq->errors = SAM_STAT_CHECK_CONDITION; if (blk_noretry_request(rq)) @@ -311,13 +311,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) switch (sense_key) { case NOT_READY: - if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) { + if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { if (ide_cd_breathe(drive, rq)) return 1; } else { cdrom_saw_media_change(drive); - if (blk_fs_request(rq) && !blk_rq_quiet(rq)) + if (rq->cmd_type == REQ_TYPE_FS && + !(rq->cmd_flags & REQ_QUIET)) printk(KERN_ERR PFX "%s: tray open\n", drive->name); } @@ -326,7 +327,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) case UNIT_ATTENTION: cdrom_saw_media_change(drive); - if (blk_fs_request(rq) == 0) + if (rq->cmd_type != REQ_TYPE_FS) return 0; /* @@ -352,7 +353,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) * No point in retrying after an illegal request or data * protect error. */ - if (!blk_rq_quiet(rq)) + if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "command error", stat); do_end_request = 1; break; @@ -361,20 +362,20 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) * No point in re-trying a zillion times on a bad sector. * If we got here the error is not correctable. */ - if (!blk_rq_quiet(rq)) + if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error " "(bad sector)", stat); do_end_request = 1; break; case BLANK_CHECK: /* disk appears blank? */ - if (!blk_rq_quiet(rq)) + if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error (blank)", stat); do_end_request = 1; break; default: - if (blk_fs_request(rq) == 0) + if (rq->cmd_type != REQ_TYPE_FS) break; if (err & ~ATA_ABORTED) { /* go to the default handler for other errors */ @@ -385,7 +386,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) do_end_request = 1; } - if (blk_fs_request(rq) == 0) { + if (rq->cmd_type != REQ_TYPE_FS) { rq->cmd_flags |= REQ_FAILED; do_end_request = 1; } @@ -525,7 +526,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) ide_expiry_t *expiry = NULL; int dma_error = 0, dma, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; - int sense = blk_sense_request(rq); + int sense = (rq->cmd_type == REQ_TYPE_SENSE); unsigned int timeout; u16 len; u8 ireason, stat; @@ -568,7 +569,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) ide_read_bcount_and_ireason(drive, &len, &ireason); - thislen = blk_fs_request(rq) ? len : cmd->nleft; + thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; if (thislen > len) thislen = len; @@ -577,7 +578,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* If DRQ is clear, the command has completed. */ if ((stat & ATA_DRQ) == 0) { - if (blk_fs_request(rq)) { + if (rq->cmd_type == REQ_TYPE_FS) { /* * If we're not done reading/writing, complain. * Otherwise, complete the command normally.
@@ -591,7 +592,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) rq->cmd_flags |= REQ_FAILED; uptodate = 0; } - } else if (!blk_pc_request(rq)) { + } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { ide_cd_request_sense_fixup(drive, cmd); uptodate = cmd->nleft ? 0 : 1; @@ -640,7 +641,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) /* pad, if necessary */ if (len > 0) { - if (blk_fs_request(rq) == 0 || write == 0) + if (rq->cmd_type != REQ_TYPE_FS || write == 0) ide_pad_transfer(drive, write, len); else { printk(KERN_ERR PFX "%s: confused, missing data\n", @@ -649,11 +650,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) } } - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { timeout = rq->timeout; } else { timeout = ATAPI_WAIT_PC; - if (!blk_fs_request(rq)) + if (rq->cmd_type != REQ_TYPE_FS) expiry = ide_cd_expiry; } @@ -662,7 +663,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) return ide_started; out_end: - if (blk_pc_request(rq) && rc == 0) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { rq->resid_len = 0; blk_end_request_all(rq, 0); hwif->rq = NULL; @@ -670,7 +671,7 @@ out_end: if (sense && uptodate) ide_cd_complete_failed_rq(drive, rq); - if (blk_fs_request(rq)) { + if (rq->cmd_type == REQ_TYPE_FS) { if (cmd->nleft == 0) uptodate = 1; } else { @@ -682,7 +683,7 @@ out_end: ide_cd_error_cmd(drive, cmd); /* make sure it's fully ended */ - if (blk_fs_request(rq) == 0) { + if (rq->cmd_type != REQ_TYPE_FS) { rq->resid_len -= cmd->nbytes - cmd->nleft; if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) rq->resid_len += cmd->last_xfer_len; @@ -742,7 +743,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", rq->cmd[0], rq->cmd_type); - if (blk_pc_request(rq)) + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) rq->cmd_flags |= REQ_QUIET; else rq->cmd_flags &= ~REQ_FAILED; @@ -783,21 +784,26 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, if (drive->debug_mask & IDE_DBG_RQ) blk_dump_rq_flags(rq, "ide_cd_do_request"); - if (blk_fs_request(rq)) { + switch (rq->cmd_type) { + case REQ_TYPE_FS: if (cdrom_start_rw(drive, rq) == ide_stopped) goto out_end; - } else if (blk_sense_request(rq) || blk_pc_request(rq) || - rq->cmd_type == REQ_TYPE_ATA_PC) { + break; + case REQ_TYPE_SENSE: + case REQ_TYPE_BLOCK_PC: + case REQ_TYPE_ATA_PC: if (!rq->timeout) rq->timeout = ATAPI_WAIT_PC; cdrom_do_block_pc(drive, rq); - } else if (blk_special_request(rq)) { + break; + case REQ_TYPE_SPECIAL: /* right now this can only be a reset... 
*/ uptodate = 1; goto out_end; - } else + default: BUG(); + } /* prepare sense request for this command */ ide_prep_sense(drive, rq); @@ -809,7 +815,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, cmd.rq = rq; - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } @@ -1365,9 +1371,9 @@ static int ide_cdrom_prep_pc(struct request *rq) static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) { - if (blk_fs_request(rq)) + if (rq->cmd_type == REQ_TYPE_FS) return ide_cdrom_prep_fs(q, rq); - else if (blk_pc_request(rq)) + else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) return ide_cdrom_prep_pc(rq); return 0; diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 33d65039cce9..df3d91ba1c96 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ide_hwif_t *hwif = drive->hwif; BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); - BUG_ON(!blk_fs_request(rq)); + BUG_ON(rq->cmd_type != REQ_TYPE_FS); ledtrig_ide_activity(); diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index e9abf2c3c335..c0aa93fb7a60 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c @@ -122,7 +122,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) return ide_stopped; /* retry only "normal" I/O: */ - if (!blk_fs_request(rq)) { + if (rq->cmd_type != REQ_TYPE_FS) { if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { struct ide_cmd *cmd = rq->special; @@ -146,7 +146,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) { struct request *rq = drive->hwif->rq; - if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { + if (rq && rq->cmd_type == REQ_TYPE_SPECIAL && + rq->cmd[0] == REQ_DRIVE_RESET) { if (err <= 0 && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq)); diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 4713bdca20b6..c7d0737bb18a 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) drive->failed_pc = NULL; if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || - (rq && blk_pc_request(rq))) + (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC)) uptodate = 1; /* FIXME */ else if (pc->c[0] == GPCMD_REQUEST_SENSE) { @@ -98,7 +98,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) "Aborting request!\n"); } - if (blk_special_request(rq)) + if (rq->cmd_type == REQ_TYPE_SPECIAL) rq->errors = uptodate ? 
0 : IDE_DRV_ERROR_GENERAL; return uptodate; @@ -247,14 +247,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, } else printk(KERN_ERR PFX "%s: I/O error\n", drive->name); - if (blk_special_request(rq)) { + if (rq->cmd_type == REQ_TYPE_SPECIAL) { rq->errors = 0; ide_complete_rq(drive, 0, blk_rq_bytes(rq)); return ide_stopped; } else goto out_end; } - if (blk_fs_request(rq)) { + + switch (rq->cmd_type) { + case REQ_TYPE_FS: if (((long)blk_rq_pos(rq) % floppy->bs_factor) || (blk_rq_sectors(rq) % floppy->bs_factor)) { printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", @@ -263,13 +265,18 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, } pc = &floppy->queued_pc; idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); - } else if (blk_special_request(rq) || blk_sense_request(rq)) { + break; + case REQ_TYPE_SPECIAL: + case REQ_TYPE_SENSE: pc = (struct ide_atapi_pc *)rq->special; - } else if (blk_pc_request(rq)) { + break; + case REQ_TYPE_BLOCK_PC: pc = &floppy->queued_pc; idefloppy_blockpc_cmd(floppy, pc, rq); - } else + break; + default: BUG(); + } ide_prep_sense(drive, rq); @@ -280,7 +287,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, cmd.rq = rq; - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } @@ -290,7 +297,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, return ide_floppy_issue_pc(drive, &cmd, pc); out_end: drive->failed_pc = NULL; - if (blk_fs_request(rq) == 0 && rq->errors == 0) + if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) rq->errors = -EIO; ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); return ide_stopped; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 172ac9218154..9304a7e54d9e 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq); void ide_kill_rq(ide_drive_t *drive, struct request *rq) { - u8 drv_req = blk_special_request(rq) && rq->rq_disk; + u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk; u8 media = drive->media; drive->failed_pc = NULL; @@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq) } else { if (media == ide_tape) rq->errors = IDE_DRV_ERROR_GENERAL; - else if (blk_fs_request(rq) == 0 && rq->errors == 0) + else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) rq->errors = -EIO; } @@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) { ide_startstop_t startstop; - BUG_ON(!blk_rq_started(rq)); + BUG_ON(!(rq->cmd_flags & REQ_STARTED)); #ifdef DEBUG printk("%s: start_request: current=0x%08lx\n", @@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) pm->pm_step == IDE_PM_COMPLETED) ide_complete_pm_rq(drive, rq); return startstop; - } else if (!rq->rq_disk && blk_special_request(rq)) + } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL) /* * TODO: Once all ULDs have been modified to * check for specific op codes rather than diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index 1c08311b0a0e..92406097efeb 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c @@ -191,10 +191,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) #ifdef DEBUG_PM printk("%s: completing PM request, %s\n", drive->name, - blk_pm_suspend_request(rq) ? "suspend" : "resume"); + (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ?
"suspend" : "resume"); #endif spin_lock_irqsave(q->queue_lock, flags); - if (blk_pm_suspend_request(rq)) + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) blk_stop_queue(q); else drive->dev_flags &= ~IDE_DFLAG_BLOCKED; @@ -210,11 +210,11 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; - if (blk_pm_suspend_request(rq) && + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && pm->pm_step == IDE_PM_START_SUSPEND) /* Mark drive blocked when starting the suspend sequence. */ drive->dev_flags |= IDE_DFLAG_BLOCKED; - else if (blk_pm_resume_request(rq) && + else if (rq->cmd_type == REQ_TYPE_PM_RESUME && pm->pm_step == IDE_PM_START_RESUME) { /* * The first thing we do on wakeup is to wait for BSY bit to diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index b07232880ec9..635fd72d4728 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -577,7 +577,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, rq->cmd[0], (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq)); - BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq))); + BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL || + rq->cmd_type == REQ_TYPE_SENSE)); /* Retry a failed packet command */ if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d21e1284604f..1e0e6dd51501 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -792,12 +792,12 @@ static void dm_end_request(struct request *clone, int error) { int rw = rq_data_dir(clone); int run_queue = 1; - bool is_barrier = blk_barrier_rq(clone); + bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER; struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig; - if (blk_pc_request(rq) && !is_barrier) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) { rq->errors = clone->errors; rq->resid_len = clone->resid_len; @@ -844,7 +844,7 @@ void dm_requeue_unmapped_request(struct request *clone) struct request_queue *q = rq->q; unsigned long flags; - if (unlikely(blk_barrier_rq(clone))) { + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { /* * Barrier clones share an original request. * Leave it to dm_end_request(), which handles this special @@ -943,7 +943,7 @@ static void dm_complete_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; - if (unlikely(blk_barrier_rq(clone))) { + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { /* * Barrier clones share an original request. So can't use * softirq_done with the original. @@ -972,7 +972,7 @@ void dm_kill_unmapped_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; - if (unlikely(blk_barrier_rq(clone))) { + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { /* * Barrier clones share an original request. 
* Leave it to dm_end_request(), which handles this special diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 8327e248520a..56645408d225 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -805,7 +805,8 @@ static void mspro_block_start(struct memstick_dev *card) static int mspro_block_prepare_req(struct request_queue *q, struct request *req) { - if (!blk_fs_request(req) && !blk_pc_request(req)) { + if (req->cmd_type != REQ_TYPE_FS && + req->cmd_type != REQ_TYPE_BLOCK_PC) { blk_dump_rq_flags(req, "MSPro unsupported request"); return BLKPREP_KILL; } diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index fc593fbab696..108f0c2b2bfd 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -883,7 +883,7 @@ static void i2o_block_request_fn(struct request_queue *q) if (!req) break; - if (blk_fs_request(req)) { + if (req->cmd_type == REQ_TYPE_FS) { struct i2o_block_delayed_request *dreq; struct i2o_block_request *ireq = req->special; unsigned int queue_depth; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index d6ded247d941..ec92bcbdeddb 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -32,7 +32,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) /* * We only like normal block requests. */ - if (!blk_fs_request(req)) { + if (req->cmd_type != REQ_TYPE_FS) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 03e19c1965cc..475af42745cb 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -73,14 +73,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, buf = req->buffer; - if (!blk_fs_request(req)) + if (req->cmd_type != REQ_TYPE_FS) return -EIO; if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(req->rq_disk)) return -EIO; - if (blk_discard_rq(req)) + if (req->cmd_flags & REQ_DISCARD) return tr->discard(dev, block, nsect); switch(rq_data_dir(req)) { diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a5d630f5f519..1b88af89d0c7 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -307,7 +307,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) return FAILED; - if (blk_barrier_rq(scmd->request)) + if (scmd->request->cmd_flags & REQ_HARDBARRIER) /* * barrier requests should always retry on UA * otherwise block will get a spurious error @@ -1318,16 +1318,16 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) case DID_OK: break; case DID_BUS_BUSY: - return blk_failfast_transport(scmd->request); + return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); case DID_PARITY: - return blk_failfast_dev(scmd->request); + return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); case DID_ERROR: if (msg_byte(scmd->result) == COMMAND_COMPLETE && status_byte(scmd->result) == RESERVATION_CONFLICT) return 0; /* fall through */ case DID_SOFT_ERROR: - return blk_failfast_driver(scmd->request); + return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); } switch (status_byte(scmd->result)) { @@ -1336,7 +1336,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd) * assume caller has checked sense and determinted * the check condition was retryable. 
*/ - return blk_failfast_dev(scmd->request); + return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); } return 0; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1646fe7cbd4b..5f1160841b0e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -722,7 +722,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) sense_deferred = scsi_sense_is_deferred(&sshdr); } - if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ + if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ req->errors = result; if (result) { if (sense_valid && req->sense) { @@ -757,7 +757,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } } - BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ + /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ + BUG_ON(blk_bidi_rq(req)); /* * Next deal with any sectors which we were able to correctly diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8802e48bc063..a3fdf4dc59da 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -485,7 +485,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * Discard request come in as REQ_TYPE_FS but we turn them into * block PC requests to make life easier. */ - if (blk_discard_rq(rq)) + if (rq->cmd_flags & REQ_DISCARD) ret = sd_prepare_discard(rq); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { @@ -636,7 +636,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; SCpnt->cmnd[7] = 0x18; SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; - SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0); + SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); /* LBA */ SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; @@ -661,7 +661,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; } else if (block > 0xffffffff) { SCpnt->cmnd[0] += READ_16 - READ_6; - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; @@ -682,7 +682,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) this_count = 0xffff; SCpnt->cmnd[0] += READ_10 - READ_6; - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 
0x8 : 0); SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; @@ -691,7 +691,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; } else { - if (unlikely(blk_fua_rq(rq))) { + if (unlikely(rq->cmd_flags & REQ_FUA)) { /* * This happens only if this drive failed * 10byte rw command with ILLEGAL_REQUEST @@ -1112,7 +1112,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) u64 bad_lba; int info_valid; - if (!blk_fs_request(scmd->request)) + if (scmd->request->cmd_type != REQ_TYPE_FS) return 0; info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index b5838d547c68..713620ed70d9 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -2022,7 +2022,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != cmd)) { - if(blk_fs_request(cmd->request)) { + if (cmd->request->cmd_type == REQ_TYPE_FS) { sun3scsi_dma_setup(d, count, rq_data_dir(cmd->request)); sun3_dma_setup_done = cmd; diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index e606cf0a2eb7..613f5880d135 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -524,7 +524,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, struct scsi_cmnd *cmd, int write_flag) { - if(blk_fs_request(cmd->request)) + if (cmd->request->cmd_type == REQ_TYPE_FS) return wanted; else return 0; diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index aaa4fd0dd1b9..7c526b8e30ac 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -458,7 +458,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, struct scsi_cmnd *cmd, int write_flag) { - if(blk_fs_request(cmd->request)) + if (cmd->request->cmd_type == REQ_TYPE_FS) return wanted; else return 0; diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 61bd0be5fb18..a9aff90e58e0 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c @@ -823,7 +823,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req) blkvsc_req->cmnd[0] = READ_16; } - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; + blkvsc_req->cmnd[1] |= + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; *(unsigned long long *)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start); @@ -839,7 +840,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req) blkvsc_req->cmnd[0] = READ_10; } - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; + blkvsc_req->cmnd[1] |= + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 
0x8 : 0; *(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start); @@ -1286,7 +1288,7 @@ static void blkvsc_request(struct request_queue *queue) DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); blkdev = req->rq_disk->private_data; - if (blkdev->shutting_down || !blk_fs_request(req) || + if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS || blkdev->media_not_present) { __blk_end_request_cur(req, 0); continue; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d7ae241a9e55..3ecd28ef9ba4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -604,33 +604,20 @@ enum { test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) -#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) -#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) -#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) -#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) - -#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV) -#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT) -#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER) -#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \ - blk_failfast_transport(rq) || \ - blk_failfast_driver(rq)) -#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) -#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) -#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET) - -#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) - -#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) -#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) +#define blk_noretry_request(rq) \ + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ + REQ_FAILFAST_DRIVER)) + +#define blk_account_rq(rq) \ + (((rq)->cmd_flags & REQ_STARTED) && \ + ((rq)->cmd_type == REQ_TYPE_FS || \ + ((rq)->cmd_flags & REQ_DISCARD))) + #define blk_pm_request(rq) \ - (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) + ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ + (rq)->cmd_type == REQ_TYPE_PM_RESUME) #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) -#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) -#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) -#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) -#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) /* rq->queuelist of dequeued request must be list_empty() */ #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) @@ -652,9 +639,6 @@ static inline bool rq_is_sync(struct request *rq) return rw_is_sync(rq->cmd_flags); } -#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) -#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) - static inline int blk_queue_full(struct request_queue *q, int sync) { if (sync) @@ -687,7 +671,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) #define rq_mergeable(rq) \ (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ - (blk_discard_rq(rq) || blk_fs_request((rq)))) + (((rq)->cmd_flags & REQ_DISCARD) || \ + (rq)->cmd_type == REQ_TYPE_FS)) /* * q->prep_rq_fn return values diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 416bf62d6d46..23faa67e8022 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -224,7 
+224,7 @@ static inline int blk_trace_init_sysfs(struct device *dev) static inline int blk_cmd_buf_len(struct request *rq) { - return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; + return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; } extern void blk_dump_cmd(char *buf, struct request *rq); diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d870a918559c..d8ce278515c3 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -25,8 +25,10 @@ DECLARE_EVENT_CLASS(block_rq_with_error, TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); __entry->errors = rq->errors; blk_fill_rwbs_rq(__entry->rwbs, rq); @@ -109,9 +111,12 @@ DECLARE_EVENT_CLASS(block_rq, TP_fast_assign( __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_pos(rq); + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + 0 : blk_rq_sectors(rq); + __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + blk_rq_bytes(rq) : 0; blk_fill_rwbs_rq(__entry->rwbs, rq); blk_dump_cmd(__get_str(cmd), rq); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 638711c17504..4f149944cb89 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -661,10 +661,10 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (likely(!bt)) return; - if (blk_discard_rq(rq)) + if (rq->cmd_flags & REQ_DISCARD) rw |= (1 << BIO_RW_DISCARD); - if (blk_pc_request(rq)) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, what, rq->errors, rq->cmd_len, rq->cmd); @@ -925,7 +925,7 @@ void blk_add_driver_data(struct request_queue *q, if (likely(!bt)) return; - if (blk_pc_request(rq)) + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, BLK_TA_DRV_DATA, rq->errors, len, data); else @@ -1730,7 +1730,7 @@ void blk_dump_cmd(char *buf, struct request *rq) int len = rq->cmd_len; unsigned char *cmd = rq->cmd; - if (!blk_pc_request(rq)) { + if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { buf[0] = '\0'; return; } @@ -1779,7 +1779,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) int rw = rq->cmd_flags & 0x03; int bytes; - if (blk_discard_rq(rq)) + if (rq->cmd_flags & REQ_DISCARD) rw |= (1 << BIO_RW_DISCARD); bytes = blk_rq_bytes(rq); -- cgit v1.2.3 From 7b6d91daee5cac6402186ff224c3af39d79f4a0e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 7 Aug 2010 18:20:39 +0200 Subject: block: unify flags for struct bio and struct request Remove the current bio flags and reuse the request flags for the bio, too. This makes it easier to trace the type of I/O from the filesystem down to the block driver. There were two flags in the bio that were missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD. Also I've renamed two request flags that had a superfluous RW in them.
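As a sketch of what the unification buys (illustrative only, not part of the patch; both helper names are invented): once bio->bi_rw and rq->cmd_flags draw from a single REQ_* namespace, the same mask test works at both layers of the stack, with no bio_rw_flagged() or blk_*_rq() wrapper in between.

static inline bool bio_is_sync_write(struct bio *bio)
{
	/* REQ_WRITE and REQ_SYNC are now shared between bio and request */
	return (bio->bi_rw & (REQ_WRITE | REQ_SYNC)) == (REQ_WRITE | REQ_SYNC);
}

static inline bool rq_is_sync_write(struct request *rq)
{
	/* identical mask, applied to the request's cmd_flags */
	return (rq->cmd_flags & (REQ_WRITE | REQ_SYNC)) == (REQ_WRITE | REQ_SYNC);
}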
Note that the flags are in bio.h despite having the REQ_ name - as blkdev.h includes bio.h that is the only way to go for now. Signed-off-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-barrier.c | 2 +- block/blk-core.c | 37 +++-------- block/blk-map.c | 2 +- block/blk-merge.c | 2 +- block/cfq-iosched.c | 14 ++--- block/elevator.c | 3 +- drivers/ata/libata-scsi.c | 2 +- drivers/block/aoe/aoeblk.c | 2 +- drivers/block/brd.c | 2 +- drivers/block/drbd/drbd_actlog.c | 8 +-- drivers/block/drbd/drbd_main.c | 6 +- drivers/block/drbd/drbd_receiver.c | 22 +++---- drivers/block/drbd/drbd_req.c | 2 +- drivers/block/loop.c | 2 +- drivers/block/pktcdvd.c | 2 +- drivers/block/umem.c | 2 +- drivers/ide/ide-cd_ioctl.c | 2 +- drivers/ide/ide-floppy.c | 2 +- drivers/md/dm-io.c | 12 ++-- drivers/md/dm-kcopyd.c | 2 +- drivers/md/dm-raid1.c | 2 +- drivers/md/dm-stripe.c | 2 +- drivers/md/dm.c | 14 ++--- drivers/md/linear.c | 2 +- drivers/md/md.c | 10 +-- drivers/md/md.h | 4 +- drivers/md/multipath.c | 8 +-- drivers/md/raid0.c | 2 +- drivers/md/raid1.c | 22 +++---- drivers/md/raid10.c | 12 ++-- drivers/md/raid5.c | 2 +- drivers/scsi/osd/osd_initiator.c | 8 +-- fs/bio.c | 5 +- fs/btrfs/disk-io.c | 8 +-- fs/btrfs/inode.c | 6 +- fs/btrfs/volumes.c | 18 +++--- fs/exofs/ios.c | 2 +- fs/gfs2/log.c | 4 +- fs/gfs2/meta_io.c | 8 +-- fs/gfs2/ops_fstype.c | 2 +- fs/nilfs2/segbuf.c | 2 +- include/linux/bio.h | 125 +++++++++++++++++++++++-------------- include/linux/blkdev.h | 66 +------------------- include/linux/fs.h | 38 +++++------ kernel/power/block_io.c | 2 +- kernel/trace/blktrace.c | 27 ++++---- mm/page_io.c | 2 +- 47 files changed, 242 insertions(+), 289 deletions(-) (limited to 'block/blk-core.c') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 74e404393172..7c6f4a714687 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) /* initialize proxy request and queue it */ blk_rq_init(q, rq); if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) - rq->cmd_flags |= REQ_RW; + rq->cmd_flags |= REQ_WRITE; if (q->ordered & QUEUE_ORDERED_DO_FUA) rq->cmd_flags |= REQ_FUA; init_request_from_bio(rq, q->orig_bar_rq->bio); diff --git a/block/blk-core.c b/block/blk-core.c index dca43a31e725..66c3cfe94d0a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cpu = bio->bi_comp_cpu; req->cmd_type = REQ_TYPE_FS; - /* - * Inherit FAILFAST from bio (for read-ahead, and explicit - * FAILFAST). FAILFAST flags are identical for req and bio. 
- */ - if (bio_rw_flagged(bio, BIO_RW_AHEAD)) + req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; + if (bio->bi_rw & REQ_RAHEAD) req->cmd_flags |= REQ_FAILFAST_MASK; - else - req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK; - - if (bio_rw_flagged(bio, BIO_RW_DISCARD)) - req->cmd_flags |= REQ_DISCARD; - if (bio_rw_flagged(bio, BIO_RW_BARRIER)) - req->cmd_flags |= REQ_HARDBARRIER; - if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) - req->cmd_flags |= REQ_RW_SYNC; - if (bio_rw_flagged(bio, BIO_RW_META)) - req->cmd_flags |= REQ_RW_META; - if (bio_rw_flagged(bio, BIO_RW_NOIDLE)) - req->cmd_flags |= REQ_NOIDLE; req->errors = 0; req->__sector = bio->bi_sector; @@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio) int el_ret; unsigned int bytes = bio->bi_size; const unsigned short prio = bio_prio(bio); - const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); - const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG); + const bool sync = (bio->bi_rw & REQ_SYNC); + const bool unplug = (bio->bi_rw & REQ_UNPLUG); const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; int rw_flags; - if (bio_rw_flagged(bio, BIO_RW_BARRIER) && + if ((bio->bi_rw & REQ_HARDBARRIER) && (q->next_ordered == QUEUE_ORDERED_NONE)) { bio_endio(bio, -EOPNOTSUPP); return 0; @@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) spin_lock_irq(q->queue_lock); - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q)) + if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q)) goto get_rq; el_ret = elv_merge(q, &req, bio); @@ -1275,7 +1259,7 @@ get_rq: */ rw_flags = bio_data_dir(bio); if (sync) - rw_flags |= REQ_RW_SYNC; + rw_flags |= REQ_SYNC; /* * Grab a free request. This is might sleep but can not fail. 
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; } - if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && + if (unlikely(!(bio->bi_rw & REQ_DISCARD) && nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", bdevname(bio->bi_bdev, b), @@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio) if (bio_check_eod(bio, nr_sectors)) goto end_io; - if (bio_rw_flagged(bio, BIO_RW_DISCARD) && - !blk_queue_discard(q)) { + if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) { err = -EOPNOTSUPP; goto end_io; } @@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) { /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ - rq->cmd_flags |= bio->bi_rw & REQ_RW; + rq->cmd_flags |= bio->bi_rw & REQ_WRITE; if (bio_has_data(bio)) { rq->nr_phys_segments = bio_phys_segments(q, bio); diff --git a/block/blk-map.c b/block/blk-map.c index 9083cf0180cc..c65d7593f7f1 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, return PTR_ERR(bio); if (rq_data_dir(rq) == WRITE) - bio->bi_rw |= (1 << BIO_RW); + bio->bi_rw |= (1 << REQ_WRITE); if (do_copy) rq->cmd_flags |= REQ_COPY_USER; diff --git a/block/blk-merge.c b/block/blk-merge.c index 87e4fb7d0e98..4852475521ea 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -180,7 +180,7 @@ new_segment: } if (q->dma_drain_size && q->dma_drain_needed(rq)) { - if (rq->cmd_flags & REQ_RW) + if (rq->cmd_flags & REQ_WRITE) memset(q->dma_drain_buffer, 0, q->dma_drain_size); sg->page_link &= ~0x02; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index d4edeb8fceb8..eb4086f7dfef 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic) */ static inline bool cfq_bio_sync(struct bio *bio) { - return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO); + return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); } /* @@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, return rq1; else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) return rq2; - if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META)) + if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) return rq1; - else if ((rq2->cmd_flags & REQ_RW_META) && - !(rq1->cmd_flags & REQ_RW_META)) + else if ((rq2->cmd_flags & REQ_META) && + !(rq1->cmd_flags & REQ_META)) return rq2; s1 = blk_rq_pos(rq1); @@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq) cfqq->cfqd->rq_queued--; cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq), rq_is_sync(rq)); - if (rq->cmd_flags & REQ_RW_META) { + if (rq->cmd_flags & REQ_META) { WARN_ON(!cfqq->meta_pending); cfqq->meta_pending--; } @@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, * So both queues are sync. Let the new request get disk time if * it's a metadata request and the current queue is doing regular IO. 
*/ - if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending) + if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) return true; /* @@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_io_context *cic = RQ_CIC(rq); cfqd->rq_queued++; - if (rq->cmd_flags & REQ_RW_META) + if (rq->cmd_flags & REQ_META) cfqq->meta_pending++; cfq_update_io_thinktime(cfqd, cic); diff --git a/block/elevator.c b/block/elevator.c index aa99b59c03d6..816a7c8d6394 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio) /* * Don't merge file system requests and discard requests */ - if (bio_rw_flagged(bio, BIO_RW_DISCARD) != - bio_rw_flagged(rq->bio, BIO_RW_DISCARD)) + if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) return 0; /* diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a5c08b082edb..0a8cd3484791 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1114,7 +1114,7 @@ static int atapi_drain_needed(struct request *rq) if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) return 0; - if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) return 0; return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 035cefe4045a..65deffde60ac 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -173,7 +173,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) BUG(); bio_endio(bio, -ENXIO); return 0; - } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) { + } else if (bio->bi_rw & REQ_HARDBARRIER) { bio_endio(bio, -EOPNOTSUPP); return 0; } else if (bio->bi_io_vec == NULL) { diff --git a/drivers/block/brd.c b/drivers/block/brd.c index f1bf79d9bc0a..1b218c6b6820 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -340,7 +340,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio) get_capacity(bdev->bd_disk)) goto out; - if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) { + if (unlikely(bio->bi_rw & REQ_DISCARD)) { err = 0; discard_from_brd(brd, sector, bio->bi_size); goto out; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index df018990c422..9400845d602e 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, md_io.error = 0; if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) - rw |= (1 << BIO_RW_BARRIER); - rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO)); + rw |= REQ_HARDBARRIER; + rw |= REQ_UNPLUG | REQ_SYNC; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) { + if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) { /* Try again with no barrier */ dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); set_bit(MD_NO_BARRIER, &mdev->flags); - rw &= ~(1 << BIO_RW_BARRIER); + rw &= ~REQ_HARDBARRIER; bio_put(bio); goto retry; } diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7258c95e895e..e2ab13d99d69 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2425,15 +2425,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) /* NOTE: no need to check if barriers supported here as we would * not pass the test in make_request_common in that case */ - if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) { + if (req->master_bio->bi_rw & REQ_HARDBARRIER) { dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); /* dp_flags |= DP_HARDBARRIER; */ } - if (bio_rw_flagged(req->master_bio,
BIO_RW_SYNCIO)) + if (req->master_bio->bi_rw & REQ_SYNC) dp_flags |= DP_RW_SYNC; /* for now handle SYNCIO and UNPLUG * as if they still were one and the same flag */ - if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG)) + if (req->master_bio->bi_rw & REQ_UNPLUG) dp_flags |= DP_RW_SYNC; if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn <= C_PAUSED_SYNC_T) diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index dff48701b84d..cba1deb7b271 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -1180,7 +1180,7 @@ next_bio: bio->bi_sector = sector; bio->bi_bdev = mdev->ldev->backing_bdev; /* we special case some flags in the multi-bio case, see below - * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */ + * (REQ_UNPLUG, REQ_HARDBARRIER) */ bio->bi_rw = rw; bio->bi_private = e; bio->bi_end_io = drbd_endio_sec; @@ -1209,16 +1209,16 @@ next_bio: bios = bios->bi_next; bio->bi_next = NULL; - /* strip off BIO_RW_UNPLUG unless it is the last bio */ + /* strip off REQ_UNPLUG unless it is the last bio */ if (bios) - bio->bi_rw &= ~(1<<BIO_RW_UNPLUG); + bio->bi_rw &= ~REQ_UNPLUG; drbd_generic_make_request(mdev, fault_type, bio); - /* strip off BIO_RW_BARRIER, + /* strip off REQ_HARDBARRIER, * unless it is the first or last bio */ if (bios && bios->bi_next) - bios->bi_rw &= ~(1<<BIO_RW_BARRIER); + bios->bi_rw &= ~REQ_HARDBARRIER; } while (bios); maybe_kick_lo(mdev); return 0; @@ -1233,7 +1233,7 @@ fail: } /** - * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set + * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set * @mdev: DRBD device. * @w: work object. * @cancel: The connection will be closed anyways (unused in this callback) @@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) so that we can finish that epoch in drbd_may_finish_epoch(). That is necessary if we already have a long chain of Epochs, before - we realize that BIO_RW_BARRIER is actually not supported */ + we realize that REQ_HARDBARRIER is actually not supported */ /* As long as the -ENOTSUPP on the barrier is reported immediately that will never trigger. If it is reported late, we will just @@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); if (epoch == e->epoch) { set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); - rw |= (1<<BIO_RW_BARRIER); + rw |= REQ_HARDBARRIER; e->flags |= EE_IS_BARRIER; } else { if (atomic_read(&epoch->epoch_size) > 1 || !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); - rw |= (1<<BIO_RW_BARRIER); + rw |= REQ_HARDBARRIER; e->flags |= EE_IS_BARRIER; } } @@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) dp_flags = be32_to_cpu(p->dp_flags); if (dp_flags & DP_HARDBARRIER) { dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); - /* rw |= (1<<BIO_RW_BARRIER); */ + /* rw |= REQ_HARDBARRIER; */ } if (dp_flags & DP_RW_SYNC) - rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); + rw |= REQ_SYNC | REQ_UNPLUG; if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn <= C_PAUSED_SYNC_T) e->flags |= EE_MAY_SET_IN_SYNC; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 654f1ef5cbb0..f761d98a4e90 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) * because of those XXX, this is not yet enabled, * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
*/ - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) { + if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) { /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ bio_endio(bio, -EOPNOTSUPP); return 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 6120922f459f..fedfdb7d3cdf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -476,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { - bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER); + bool barrier = (bio->bi_rw & REQ_HARDBARRIER); struct file *file = lo->lo_backing_file; if (barrier) { diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 8a549db2aa78..9f3e4454274b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1221,7 +1221,7 @@ static int pkt_start_recovery(struct packet_data *pkt) pkt->bio->bi_flags = 1 << BIO_UPTODATE; pkt->bio->bi_idx = 0; - BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW)); + BUG_ON(pkt->bio->bi_rw != REQ_WRITE); BUG_ON(pkt->bio->bi_vcnt != pkt->frames); BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 2f9470ff8f7c..8be57151f5d6 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -478,7 +478,7 @@ static void process_page(unsigned long data) le32_to_cpu(desc->local_addr)>>9, le32_to_cpu(desc->transfer_size)); dump_dmastat(card, control); - } else if (test_bit(BIO_RW, &bio->bi_rw) && + } else if ((bio->bi_rw & REQ_WRITE) && le32_to_cpu(desc->local_addr) >> 9 == card->init_size) { card->init_size += le32_to_cpu(desc->transfer_size) >> 9; diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 02712bf045c1..766b3deeb23c 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c @@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, touch it at all. 
*/ if (cgc->data_direction == CGC_DATA_WRITE) - flags |= REQ_RW; + flags |= REQ_WRITE; if (cgc->sense) memset(cgc->sense, 0, sizeof(struct request_sense)); diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index c7d0737bb18a..5406b6ea3ad1 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c @@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive, memcpy(rq->cmd, pc->c, 12); pc->rq = rq; - if (rq->cmd_flags & REQ_RW) + if (rq->cmd_flags & REQ_WRITE) pc->flags |= PC_FLAG_WRITING; pc->flags |= PC_FLAG_DMA_OK; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 10f457ca6af2..0590c75b0ab6 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions, BUG_ON(num_regions > DM_IO_MAX_REGIONS); if (sync) - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + rw |= REQ_SYNC | REQ_UNPLUG; /* * For multiple regions we need to be careful to rewind @@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count || (rw & (1 << BIO_RW_BARRIER))) + if (where[i].count || (rw & REQ_HARDBARRIER)) do_region(rw, i, where + i, dp, io); } @@ -412,8 +412,8 @@ retry: } set_current_state(TASK_RUNNING); - if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { - rw &= ~(1 << BIO_RW_BARRIER); + if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) { + rw &= ~REQ_HARDBARRIER; goto retry; } @@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp) * New collapsed (a)synchronous interface. * * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug - * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in - * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to + * the queue with blk_unplug() some time later or set REQ_SYNC in +io_req->bi_rw. If you fail to do one of these, the IO will be submitted to * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. 
*/ int dm_io(struct dm_io_request *io_req, unsigned num_regions, diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index addf83475040..d8587bac5682 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job) { int r; struct dm_io_request io_req = { - .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG), + .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, .mem.offset = job->offset, diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index ddda531723dc..74136262d654 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, if (error == -EOPNOTSUPP) goto out; - if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD)) + if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) goto out; if (unlikely(error)) { diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index e610725db766..d6e28d732b4d 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, if (!error) return 0; /* I/O complete */ - if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD)) + if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) return error; if (error == -EOPNOTSUPP) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1e0e6dd51501..d6f77baeafd6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error) */ spin_lock_irqsave(&md->deferred_lock, flags); if (__noflush_suspending(md)) { - if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER)) + if (!(io->bio->bi_rw & REQ_HARDBARRIER)) bio_list_add_head(&md->deferred, io->bio); } else @@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error) io_error = io->error; bio = io->bio; - if (bio_rw_flagged(bio, BIO_RW_BARRIER)) { + if (bio->bi_rw & REQ_HARDBARRIER) { /* * There can be just one barrier request so we use * a per-device variable for error reporting. @@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_sector = sector; clone->bi_bdev = bio->bi_bdev; - clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER); + clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER; clone->bi_vcnt = 1; clone->bi_size = to_bytes(len); clone->bi_io_vec->bv_offset = offset; @@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); __bio_clone(clone, bio); - clone->bi_rw &= ~(1 << BIO_RW_BARRIER); + clone->bi_rw &= ~REQ_HARDBARRIER; clone->bi_destructor = dm_bio_destructor; clone->bi_sector = sector; clone->bi_idx = idx; @@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) ci.map = dm_get_live_table(md); if (unlikely(!ci.map)) { - if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) + if (!(bio->bi_rw & REQ_HARDBARRIER)) bio_io_error(bio); else if (!md->barrier_error) @@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio) * we have to queue this io for later. 
*/ if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || - unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { + unlikely(bio->bi_rw & REQ_HARDBARRIER)) { up_read(&md->io_lock); if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && @@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work) if (dm_request_based(md)) generic_make_request(c); else { - if (bio_rw_flagged(c, BIO_RW_BARRIER)) + if (c->bi_rw & REQ_HARDBARRIER) process_barrier(md, c); else __split_and_process_bio(md, c); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 7e0e057db9a7..ba19060bcf3f 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio) dev_info_t *tmp_dev; sector_t start_sector; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { md_barrier_request(mddev, bio); return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index cb20d0b0555a..1893af678779 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws) /* an empty barrier - all done */ bio_endio(bio, 0); else { - bio->bi_rw &= ~(1<<BIO_RW_BARRIER); + bio->bi_rw &= ~REQ_HARDBARRIER; if (mddev->pers->make_request(mddev, bio)) generic_make_request(bio); mddev->barrier = POST_REQUEST_BARRIER; @@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, * if zero is reached. * If an error occurred, call md_error * - * As we might need to resubmit the request if BIO_RW_BARRIER + * As we might need to resubmit the request if REQ_HARDBARRIER * causes ENOTSUPP, we allocate a spare bio... */ struct bio *bio = bio_alloc(GFP_NOIO, 1); - int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); + int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG; bio->bi_bdev = rdev->bdev; bio->bi_sector = sector; @@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, atomic_inc(&mddev->pending_writes); if (!test_bit(BarriersNotsupp, &rdev->flags)) { struct bio *rbio; - rw |= (1<<BIO_RW_BARRIER); + rw |= REQ_HARDBARRIER; rbio = bio_clone(bio, GFP_NOIO); rbio->bi_private = bio; rbio->bi_end_io = super_written_barrier; @@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size, struct completion event; int ret; - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + rw |= REQ_SYNC | REQ_UNPLUG; bio->bi_bdev = bdev; bio->bi_sector = sector; diff --git a/drivers/md/md.h b/drivers/md/md.h index 10597bfec000..fc56e0f21c80 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -67,7 +67,7 @@ struct mdk_rdev_s #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ -#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ +#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */ #define AllReserved 6 /* If whole device is reserved for * one array */ #define AutoDetected 7 /* added by auto-detect */ @@ -254,7 +254,7 @@ struct mddev_s * fails.
Only supported */ struct bio *biolist; /* bios that need to be retried - * because BIO_RW_BARRIER is not supported + * because REQ_HARDBARRIER is not supported */ atomic_t recovery_active; /* blocks scheduled, but not written */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 410fb60699ac..0307d217e7a4 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error) if (uptodate) multipath_end_bh_io(mp_bh, 0); - else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) { + else if (!(bio->bi_rw & REQ_RAHEAD)) { /* * oops, IO error: */ @@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) struct multipath_bh * mp_bh; struct multipath_info *multipath; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { md_barrier_request(mddev, bio); return 0; } @@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) mp_bh->bio = *bio; mp_bh->bio.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_bdev = multipath->rdev->bdev; - mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); + mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_private = mp_bh; generic_make_request(&mp_bh->bio); @@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev) *bio = *(mp_bh->master_bio); bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; - bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); + bio->bi_rw |= REQ_FAILFAST_TRANSPORT; bio->bi_end_io = multipath_end_request; bio->bi_private = mp_bh; generic_make_request(bio); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 563abed5a2cb..6f7af46d623c 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) struct strip_zone *zone; mdk_rdev_t *tmp_dev; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { md_barrier_request(mddev, bio); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a948da8012de..73cc74ffc26b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) struct bio_list bl; struct page **behind_pages = NULL; const int rw = bio_data_dir(bio); - const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); + const bool do_sync = (bio->bi_rw & REQ_SYNC); bool do_barriers; mdk_rdev_t *blocked_rdev; @@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) finish_wait(&conf->wait_barrier, &w); } if (unlikely(!mddev->barriers_work && - bio_rw_flagged(bio, BIO_RW_BARRIER))) { + (bio->bi_rw & REQ_HARDBARRIER))) { if (rw == WRITE) md_write_end(mddev); bio_endio(bio, -EOPNOTSUPP); @@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid1_end_read_request; - read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); + read_bio->bi_rw = READ | do_sync; read_bio->bi_private = r1_bio; generic_make_request(read_bio); @@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_set(&r1_bio->remaining, 0); atomic_set(&r1_bio->behind_remaining, 0); - do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER); + do_barriers = bio->bi_rw & 
REQ_HARDBARRIER; if (do_barriers) set_bit(R1BIO_Barrier, &r1_bio->state); @@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) | - (do_sync << BIO_RW_SYNCIO); + mbio->bi_rw = WRITE | do_barriers | do_sync; mbio->bi_private = r1_bio; if (behind_pages) { @@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev) sync_request_write(mddev, r1_bio); unplug = 1; } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { - /* some requests in the r1bio were BIO_RW_BARRIER + /* some requests in the r1bio were REQ_HARDBARRIER * requests which failed with -EOPNOTSUPP. Hohumm.. * Better resubmit without the barrier. * We know which devices to resubmit for, because @@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev) * We already have a nr_pending reference on these rdevs. */ int i; - const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO); + const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); clear_bit(R1BIO_BarrierRetry, &r1_bio->state); clear_bit(R1BIO_Barrier, &r1_bio->state); for (i=0; i < conf->raid_disks; i++) @@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev) conf->mirrors[i].rdev->data_offset; bio->bi_bdev = conf->mirrors[i].rdev->bdev; bio->bi_end_io = raid1_end_write_request; - bio->bi_rw = WRITE | - (do_sync << BIO_RW_SYNCIO); + bio->bi_rw = WRITE | do_sync; bio->bi_private = r1_bio; r1_bio->bios[i] = bio; generic_make_request(bio); @@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev) (unsigned long long)r1_bio->sector); raid_end_bio_io(r1_bio); } else { - const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO); + const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; r1_bio->bios[r1_bio->read_disk] = mddev->ro ? 
IO_BLOCKED : NULL; r1_bio->read_disk = disk; @@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev) bio->bi_sector = r1_bio->sector + rdev->data_offset; bio->bi_bdev = rdev->bdev; bio->bi_end_io = raid1_end_read_request; - bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); + bio->bi_rw = READ | do_sync; bio->bi_private = r1_bio; unplug = 1; generic_make_request(bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 42e64e4e5e25..62ecb6650fd0 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio) int i; int chunk_sects = conf->chunk_mask + 1; const int rw = bio_data_dir(bio); - const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); + const bool do_sync = (bio->bi_rw & REQ_SYNC); struct bio_list bl; unsigned long flags; mdk_rdev_t *blocked_rdev; - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { md_barrier_request(mddev, bio); return 0; } @@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) mirror->rdev->data_offset; read_bio->bi_bdev = mirror->rdev->bdev; read_bio->bi_end_io = raid10_end_read_request; - read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); + read_bio->bi_rw = READ | do_sync; read_bio->bi_private = r10_bio; generic_make_request(read_bio); @@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) conf->mirrors[d].rdev->data_offset; mbio->bi_bdev = conf->mirrors[d].rdev->bdev; mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO); + mbio->bi_rw = WRITE | do_sync; mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); @@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev) raid_end_bio_io(r10_bio); bio_put(bio); } else { - const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO); + const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); bio_put(bio); rdev = conf->mirrors[mirror].rdev; if (printk_ratelimit()) @@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev) bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset; bio->bi_bdev = rdev->bdev; - bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); + bio->bi_rw = READ | do_sync; bio->bi_private = r10_bio; bio->bi_end_io = raid10_end_read_request; unplug = 1; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 96c690279fc6..20ac2f14376a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) const int rw = bio_data_dir(bi); int remaining; - if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { + if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) { /* Drain all pending writes. We only really need * to ensure they have been submitted, but this is * easier. 
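A note on the conversion pattern running through the md/raid hunks above: BIO_RW_SYNCIO was a bit number, so the old code shifted a 0/1 flag into position, while REQ_SYNC is a ready-made mask, so the new code ORs the masked value in whole. A minimal before/after sketch (editorial, with do_sync standing in for the locals at these call sites); note the local must be wide enough to carry the full mask, not a bool, for the OR to set the intended bit:

/* before: BIO_RW_SYNCIO is a bit number, shift a 0/1 value into place */
int do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);	/* 0 or 1 */
read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);

/* after: REQ_SYNC is a mask, keep the masked value and OR it in directly */
unsigned long do_sync = bio->bi_rw & REQ_SYNC;		/* 0 or REQ_SYNC */
read_bio->bi_rw = READ | do_sync;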
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index ee4b6914667f..fda4de3440c4 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or, return PTR_ERR(bio); } - bio->bi_rw &= ~(1 << BIO_RW); + bio->bi_rw &= ~REQ_WRITE; or->in.bio = bio; or->in.total_bytes = bio->bi_size; return 0; @@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); + WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); or->out.bio = bio; or->out.total_bytes = len; } @@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or, if (IS_ERR(bio)) return PTR_ERR(bio); - bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ + bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ osd_req_write(or, obj, offset, bio, len); return 0; } @@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or, { _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); + WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); or->in.bio = bio; or->in.total_bytes = len; } diff --git a/fs/bio.c b/fs/bio.c index e7bf6ca64dcf..8abb2dfb2e7c 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, if (!bio) goto out_bmd; - bio->bi_rw |= (!write_to_vm << BIO_RW); + if (!write_to_vm) + bio->bi_rw |= REQ_WRITE; ret = 0; @@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, * set data direction, and check if mapped pages need bouncing */ if (!write_to_vm) - bio->bi_rw |= (1 << BIO_RW); + bio->bi_rw |= REQ_WRITE; bio->bi_bdev = bdev; bio->bi_flags |= (1 << BIO_USER_MAPPED); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 34f7c375567e..64f10082f048 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - if (bio->bi_rw & (1 << BIO_RW)) { + if (bio->bi_rw & REQ_WRITE) { if (end_io_wq->metadata) btrfs_queue_worker(&fs_info->endio_meta_write_workers, &end_io_wq->work); @@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, atomic_inc(&fs_info->nr_async_submits); - if (rw & (1 << BIO_RW_SYNCIO)) + if (rw & REQ_SYNC) btrfs_set_work_high_prio(&async->work); btrfs_queue_worker(&fs_info->workers, &async->work); @@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, bio, 1); BUG_ON(ret); - if (!(rw & (1 << BIO_RW))) { + if (!(rw & REQ_WRITE)) { /* * called for a read, do the setup so that checksum validation * can happen in the async kernel threads @@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work) * ram and up to date before trying to verify things. 
For * blocksize <= pagesize, it is basically a noop */ - if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata && + if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata && !bio_ready_for_csum(bio)) { btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1bff92ad4744..e975d7180a88 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); - if (!(rw & (1 << BIO_RW))) { + if (!(rw & REQ_WRITE)) { if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); @@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, bio->bi_size = 0; bio_add_page(bio, page, failrec->len, start - page_offset(page)); - if (failed_bio->bi_rw & (1 << BIO_RW)) + if (failed_bio->bi_rw & REQ_WRITE) rw = WRITE; else rw = READ; @@ -5642,7 +5642,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, struct bio_vec *bvec = bio->bi_io_vec; u64 start; int skip_sum; - int write = rw & (1 << BIO_RW); + int write = rw & REQ_WRITE; int ret = 0; skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d6e3af8be95b..dd318ff280b2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -258,7 +258,7 @@ loop_lock: BUG_ON(atomic_read(&cur->bi_cnt) == 0); - if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) + if (cur->bi_rw & REQ_SYNC) num_sync_run++; submit_bio(cur->bi_rw, cur); @@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, int max_errors = 0; struct btrfs_multi_bio *multi = NULL; - if (multi_ret && !(rw & (1 << BIO_RW))) + if (multi_ret && !(rw & REQ_WRITE)) stripes_allocated = 1; again: if (multi_ret) { @@ -2687,7 +2687,7 @@ again: mirror_num = 0; /* if our multi bio struct is too small, back off and try again */ - if (rw & (1 << BIO_RW)) { + if (rw & REQ_WRITE) { if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) { stripes_required = map->num_stripes; @@ -2697,7 +2697,7 @@ again: max_errors = 1; } } - if (multi_ret && (rw & (1 << BIO_RW)) && + if (multi_ret && (rw & REQ_WRITE) && stripes_allocated < stripes_required) { stripes_allocated = map->num_stripes; free_extent_map(em); @@ -2733,7 +2733,7 @@ again: num_stripes = 1; stripe_index = 0; if (map->type & BTRFS_BLOCK_GROUP_RAID1) { - if (unplug_page || (rw & (1 << BIO_RW))) + if (unplug_page || (rw & REQ_WRITE)) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; @@ -2744,7 +2744,7 @@ again: } } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { - if (rw & (1 << BIO_RW)) + if (rw & REQ_WRITE) num_stripes = map->num_stripes; else if (mirror_num) stripe_index = mirror_num - 1; @@ -2755,7 +2755,7 @@ again: stripe_index = do_div(stripe_nr, factor); stripe_index *= map->sub_stripes; - if (unplug_page || (rw & (1 << BIO_RW))) + if (unplug_page || (rw & REQ_WRITE)) num_stripes = map->sub_stripes; else if (mirror_num) stripe_index += mirror_num - 1; @@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root, struct btrfs_pending_bios *pending_bios; /* don't bother with additional async steps for reads, right now */ - if (!(rw & (1 << BIO_RW))) { + if (!(rw & REQ_WRITE)) { bio_get(bio); submit_bio(rw, bio); bio_put(bio); @@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root, 
bio->bi_rw |= rw; spin_lock(&device->io_lock); - if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) + if (bio->bi_rw & REQ_SYNC) pending_bios = &device->pending_sync_bios; else pending_bios = &device->pending_bios; diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c index 4337cad7777b..e2732203fa93 100644 --- a/fs/exofs/ios.c +++ b/fs/exofs/ios.c @@ -599,7 +599,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp) } else { bio = master_dev->bio; /* FIXME: bio_set_dir() */ - bio->bi_rw |= (1 << BIO_RW); + bio->bi_rw |= REQ_WRITE; } osd_req_write(or, &ios->obj, per_dev->offset, bio, diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index efc3539ac5a1..cde1248a6225 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) goto skip_barrier; get_bh(bh); - submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh); + submit_bh(WRITE_BARRIER | REQ_META, bh); wait_on_buffer(bh); if (buffer_eopnotsupp(bh)) { clear_buffer_eopnotsupp(bh); @@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull) lock_buffer(bh); skip_barrier: get_bh(bh); - submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh); + submit_bh(WRITE_SYNC | REQ_META, bh); wait_on_buffer(bh); } if (!buffer_uptodate(bh)) diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 18176d0b75d7..f3b071f921aa 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb { struct buffer_head *bh, *head; int nr_underway = 0; - int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? - WRITE_SYNC_PLUG : WRITE)); + int write_op = REQ_META | + (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); BUG_ON(!PageLocked(page)); BUG_ON(!page_has_buffers(page)); @@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, } bh->b_end_io = end_buffer_read_sync; get_bh(bh); - submit_bh(READ_SYNC | (1 << BIO_RW_META), bh); + submit_bh(READ_SYNC | REQ_META, bh); if (!(flags & DIO_WAIT)) return 0; @@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) if (buffer_uptodate(first_bh)) goto out; if (!buffer_locked(first_bh)) - ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh); + ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); dblock++; extlen--; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3593b3a7290e..fd4f8946abf5 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -275,7 +275,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector) bio->bi_end_io = end_bio_io_page; bio->bi_private = page; - submit_bio(READ_SYNC | (1 << BIO_RW_META), bio); + submit_bio(READ_SYNC | REQ_META, bio); wait_on_page_locked(page); bio_put(bio); if (!PageUptodate(page)) { diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 2e6a2723b8fa..4588fb9e93df 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, * Last BIO is always sent through the following * submission. 
*/ - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + rw |= REQ_SYNC | REQ_UNPLUG; res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 7fc5606e6ea5..4d379c8250ae 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -138,55 +138,83 @@ struct bio { #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) /* - * bio bi_rw flags - * - * bit 0 -- data direction - * If not set, bio is a read from device. If set, it's a write to device. - * bit 1 -- fail fast device errors - * bit 2 -- fail fast transport errors - * bit 3 -- fail fast driver errors - * bit 4 -- rw-ahead when set - * bit 5 -- barrier - * Insert a serialization point in the IO queue, forcing previously - * submitted IO to be completed before this one is issued. - * bit 6 -- synchronous I/O hint. - * bit 7 -- Unplug the device immediately after submitting this bio. - * bit 8 -- metadata request - * Used for tracing to differentiate metadata and data IO. May also - * get some preferential treatment in the IO scheduler - * bit 9 -- discard sectors - * Informs the lower level device that this range of sectors is no longer - * used by the file system and may thus be freed by the device. Used - * for flash based storage. - * Don't want driver retries for any fast fail whatever the reason. - * bit 10 -- Tell the IO scheduler not to wait for more requests after this - one has been submitted, even if it is a SYNC request. + * Request flags. For use in the cmd_flags field of struct request, and in + * bi_rw of struct bio. Note that some flags are only valid in either one. */ -enum bio_rw_flags { - BIO_RW, - BIO_RW_FAILFAST_DEV, - BIO_RW_FAILFAST_TRANSPORT, - BIO_RW_FAILFAST_DRIVER, - /* above flags must match REQ_* */ - BIO_RW_AHEAD, - BIO_RW_BARRIER, - BIO_RW_SYNCIO, - BIO_RW_UNPLUG, - BIO_RW_META, - BIO_RW_DISCARD, - BIO_RW_NOIDLE, +enum rq_flag_bits { + /* common flags */ + __REQ_WRITE, /* not set, read. 
set, write */ + __REQ_FAILFAST_DEV, /* no driver retries of device errors */ + __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ + __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ + + __REQ_HARDBARRIER, /* may not be passed by drive either */ + __REQ_SYNC, /* request is sync (sync write or read) */ + __REQ_META, /* metadata io request */ + __REQ_DISCARD, /* request to discard sectors */ + __REQ_NOIDLE, /* don't anticipate more IO after this one */ + + /* bio only flags */ + __REQ_UNPLUG, /* unplug the immediately after submission */ + __REQ_RAHEAD, /* read ahead, can fail anytime */ + + /* request only flags */ + __REQ_SORTED, /* elevator knows about this request */ + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ + __REQ_FUA, /* forced unit access */ + __REQ_NOMERGE, /* don't touch this for merging */ + __REQ_STARTED, /* drive already may have started this one */ + __REQ_DONTPREP, /* don't call prep for this one */ + __REQ_QUEUED, /* uses queueing */ + __REQ_ELVPRIV, /* elevator private data attached */ + __REQ_FAILED, /* set if the request failed */ + __REQ_QUIET, /* don't worry about errors */ + __REQ_PREEMPT, /* set for "ide_preempt" requests */ + __REQ_ORDERED_COLOR, /* is before or after barrier */ + __REQ_ALLOCED, /* request came from our alloc pool */ + __REQ_COPY_USER, /* contains copies of user pages */ + __REQ_INTEGRITY, /* integrity metadata has been remapped */ + __REQ_IO_STAT, /* account I/O stat */ + __REQ_MIXED_MERGE, /* merge of different types, fail separately */ + __REQ_NR_BITS, /* stops here */ }; -/* - * First four bits must match between bio->bi_rw and rq->cmd_flags, make - * that explicit here. - */ -#define BIO_RW_RQ_MASK 0xf - -static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) -{ - return (bio->bi_rw & (1 << flag)) != 0; -} +#define REQ_WRITE (1 << __REQ_WRITE) +#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) +#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) +#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) +#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) +#define REQ_SYNC (1 << __REQ_SYNC) +#define REQ_META (1 << __REQ_META) +#define REQ_DISCARD (1 << __REQ_DISCARD) +#define REQ_NOIDLE (1 << __REQ_NOIDLE) + +#define REQ_FAILFAST_MASK \ + (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) +#define REQ_COMMON_MASK \ + (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ + REQ_META| REQ_DISCARD | REQ_NOIDLE) + +#define REQ_UNPLUG (1 << __REQ_UNPLUG) +#define REQ_RAHEAD (1 << __REQ_RAHEAD) + +#define REQ_SORTED (1 << __REQ_SORTED) +#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) +#define REQ_FUA (1 << __REQ_FUA) +#define REQ_NOMERGE (1 << __REQ_NOMERGE) +#define REQ_STARTED (1 << __REQ_STARTED) +#define REQ_DONTPREP (1 << __REQ_DONTPREP) +#define REQ_QUEUED (1 << __REQ_QUEUED) +#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) +#define REQ_FAILED (1 << __REQ_FAILED) +#define REQ_QUIET (1 << __REQ_QUIET) +#define REQ_PREEMPT (1 << __REQ_PREEMPT) +#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) +#define REQ_ALLOCED (1 << __REQ_ALLOCED) +#define REQ_COPY_USER (1 << __REQ_COPY_USER) +#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) +#define REQ_IO_STAT (1 << __REQ_IO_STAT) +#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) /* * upper 16 bits of bi_rw define the io priority of this bio @@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) #define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_segments(bio) 
((bio)->bi_vcnt - (bio)->bi_idx) #define bio_sectors(bio) ((bio)->bi_size >> 9) -#define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD)) +#define bio_empty_barrier(bio) \ + ((bio->bi_rw & REQ_HARDBARRIER) && \ + !bio_has_data(bio) && \ + !(bio->bi_rw & REQ_DISCARD)) static inline unsigned int bio_cur_bytes(struct bio *bio) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3ecd28ef9ba4..3fc0f5908619 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -84,70 +84,6 @@ enum { REQ_LB_OP_FLUSH = 0x41, /* flush request */ }; -/* - * request type modified bits. first four bits match BIO_RW* bits, important - */ -enum rq_flag_bits { - __REQ_RW, /* not set, read. set, write */ - __REQ_FAILFAST_DEV, /* no driver retries of device errors */ - __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ - __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ - /* above flags must match BIO_RW_* */ - __REQ_DISCARD, /* request to discard sectors */ - __REQ_SORTED, /* elevator knows about this request */ - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ - __REQ_HARDBARRIER, /* may not be passed by drive either */ - __REQ_FUA, /* forced unit access */ - __REQ_NOMERGE, /* don't touch this for merging */ - __REQ_STARTED, /* drive already may have started this one */ - __REQ_DONTPREP, /* don't call prep for this one */ - __REQ_QUEUED, /* uses queueing */ - __REQ_ELVPRIV, /* elevator private data attached */ - __REQ_FAILED, /* set if the request failed */ - __REQ_QUIET, /* don't worry about errors */ - __REQ_PREEMPT, /* set for "ide_preempt" requests */ - __REQ_ORDERED_COLOR, /* is before or after barrier */ - __REQ_RW_SYNC, /* request is sync (sync write or read) */ - __REQ_ALLOCED, /* request came from our alloc pool */ - __REQ_RW_META, /* metadata io request */ - __REQ_COPY_USER, /* contains copies of user pages */ - __REQ_INTEGRITY, /* integrity metadata has been remapped */ - __REQ_NOIDLE, /* Don't anticipate more IO after this one */ - __REQ_IO_STAT, /* account I/O stat */ - __REQ_MIXED_MERGE, /* merge of different types, fail separately */ - __REQ_NR_BITS, /* stops here */ -}; - -#define REQ_RW (1 << __REQ_RW) -#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) -#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) -#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) -#define REQ_DISCARD (1 << __REQ_DISCARD) -#define REQ_SORTED (1 << __REQ_SORTED) -#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) -#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) -#define REQ_FUA (1 << __REQ_FUA) -#define REQ_NOMERGE (1 << __REQ_NOMERGE) -#define REQ_STARTED (1 << __REQ_STARTED) -#define REQ_DONTPREP (1 << __REQ_DONTPREP) -#define REQ_QUEUED (1 << __REQ_QUEUED) -#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) -#define REQ_FAILED (1 << __REQ_FAILED) -#define REQ_QUIET (1 << __REQ_QUIET) -#define REQ_PREEMPT (1 << __REQ_PREEMPT) -#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) -#define REQ_RW_SYNC (1 << __REQ_RW_SYNC) -#define REQ_ALLOCED (1 << __REQ_ALLOCED) -#define REQ_RW_META (1 << __REQ_RW_META) -#define REQ_COPY_USER (1 << __REQ_COPY_USER) -#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) -#define REQ_NOIDLE (1 << __REQ_NOIDLE) -#define REQ_IO_STAT (1 << __REQ_IO_STAT) -#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) - -#define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \ - REQ_FAILFAST_DRIVER) - #define BLK_MAX_CDB 16 /* @@ -631,7 +567,7 @@ enum { */ 
static inline bool rw_is_sync(unsigned int rw_flags) { - return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); + return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); } static inline bool rq_is_sync(struct request *rq) diff --git a/include/linux/fs.h b/include/linux/fs.h index 598878831497..c5c92943c767 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -144,29 +144,31 @@ struct inodes_stat_t { * of this IO. * */ -#define RW_MASK 1 -#define RWA_MASK 2 -#define READ 0 -#define WRITE 1 -#define READA 2 /* read-ahead - don't block if no resources */ -#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ -#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) -#define READ_META (READ | (1 << BIO_RW_META)) -#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) -#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) -#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) -#define WRITE_META (WRITE | (1 << BIO_RW_META)) -#define SWRITE_SYNC_PLUG \ - (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) -#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) -#define WRITE_BARRIER (WRITE_SYNC | (1 << BIO_RW_BARRIER)) +#define RW_MASK 1 +#define RWA_MASK 2 + +#define READ 0 +#define WRITE 1 +#define READA 2 /* readahead - don't block if no resources */ +#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ + +#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) +#define READ_META (READ | REQ_META) +#define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) +#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) +#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) +#define WRITE_META (WRITE | REQ_META) +#define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ + REQ_HARDBARRIER) +#define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) +#define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) /* * These aren't really reads or writes, they pass down information about * parts of device that are now unused by the file system. 
*/ -#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD)) -#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER)) +#define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) +#define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) #define SEL_IN 1 #define SEL_OUT 2 diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c index 97024fd40cd5..83bbc7c02df9 100644 --- a/kernel/power/block_io.c +++ b/kernel/power/block_io.c @@ -28,7 +28,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector, struct page *page, struct bio **bio_chain) { - const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG; struct bio *bio; bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 4f149944cb89..3b4a695051b6 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) }; +#define BLK_TC_HARDBARRIER BLK_TC_BARRIER +#define BLK_TC_RAHEAD BLK_TC_AHEAD + /* The ilog2() calls fall out because they're constant */ -#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \ - (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name)) +#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ + (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) /* * The worker for the various blk_add_trace*() types. Fills out a @@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, return; what |= ddir_act[rw & WRITE]; - what |= MASK_TC_BIT(rw, BARRIER); - what |= MASK_TC_BIT(rw, SYNCIO); - what |= MASK_TC_BIT(rw, AHEAD); + what |= MASK_TC_BIT(rw, HARDBARRIER); + what |= MASK_TC_BIT(rw, SYNC); + what |= MASK_TC_BIT(rw, RAHEAD); what |= MASK_TC_BIT(rw, META); what |= MASK_TC_BIT(rw, DISCARD); @@ -662,7 +665,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, return; if (rq->cmd_flags & REQ_DISCARD) - rw |= (1 << BIO_RW_DISCARD); + rw |= REQ_DISCARD; if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { what |= BLK_TC_ACT(BLK_TC_PC); @@ -1755,20 +1758,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) if (rw & WRITE) rwbs[i++] = 'W'; - else if (rw & 1 << BIO_RW_DISCARD) + else if (rw & REQ_DISCARD) rwbs[i++] = 'D'; else if (bytes) rwbs[i++] = 'R'; else rwbs[i++] = 'N'; - if (rw & 1 << BIO_RW_AHEAD) + if (rw & REQ_RAHEAD) rwbs[i++] = 'A'; - if (rw & 1 << BIO_RW_BARRIER) + if (rw & REQ_HARDBARRIER) rwbs[i++] = 'B'; - if (rw & 1 << BIO_RW_SYNCIO) + if (rw & REQ_SYNC) rwbs[i++] = 'S'; - if (rw & 1 << BIO_RW_META) + if (rw & REQ_META) rwbs[i++] = 'M'; rwbs[i] = '\0'; @@ -1780,7 +1783,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq) int bytes; if (rq->cmd_flags & REQ_DISCARD) - rw |= (1 << BIO_RW_DISCARD); + rw |= REQ_DISCARD; bytes = blk_rq_bytes(rq); diff --git a/mm/page_io.c b/mm/page_io.c index 31a3b962230a..2dee975bf469 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) goto out; } if (wbc->sync_mode == WB_SYNC_ALL) - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); + rw |= REQ_SYNC | REQ_UNPLUG; count_vm_event(PSWPOUT); set_page_writeback(page); unlock_page(page); -- cgit v1.2.3 From 66ac0280197981f88774e74b60c8e5f9f07c1dba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 18 Jun 2010 16:59:42 +0200 Subject: 
block: don't allocate a payload for discard request Allocating a fixed payload for discard requests always was a horrible hack, and it's now coming to bite us when adding support for discard in DM/MD. So change the code to leave the allocation of a payload to the low-level driver. Unfortunately that means we'll need another hack, which allows us to update the various block layer length fields indicating that we have a payload. Instead of hiding this in sd.c, which we already partially do for UNMAP support, add a documented helper in the core block layer for it. Signed-off-by: Christoph Hellwig Acked-by: Mike Snitzer Signed-off-by: Jens Axboe --- block/blk-core.c | 32 +++++++++++++++++++++++++++++++ block/blk-lib.c | 33 ++++++-------------------------- drivers/scsi/sd.c | 52 +++++++++++++++++++++++++++++++++----------------- include/linux/blkdev.h | 2 ++ 4 files changed, 74 insertions(+), 45 deletions(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 66c3cfe94d0a..3531d8e1da04 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1135,6 +1135,38 @@ void blk_put_request(struct request *req) } EXPORT_SYMBOL(blk_put_request); +/** + * blk_add_request_payload - add a payload to a request + * @rq: request to update + * @page: page backing the payload + * @len: length of the payload. + * + * This allows to later add a payload to an already submitted request by + * a block driver. The driver needs to take care of freeing the payload + * itself. + * + * Note that this is a quite horrible hack and nothing but handling of + * discard requests should ever use it. + */ +void blk_add_request_payload(struct request *rq, struct page *page, + unsigned int len) +{ + struct bio *bio = rq->bio; + + bio->bi_io_vec->bv_page = page; + bio->bi_io_vec->bv_offset = 0; + bio->bi_io_vec->bv_len = len; + + bio->bi_size = len; + bio->bi_vcnt = 1; + bio->bi_phys_segments = 1; + + rq->__data_len = rq->resid_len = len; + rq->nr_phys_segments = 1; + rq->buffer = bio_data(bio); +} +EXPORT_SYMBOL_GPL(blk_add_request_payload); + void init_request_from_bio(struct request *req, struct bio *bio) { req->cpu = bio->bi_comp_cpu; diff --git a/block/blk-lib.c b/block/blk-lib.c index d0216b9f22d4..e16185b0d8e1 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err) if (bio->bi_private) complete(bio->bi_private); - __free_page(bio_page(bio)); bio_put(bio); } @@ -43,7 +42,6 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, int type = flags & BLKDEV_IFL_BARRIER ? DISCARD_BARRIER : DISCARD_NOBARRIER; struct bio *bio; - struct page *page; int ret = 0; if (!q) @@ -53,35 +51,21 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, return -EOPNOTSUPP; while (nr_sects && !ret) { - unsigned int sector_size = q->limits.logical_block_size; unsigned int max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); bio = bio_alloc(gfp_mask, 1); - if (!bio) - goto out; + if (!bio) { + ret = -ENOMEM; + break; + } + bio->bi_sector = sector; bio->bi_end_io = blkdev_discard_end_io; bio->bi_bdev = bdev; if (flags & BLKDEV_IFL_WAIT) bio->bi_private = &wait; - /* - * Add a zeroed one-sector payload as that's what - * our current implementations need. If we'll ever need - * more the interface will need revisiting.
- */ - page = alloc_page(gfp_mask | __GFP_ZERO); - if (!page) - goto out_free_bio; - if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) - goto out_free_page; - - /* - * And override the bio size - the way discard works we - * touch many more blocks on disk than the actual payload - * length. - */ if (nr_sects > max_discard_sectors) { bio->bi_size = max_discard_sectors << 9; nr_sects -= max_discard_sectors; @@ -103,13 +87,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, ret = -EIO; bio_put(bio); } + return ret; -out_free_page: - __free_page(page); -out_free_bio: - bio_put(bio); -out: - return -ENOMEM; } EXPORT_SYMBOL(blkdev_issue_discard); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a3fdf4dc59da..86da819c70eb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -411,22 +411,25 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif) } /** - * sd_prepare_discard - unmap blocks on thinly provisioned device + * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device + * @sdp: scsi device to operate one * @rq: Request to prepare * * Will issue either UNMAP or WRITE SAME(16) depending on preference * indicated by target device. **/ -static int sd_prepare_discard(struct request *rq) +static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) { struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); struct bio *bio = rq->bio; sector_t sector = bio->bi_sector; - unsigned int num = bio_sectors(bio); + unsigned int nr_sectors = bio_sectors(bio); + unsigned int len; + struct page *page; if (sdkp->device->sector_size == 4096) { sector >>= 3; - num >>= 3; + nr_sectors >>= 3; } rq->cmd_type = REQ_TYPE_BLOCK_PC; @@ -434,31 +437,35 @@ static int sd_prepare_discard(struct request *rq) memset(rq->cmd, 0, rq->cmd_len); + page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + if (!page) + return BLKPREP_DEFER; + if (sdkp->unmap) { - char *buf = kmap_atomic(bio_page(bio), KM_USER0); + char *buf = page_address(page); + rq->cmd_len = 10; rq->cmd[0] = UNMAP; rq->cmd[8] = 24; - rq->cmd_len = 10; - - /* Ensure that data length matches payload */ - rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24; put_unaligned_be16(6 + 16, &buf[0]); put_unaligned_be16(16, &buf[2]); put_unaligned_be64(sector, &buf[8]); - put_unaligned_be32(num, &buf[16]); + put_unaligned_be32(nr_sectors, &buf[16]); - kunmap_atomic(buf, KM_USER0); + len = 24; } else { + rq->cmd_len = 16; rq->cmd[0] = WRITE_SAME_16; rq->cmd[1] = 0x8; /* UNMAP */ put_unaligned_be64(sector, &rq->cmd[2]); - put_unaligned_be32(num, &rq->cmd[10]); - rq->cmd_len = 16; + put_unaligned_be32(nr_sectors, &rq->cmd[10]); + + len = sdkp->device->sector_size; } - return BLKPREP_OK; + blk_add_request_payload(rq, page, len); + return scsi_setup_blk_pc_cmnd(sdp, rq); } /** @@ -485,10 +492,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * Discard request come in as REQ_TYPE_FS but we turn them into * block PC requests to make life easier. 
*/ - if (rq->cmd_flags & REQ_DISCARD) - ret = sd_prepare_discard(rq); - - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { + if (rq->cmd_flags & REQ_DISCARD) { + ret = scsi_setup_discard_cmnd(sdp, rq); + goto out; + } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { ret = scsi_setup_blk_pc_cmnd(sdp, rq); goto out; } else if (rq->cmd_type != REQ_TYPE_FS) { @@ -1163,6 +1170,15 @@ static int sd_done(struct scsi_cmnd *SCpnt) int sense_valid = 0; int sense_deferred = 0; + /* + * If this is a discard request that originated from the kernel + * we need to free our payload here. Note that we need to check + * the request flag as the normal payload rules apply for + * pass-through UNMAP / WRITE SAME requests. + */ + if (SCpnt->request->cmd_flags & REQ_DISCARD) + __free_page(bio_page(SCpnt->request->bio)); + if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); if (sense_valid) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3fc0f5908619..204fbe22354d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -705,6 +705,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *, gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); +extern void blk_add_request_payload(struct request *rq, struct page *page, + unsigned int len); extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); extern int blk_lld_busy(struct request_queue *q); extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, -- cgit v1.2.3 From 3ffb52e73b47d6ad86b645942ff49035efdbcd31 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 29 Jun 2010 13:33:38 +0200 Subject: block: fixup missing conversion from BIO_RW_DISCARD to REQ_DISCARD Didn't cause a merge conflict, so fixed this one up manually post merge. Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 3531d8e1da04..3c3789492c10 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1598,7 +1598,7 @@ void submit_bio(int rw, struct bio *bio) * If it's a regular read/write or a barrier with data attached, * go through the normal accounting stuff before submission. 
*/ - if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) { + if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { if (rw & WRITE) { count_vm_events(PGPGOUT, count); } else { -- cgit v1.2.3 From 28018c242a4ec7017bbbf81d2d3952f820a27118 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Thu, 1 Jul 2010 19:49:17 +0900 Subject: block: implement an unprep function corresponding directly to prep Reviewed-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- block/blk-core.c | 25 +++++++++++++++++++++++++ block/blk-settings.c | 17 +++++++++++++++++ drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 4 ++++ 4 files changed, 47 insertions(+), 1 deletion(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 3c3789492c10..5ab3ac22930c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, q->request_fn = rfn; q->prep_rq_fn = NULL; + q->unprep_rq_fn = NULL; q->unplug_fn = generic_unplug_device; q->queue_flags = QUEUE_FLAG_DEFAULT; q->queue_lock = lock; @@ -2133,6 +2134,26 @@ static bool blk_update_bidi_request(struct request *rq, int error, return false; } +/** + * blk_unprep_request - unprepare a request + * @req: the request + * + * This function makes a request ready for complete resubmission (or + * completion). It happens only after all error handling is complete, + * so represents the appropriate moment to deallocate any resources + * that were allocated to the request in the prep_rq_fn. The queue + * lock is held when calling this. + */ +void blk_unprep_request(struct request *req) +{ + struct request_queue *q = req->q; + + req->cmd_flags &= ~REQ_DONTPREP; + if (q->unprep_rq_fn) + q->unprep_rq_fn(q, req); +} +EXPORT_SYMBOL_GPL(blk_unprep_request); + /* * queue lock must be held */ @@ -2148,6 +2169,10 @@ static void blk_finish_request(struct request *req, int error) blk_delete_timer(req); + if (req->cmd_flags & REQ_DONTPREP) + blk_unprep_request(req); + + blk_account_io_done(req); if (req->end_io) diff --git a/block/blk-settings.c b/block/blk-settings.c index f5ed5a1187ba..a234f4bf1d6f 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -36,6 +36,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) } EXPORT_SYMBOL(blk_queue_prep_rq); +/** + * blk_queue_unprep_rq - set an unprepare_request function for queue + * @q: queue + * @ufn: unprepare_request function + * + * It's possible for a queue to register an unprepare_request callback + * which is invoked before the request is finally completed. The goal + * of the function is to deallocate any data that was allocated in the + * prepare_request callback. 
+ * + */ +void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) +{ + q->unprep_rq_fn = ufn; +} +EXPORT_SYMBOL(blk_queue_unprep_rq); + /** * blk_queue_merge_bvec - set a merge_bvec function for queue * @q: queue diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5f1160841b0e..ee836193f531 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -85,7 +85,7 @@ static void scsi_unprep_request(struct request *req) { struct scsi_cmnd *cmd = req->special; - req->cmd_flags &= ~REQ_DONTPREP; + blk_unprep_request(req); req->special = NULL; scsi_put_command(cmd); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 204fbe22354d..6bba04c7ec48 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -200,6 +200,7 @@ struct request_pm_state typedef void (request_fn_proc) (struct request_queue *q); typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); typedef int (prep_rq_fn) (struct request_queue *, struct request *); +typedef void (unprep_rq_fn) (struct request_queue *, struct request *); typedef void (unplug_fn) (struct request_queue *); struct bio_vec; @@ -282,6 +283,7 @@ struct request_queue request_fn_proc *request_fn; make_request_fn *make_request_fn; prep_rq_fn *prep_rq_fn; + unprep_rq_fn *unprep_rq_fn; unplug_fn *unplug_fn; merge_bvec_fn *merge_bvec_fn; prepare_flush_fn *prepare_flush_fn; @@ -841,6 +843,7 @@ extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); extern void blk_abort_queue(struct request_queue *); +extern void blk_unprep_request(struct request *); /* * Access functions for manipulating queue properties @@ -885,6 +888,7 @@ extern int blk_queue_dma_drain(struct request_queue *q, extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); +extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); -- cgit v1.2.3 From 3383977fadc4027f20907f6208a9033cd043adab Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Sun, 8 Aug 2010 12:11:33 -0400 Subject: block: update request stacking methods to support discards Propagate REQ_DISCARD in cmd_flags when cloning a discard request. Skip blk_rq_check_limits's existing checks for discard requests because discard limits will have already been checked in blkdev_issue_discard.
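As a usage illustration (an editor's sketch, not part of this patch): a request-based stacking driver resubmits a clone of the original request to an underlying queue, and with this change a cloned discard keeps REQ_DISCARD and is no longer rejected by blk_rq_check_limits(). The stack_resubmit() name and the caller-supplied request and bio_set are hypothetical.

static int stack_resubmit(struct request *orig, struct request *clone,
			  struct request_queue *bottom_q, struct bio_set *bs)
{
	/*
	 * Clones the bio chain and sets up clone->cmd_flags from orig;
	 * after this patch that includes propagating REQ_DISCARD.
	 */
	if (blk_rq_prep_clone(clone, orig, bs, GFP_ATOMIC, NULL, NULL))
		return -ENOMEM;

	/*
	 * blk_insert_cloned_request() runs blk_rq_check_limits(), which
	 * now passes discard requests through instead of comparing them
	 * against the queue's max_sectors limits.
	 */
	return blk_insert_cloned_request(bottom_q, clone);
}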
Signed-off-by: Mike Snitzer Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 5ab3ac22930c..7da630e25ae7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1644,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio); */ int blk_rq_check_limits(struct request_queue *q, struct request *rq) { + if (rq->cmd_flags & REQ_DISCARD) + return 0; + if (blk_rq_sectors(rq) > queue_max_sectors(q) || blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { printk(KERN_ERR "%s: over max size limit.\n", __func__); @@ -2492,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { dst->cpu = src->cpu; dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); + if (src->cmd_flags & REQ_DISCARD) + dst->cmd_flags |= REQ_DISCARD; dst->cmd_type = src->cmd_type; dst->__sector = blk_rq_pos(src); dst->__data_len = blk_rq_bytes(src); -- cgit v1.2.3
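Taken together, these patches give low-level drivers a symmetric pair of hooks around request preparation: blk_add_request_payload() attaches a driver-allocated page to a discard request at prep time, and an unprep_rq_fn registered with blk_queue_unprep_rq() can free it once all error handling is complete. Below is a minimal sketch of how a driver might wire the two together (an editor's illustration, not code from the patches above; the my_* names are hypothetical, and the 24-byte payload length simply mirrors the UNMAP parameter list built in sd.c):

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int my_prep_fn(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_DISCARD) {
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

		if (!page)
			return BLKPREP_DEFER;	/* have the queue retry prep later */
		/* fill the page with the device-specific discard payload here */
		blk_add_request_payload(rq, page, 24);
	}
	rq->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}

static void my_unprep_fn(struct request_queue *q, struct request *rq)
{
	/*
	 * Reached from blk_finish_request() via blk_unprep_request(), and
	 * only for requests that had REQ_DONTPREP set, i.e. after all
	 * error handling is done, so the payload is freed exactly once.
	 */
	if (rq->cmd_flags & REQ_DISCARD)
		__free_page(bio_page(rq->bio));
}

static void my_init_queue(struct request_queue *q)
{
	blk_queue_prep_rq(q, my_prep_fn);
	blk_queue_unprep_rq(q, my_unprep_fn);
}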