block: remove wrappers for request type/flags
Remove all the trivial wrappers for the cmd_type and cmd_flags fields in struct request. This allows much easier grepping for different request types instead of unwinding through macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
This commit is contained in:
Parent: 7e005f7979
Commit: 33659ebbae
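The conversion is mechanical throughout the patch: each removed helper macro becomes an open-coded test of rq->cmd_type or rq->cmd_flags. As an illustration of the pattern only (the helper function below is invented for this note and is not part of the patch), a driver-side check reads as follows before and after:

```c
#include <linux/blkdev.h>

/* Hypothetical helper, shown only to illustrate the wrapper-removal pattern. */
static bool example_wants_request(struct request *rq)
{
	/*
	 * Before this patch the same test hid behind wrappers:
	 *
	 *	return blk_fs_request(rq) || blk_discard_rq(rq);
	 *
	 * After it, the request type and flags are tested directly.
	 */
	return rq->cmd_type == REQ_TYPE_FS || (rq->cmd_flags & REQ_DISCARD);
}
```

The explicit form is what makes a plain grep for REQ_TYPE_FS or REQ_DISCARD turn up every call site, which is the stated motivation for the change.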
@@ -79,7 +79,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 *
 * http://thread.gmane.org/gmane.linux.kernel/537473
 */
-if (!blk_fs_request(rq))
+if (rq->cmd_type != REQ_TYPE_FS)
 return QUEUE_ORDSEQ_DRAIN;

 if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -236,7 +236,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 struct request *rq = *rqp;
-const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+(rq->cmd_flags & REQ_HARDBARRIER);

 if (!q->ordseq) {
 if (!is_barrier)
@@ -261,7 +262,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 */

 /* Special requests are not subject to ordering rules. */
-if (!blk_fs_request(rq) &&
+if (rq->cmd_type != REQ_TYPE_FS &&
 rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 return true;

@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));

-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 printk(KERN_INFO " cdb: ");
 for (bit = 0; bit < BLK_MAX_CDB; bit++)
 printk("%02x ", rq->cmd[bit]);
@@ -1796,7 +1796,7 @@ struct request *blk_peek_request(struct request_queue *q)
 * sees this request (possibly after
 * requeueing). Notify IO scheduler.
 */
-if (blk_sorted_rq(rq))
+if (rq->cmd_flags & REQ_SORTED)
 elv_activate_rq(q, rq);

 /*
@@ -1984,10 +1984,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 * TODO: tj: This is too subtle. It would be better to let
 * low level drivers do what they see fit.
 */
-if (blk_fs_request(req))
+if (req->cmd_type == REQ_TYPE_FS)
 req->errors = 0;

-if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+if (error && req->cmd_type == REQ_TYPE_FS &&
+!(req->cmd_flags & REQ_QUIET)) {
 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 req->rq_disk ? req->rq_disk->disk_name : "?",
 (unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2075,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 req->buffer = bio_data(req->bio);

 /* update sector only for requests with clear definition of sector */
-if (blk_fs_request(req) || blk_discard_rq(req))
+if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
 req->__sector += total_bytes >> 9;

 /* mixed attributes always follow the first bio */
@@ -2127,7 +2128,7 @@ static void blk_finish_request(struct request *req, int error)

 BUG_ON(blk_queued_rq(req));

-if (unlikely(laptop_mode) && blk_fs_request(req))
+if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
 laptop_io_completion(&req->q->backing_dev_info);

 blk_delete_timer(req);

@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 __elv_add_request(q, rq, where, 1);
 __generic_unplug_device(q);
 /* the queue is stopped so it won't be plugged+unplugged */
-if (blk_pm_resume_request(rq))
+if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 q->request_fn(q);
 spin_unlock_irq(q->queue_lock);
 }

@@ -226,7 +226,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
 unsigned short max_sectors;

-if (unlikely(blk_pc_request(req)))
+if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 max_sectors = queue_max_hw_sectors(q);
 else
 max_sectors = queue_max_sectors(q);
@@ -250,7 +250,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
 unsigned short max_sectors;

-if (unlikely(blk_pc_request(req)))
+if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 max_sectors = queue_max_hw_sectors(q);
 else
 max_sectors = queue_max_sectors(q);

@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
 */
 static inline int blk_do_io_stat(struct request *rq)
 {
-return rq->rq_disk && blk_rq_io_stat(rq) &&
-(blk_fs_request(rq) || blk_discard_rq(rq));
+return rq->rq_disk &&
+(rq->cmd_flags & REQ_IO_STAT) &&
+(rq->cmd_type == REQ_TYPE_FS ||
+(rq->cmd_flags & REQ_DISCARD));
 }

 #endif

@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 return rq1;
 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 return rq2;
-if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
 return rq1;
-else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+else if ((rq2->cmd_flags & REQ_RW_META) &&
+!(rq1->cmd_flags & REQ_RW_META))
 return rq2;

 s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
 cfqq->cfqd->rq_queued--;
 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 rq_data_dir(rq), rq_is_sync(rq));
-if (rq_is_meta(rq)) {
+if (rq->cmd_flags & REQ_RW_META) {
 WARN_ON(!cfqq->meta_pending);
 cfqq->meta_pending--;
 }
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 * So both queues are sync. Let the new request get disk time if
 * it's a metadata request and the current queue is doing regular IO.
 */
-if (rq_is_meta(rq) && !cfqq->meta_pending)
+if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
 return true;

 /*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 struct cfq_io_context *cic = RQ_CIC(rq);

 cfqd->rq_queued++;
-if (rq_is_meta(rq))
+if (rq->cmd_flags & REQ_RW_META)
 cfqq->meta_pending++;

 cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 unsigned long now;

 now = jiffies;
-cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+!!(rq->cmd_flags & REQ_NOIDLE));

 cfq_update_hw_tag(cfqd);

@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 cfq_slice_expired(cfqd, 1);
 else if (sync && cfqq_empty &&
 !cfq_close_cooperator(cfqd, cfqq)) {
-cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+cfqd->noidle_tree_requires_idle |=
+!(rq->cmd_flags & REQ_NOIDLE);
 /*
 * Idling is enabled for SYNC_WORKLOAD.
 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-* only if we processed at least one !rq_noidle request
+* only if we processed at least one !REQ_NOIDLE request
 */
 if (cfqd->serving_type == SYNC_WORKLOAD
 || cfqd->noidle_tree_requires_idle

@@ -428,7 +428,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 list_for_each_prev(entry, &q->queue_head) {
 struct request *pos = list_entry_rq(entry);

-if (blk_discard_rq(rq) != blk_discard_rq(pos))
+if ((rq->cmd_flags & REQ_DISCARD) !=
+(pos->cmd_flags & REQ_DISCARD))
 break;
 if (rq_data_dir(rq) != rq_data_dir(pos))
 break;
@@ -558,7 +559,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 */
 if (blk_account_rq(rq)) {
 q->in_flight[rq_is_sync(rq)]--;
-if (blk_sorted_rq(rq))
+if (rq->cmd_flags & REQ_SORTED)
 elv_deactivate_rq(q, rq);
 }

@@ -644,7 +645,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 break;

 case ELEVATOR_INSERT_SORT:
-BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+!(rq->cmd_flags & REQ_DISCARD));
 rq->cmd_flags |= REQ_SORTED;
 q->nr_sorted++;
 if (rq_mergeable(rq)) {
@@ -716,7 +718,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 /*
 * toggle ordered color
 */
-if (blk_barrier_rq(rq))
+if (rq->cmd_flags & REQ_HARDBARRIER)
 q->ordcolor ^= 1;

 /*
@@ -729,7 +731,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 * this request is scheduling boundary, update
 * end_sector
 */
-if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS ||
+(rq->cmd_flags & REQ_DISCARD)) {
 q->end_sector = rq_end_sector(rq);
 q->boundary_rq = rq;
 }
@@ -843,7 +846,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 */
 if (blk_account_rq(rq)) {
 q->in_flight[rq_is_sync(rq)]--;
-if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+if ((rq->cmd_flags & REQ_SORTED) &&
+e->ops->elevator_completed_req_fn)
 e->ops->elevator_completed_req_fn(q, rq);
 }

@@ -1111,7 +1111,7 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 */
 static int atapi_drain_needed(struct request *rq)
 {
-if (likely(!blk_pc_request(rq)))
+if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
 return 0;

 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))

@@ -1783,7 +1783,7 @@ static void cciss_softirq_done(struct request *rq)
 #endif /* CCISS_DEBUG */

 /* set the residual count for pc requests */
-if (blk_pc_request(rq))
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 rq->resid_len = cmd->err_info->ResidualCnt;

 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
@@ -2983,7 +2983,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 driver_byte = DRIVER_OK;
 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */

-if (blk_pc_request(cmd->rq))
+if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
 host_byte = DID_PASSTHROUGH;
 else
 host_byte = DID_OK;
@@ -2992,7 +2992,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 host_byte, driver_byte);

 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
-if (!blk_pc_request(cmd->rq))
+if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
 printk(KERN_WARNING "cciss: cmd %p "
 "has SCSI Status 0x%x\n",
 cmd, cmd->err_info->ScsiStatus);
@@ -3002,15 +3002,17 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 /* check the sense key */
 sense_key = 0xf & cmd->err_info->SenseInfo[2];
 /* no status or recovered error */
-if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
+if (((sense_key == 0x0) || (sense_key == 0x1)) &&
+(cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
 error_value = 0;

 if (check_for_unit_attention(h, cmd)) {
-*retry_cmd = !blk_pc_request(cmd->rq);
+*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
 return 0;
 }

-if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
+/* Not SG_IO or similar? */
+if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 if (error_value != 0)
 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
 " sense key = 0x%x\n", cmd, sense_key);
@@ -3052,7 +3054,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
 break;
 case CMD_DATA_UNDERRUN:
-if (blk_fs_request(cmd->rq)) {
+if (cmd->rq->cmd_type == REQ_TYPE_FS) {
 printk(KERN_WARNING "cciss: cmd %p has"
 " completed with data underrun "
 "reported\n", cmd);
@@ -3060,7 +3062,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 }
 break;
 case CMD_DATA_OVERRUN:
-if (blk_fs_request(cmd->rq))
+if (cmd->rq->cmd_type == REQ_TYPE_FS)
 printk(KERN_WARNING "cciss: cmd %p has"
 " completed with data overrun "
 "reported\n", cmd);
@@ -3070,42 +3072,48 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 "reported invalid\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 case CMD_PROTOCOL_ERR:
 printk(KERN_WARNING "cciss: cmd %p has "
 "protocol error \n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 case CMD_HARDWARE_ERR:
 printk(KERN_WARNING "cciss: cmd %p had "
 " hardware error\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 case CMD_CONNECTION_LOST:
 printk(KERN_WARNING "cciss: cmd %p had "
 "connection lost\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 case CMD_ABORTED:
 printk(KERN_WARNING "cciss: cmd %p was "
 "aborted\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ABORT);
 break;
 case CMD_ABORT_FAILED:
 printk(KERN_WARNING "cciss: cmd %p reports "
 "abort failed\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 case CMD_UNSOLICITED_ABORT:
 printk(KERN_WARNING "cciss%d: unsolicited "
@@ -3121,13 +3129,15 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 "many times\n", h->ctlr, cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ABORT);
 break;
 case CMD_TIMEOUT:
 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 break;
 default:
 printk(KERN_WARNING "cciss: cmd %p returned "
@@ -3135,7 +3145,8 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 cmd->err_info->CommandStatus);
 rq->errors = make_status_bytes(SAM_STAT_GOOD,
 cmd->err_info->CommandStatus, DRIVER_OK,
-blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+DID_PASSTHROUGH : DID_ERROR);
 }

 after_error_processing:
@@ -3294,7 +3305,7 @@ static void do_cciss_request(struct request_queue *q)
 c->Header.SGList = h->max_cmd_sgentries;
 set_performant_mode(h, c);

-if (likely(blk_fs_request(creq))) {
+if (likely(creq->cmd_type == REQ_TYPE_FS)) {
 if(h->cciss_read == CCISS_READ_10) {
 c->Request.CDB[1] = 0;
 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3324,7 +3335,7 @@ static void do_cciss_request(struct request_queue *q)
 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 c->Request.CDB[14] = c->Request.CDB[15] = 0;
 }
-} else if (blk_pc_request(creq)) {
+} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
 c->Request.CDBLen = creq->cmd_len;
 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
 } else {

@@ -627,7 +627,7 @@ repeat:
 req_data_dir(req) == READ ? "read" : "writ",
 cyl, head, sec, nsect, req->buffer);
 #endif
-if (blk_fs_request(req)) {
+if (req->cmd_type == REQ_TYPE_FS) {
 switch (rq_data_dir(req)) {
 case READ:
 hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,

@@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q)
 break;
 }

-if (unlikely(!blk_fs_request(host->req))) {
+if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
 mg_end_request_cur(host, -EIO);
 continue;
 }
@@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q)
 continue;
 }

-if (unlikely(!blk_fs_request(req))) {
+if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
 mg_end_request_cur(host, -EIO);
 continue;
 }

@@ -448,7 +448,7 @@ static void nbd_clear_que(struct nbd_device *lo)

 static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 {
-if (!blk_fs_request(req))
+if (req->cmd_type != REQ_TYPE_FS)
 goto error_out;

 nbd_cmd(req) = NBD_CMD_READ;

@@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q)
 break;

 /* filter out block requests we don't understand */
-if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
+if (rq->cmd_type != REQ_TYPE_FS &&
+!(rq->cmd_flags & REQ_HARDBARRIER)) {
 blk_end_request_all(rq, 0);
 continue;
 }

@@ -439,7 +439,7 @@ static char *pd_buf; /* buffer for request in progress */

 static enum action do_pd_io_start(void)
 {
-if (blk_special_request(pd_req)) {
+if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
 phase = pd_special;
 return pd_special();
 }

@@ -196,7 +196,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);

 while ((req = blk_fetch_request(q))) {
-if (blk_fs_request(req)) {
+if (req->cmd_type == REQ_TYPE_FS) {
 if (ps3disk_submit_request_sg(dev, req))
 break;
 } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&

@@ -648,7 +648,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 return 0;
 }

-if (lun->changed && !blk_pc_request(rq)) {
+if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC)
 blk_start_request(rq);
 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
 return 0;
@@ -684,7 +684,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 }
 urq->nsg = n_elem;

-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 ub_cmd_build_packet(sc, lun, cmd, urq);
 } else {
 ub_cmd_build_block(sc, lun, cmd, urq);
@@ -781,7 +781,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 rq = urq->rq;

 if (cmd->error == 0) {
-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 if (cmd->act_len >= rq->resid_len)
 rq->resid_len = 0;
 else
@@ -795,7 +795,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 }
 }
 } else {
-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
 rq->sense_len = UB_SENSE_SIZE;

@@ -361,7 +361,7 @@ static void do_viodasd_request(struct request_queue *q)
 if (req == NULL)
 return;
 /* check that request contains a valid command */
-if (!blk_fs_request(req)) {
+if (req->cmd_type != REQ_TYPE_FS) {
 viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 continue;
 }

@@ -65,13 +65,16 @@ static void blk_done(struct virtqueue *vq)
 break;
 }

-if (blk_pc_request(vbr->req)) {
+switch (vbr->req->cmd_type) {
+case REQ_TYPE_BLOCK_PC:
 vbr->req->resid_len = vbr->in_hdr.residual;
 vbr->req->sense_len = vbr->in_hdr.sense_len;
 vbr->req->errors = vbr->in_hdr.errors;
-}
-if (blk_special_request(vbr->req))
+break;
+case REQ_TYPE_SPECIAL:
 vbr->req->errors = (error != 0);
+break;
+}

 __blk_end_request_all(vbr->req, error);
 list_del(&vbr->list);
@@ -123,7 +126,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 BUG();
 }

-if (blk_barrier_rq(vbr->req))
+if (vbr->req->cmd_flags & REQ_HARDBARRIER)
 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
@@ -134,12 +137,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 * block, and before the normal inhdr we put the sense data and the
 * inhdr with additional status information before the normal inhdr.
 */
-if (blk_pc_request(vbr->req))
+if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

-if (blk_pc_request(vbr->req)) {
+if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
 sizeof(vbr->in_hdr));

@@ -322,7 +322,7 @@ static void do_xd_request (struct request_queue * q)
 int res = -EIO;
 int retry;

-if (!blk_fs_request(req))
+if (req->cmd_type != REQ_TYPE_FS) {
 goto done;
 if (block + count > get_capacity(req->rq_disk))
 goto done;

@@ -238,7 +238,7 @@ static int blkif_queue_request(struct request *req)

 ring_req->operation = rq_data_dir(req) ?
 BLKIF_OP_WRITE : BLKIF_OP_READ;
-if (blk_barrier_rq(req))
+if (req->cmd_flags & REQ_HARDBARRIER)
 ring_req->operation = BLKIF_OP_WRITE_BARRIER;

 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -309,7 +309,7 @@ static void do_blkif_request(struct request_queue *rq)

 blk_start_request(req);

-if (!blk_fs_request(req)) {
+if (req->cmd_type != REQ_TYPE_FS) {
 __blk_end_request_all(req, -EIO);
 continue;
 }

@@ -465,7 +465,7 @@ struct request *ace_get_next_request(struct request_queue * q)
 struct request *req;

 while ((req = blk_peek_request(q)) != NULL) {
-if (blk_fs_request(req))
+if (req->cmd_type == REQ_TYPE_FS)
 break;
 blk_start_request(req);
 __blk_end_request_all(req, -EIO);

@@ -643,7 +643,7 @@ static void gdrom_request(struct request_queue *rq)
 struct request *req;

 while ((req = blk_fetch_request(rq)) != NULL) {
-if (!blk_fs_request(req)) {
+if (req->cmd_type != REQ_TYPE_FS) {
 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
 __blk_end_request_all(req, -EIO);
 continue;

@@ -298,7 +298,7 @@ static void do_viocd_request(struct request_queue *q)
 struct request *req;

 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
-if (!blk_fs_request(req))
+if (req->cmd_type != REQ_TYPE_FS)
 __blk_end_request_all(req, -EIO);
 else if (send_request(req) < 0) {
 printk(VIOCD_KERN_WARNING

@@ -190,7 +190,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)

 BUG_ON(sense_len > sizeof(*sense));

-if (blk_sense_request(rq) || drive->sense_rq_armed)
+if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
 return;

 memset(sense, 0, sizeof(*sense));
@@ -307,13 +307,16 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);

 int ide_cd_get_xferlen(struct request *rq)
 {
-if (blk_fs_request(rq))
+switch (rq->cmd_type)
+case REQ_TYPE_FS:
 return 32768;
-else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-rq->cmd_type == REQ_TYPE_ATA_PC)
+case REQ_TYPE_SENSE:
+case REQ_TYPE_BLOCK_PC:
+case REQ_TYPE_ATA_PC:
 return blk_rq_bytes(rq);
-else
+default:
 return 0;
+}
 }
 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);

@@ -474,12 +477,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 if (uptodate == 0)
 drive->failed_pc = NULL;

-if (blk_special_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SPECIAL)
 rq->errors = 0;
 error = 0;
 } else {

-if (blk_fs_request(rq) == 0 && uptodate <= 0) {
+if (req->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
 if (rq->errors == 0)
 rq->errors = -EIO;
 }

@@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 if (!sense->valid)
 break;
 if (failed_command == NULL ||
-!blk_fs_request(failed_command))
+failed_command->cmd_type != REQ_TYPE_FS)
 break;
 sector = (sense->information[0] << 24) |
 (sense->information[1] << 16) |
@@ -292,7 +292,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 "stat 0x%x",
 rq->cmd[0], rq->cmd_type, err, stat);

-if (blk_sense_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SENSE) {
 /*
 * We got an error trying to get sense info from the drive
 * (probably while trying to recover from a former error).
@@ -303,7 +303,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 }

 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
-if (blk_pc_request(rq) && !rq->errors)
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
 rq->errors = SAM_STAT_CHECK_CONDITION;

 if (blk_noretry_request(rq))
@@ -311,13 +311,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)

 switch (sense_key) {
 case NOT_READY:
-if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) {
+if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
 if (ide_cd_breathe(drive, rq))
 return 1;
 } else {
 cdrom_saw_media_change(drive);

-if (blk_fs_request(rq) && !blk_rq_quiet(rq))
+if (rq->cmd_type == REQ_TYPE_FS &&
+!(rq->cmd_flags & REQ_QUIET)) {
 printk(KERN_ERR PFX "%s: tray open\n",
 drive->name);
 }
@@ -326,7 +327,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 case UNIT_ATTENTION:
 cdrom_saw_media_change(drive);

-if (blk_fs_request(rq) == 0)
+if (rq->cmd_type != REQ_TYPE_FS)
 return 0;

 /*
@@ -352,7 +353,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 * No point in retrying after an illegal request or data
 * protect error.
 */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "command error", stat);
 do_end_request = 1;
 break;
@@ -361,20 +362,20 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 * No point in re-trying a zillion times on a bad sector.
 * If we got here the error is not correctable.
 */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "media error "
 "(bad sector)", stat);
 do_end_request = 1;
 break;
 case BLANK_CHECK:
 /* disk appears blank? */
-if (!blk_rq_quiet(rq))
+if (!(rq->cmd_flags & REQ_QUIET))
 ide_dump_status(drive, "media error (blank)",
 stat);
 do_end_request = 1;
 break;
 default:
-if (blk_fs_request(rq) == 0)
+if (req->cmd_type != REQ_TYPE_FS)
 break;
 if (err & ~ATA_ABORTED) {
 /* go to the default handler for other errors */
@@ -385,7 +386,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 do_end_request = 1;
 }

-if (blk_fs_request(rq) == 0) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 rq->cmd_flags |= REQ_FAILED;
 do_end_request = 1;
 }
@@ -525,7 +526,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 ide_expiry_t *expiry = NULL;
 int dma_error = 0, dma, thislen, uptodate = 0;
 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
-int sense = blk_sense_request(rq);
+int sense = (rq->cmd_type == REQ_TYPE_SENSE);
 unsigned int timeout;
 u16 len;
 u8 ireason, stat;
@@ -568,7 +569,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 ide_read_bcount_and_ireason(drive, &len, &ireason);

-thislen = blk_fs_request(rq) ? len : cmd->nleft;
+thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
 if (thislen > len)
 thislen = len;

@@ -577,7 +578,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 /* If DRQ is clear, the command has completed. */
 if ((stat & ATA_DRQ) == 0) {
-if (blk_fs_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS) {
 /*
 * If we're not done reading/writing, complain.
 * Otherwise, complete the command normally.
@@ -591,7 +592,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 rq->cmd_flags |= REQ_FAILED;
 uptodate = 0;
 }
-} else if (!blk_pc_request(rq)) {
+} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 ide_cd_request_sense_fixup(drive, cmd);

 uptodate = cmd->nleft ? 0 : 1;
@@ -640,7 +641,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 /* pad, if necessary */
 if (len > 0) {
-if (blk_fs_request(rq) == 0 || write == 0)
+if (rq->cmd_type != REQ_TYPE_FS || write == 0)
 ide_pad_transfer(drive, write, len);
 else {
 printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -649,11 +650,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 }
 }

-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 timeout = rq->timeout;
 } else {
 timeout = ATAPI_WAIT_PC;
-if (!blk_fs_request(rq))
+if (rq->cmd_type != REQ_TYPE_FS)
 expiry = ide_cd_expiry;
 }

@@ -662,7 +663,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 return ide_started;

 out_end:
-if (blk_pc_request(rq) && rc == 0) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
 rq->resid_len = 0;
 blk_end_request_all(rq, 0);
 hwif->rq = NULL;
@@ -670,7 +671,7 @@ out_end:
 if (sense && uptodate)
 ide_cd_complete_failed_rq(drive, rq);

-if (blk_fs_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS) {
 if (cmd->nleft == 0)
 uptodate = 1;
 } else {
@@ -682,7 +683,7 @@ out_end:
 ide_cd_error_cmd(drive, cmd);

 /* make sure it's fully ended */
-if (blk_fs_request(rq) == 0) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 rq->resid_len -= cmd->nbytes - cmd->nleft;
 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
 rq->resid_len += cmd->last_xfer_len;
@@ -742,7 +743,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
 rq->cmd[0], rq->cmd_type);

-if (blk_pc_request(rq))
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 rq->cmd_flags |= REQ_QUIET;
 else
 rq->cmd_flags &= ~REQ_FAILED;
@@ -783,21 +784,26 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 if (drive->debug_mask & IDE_DBG_RQ)
 blk_dump_rq_flags(rq, "ide_cd_do_request");

-if (blk_fs_request(rq)) {
+switch (rq->cmd_type) {
+case REQ_TYPE_FS:
 if (cdrom_start_rw(drive, rq) == ide_stopped)
 goto out_end;
-} else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-rq->cmd_type == REQ_TYPE_ATA_PC) {
+break;
+case REQ_TYPE_SENSE:
+case REQ_TYPE_BLOCK_PC:
+case REQ_TYPE_ATA_PC:
 if (!rq->timeout)
 rq->timeout = ATAPI_WAIT_PC;

 cdrom_do_block_pc(drive, rq);
-} else if (blk_special_request(rq)) {
+break;
+case REQ_TYPE_SPECIAL:
 /* right now this can only be a reset... */
 uptodate = 1;
 goto out_end;
-} else
+default:
 BUG();
+}

 /* prepare sense request for this command */
 ide_prep_sense(drive, rq);
@@ -809,7 +815,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,

 cmd.rq = rq;

-if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 ide_map_sg(drive, &cmd);
 }
@@ -1365,9 +1371,9 @@ static int ide_cdrom_prep_pc(struct request *rq)

 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
 {
-if (blk_fs_request(rq))
+if (rq->cmd_type == REQ_TYPE_FS)
 return ide_cdrom_prep_fs(q, rq);
-else if (blk_pc_request(rq))
+else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 return ide_cdrom_prep_pc(rq);

 return 0;

@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 ide_hwif_t *hwif = drive->hwif;

 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
-BUG_ON(!blk_fs_request(rq));
+BUG_ON(rq->cmd_type != REQ_TYPE_FS);

 ledtrig_ide_activity();

@@ -122,7 +122,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 return ide_stopped;

 /* retry only "normal" I/O: */
-if (!blk_fs_request(rq)) {
+if (rq->cmd_type != REQ_TYPE_FS) {
 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 struct ide_cmd *cmd = rq->special;

@@ -146,7 +146,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 {
 struct request *rq = drive->hwif->rq;

-if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
+if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
+rq->cmd[0] == REQ_DRIVE_RESET) {
 if (err <= 0 && rq->errors == 0)
 rq->errors = -EIO;
 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));

@@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 drive->failed_pc = NULL;

 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
-(rq && blk_pc_request(rq)))
+(rq && rq->cmd_type == REQ_TYPE_BLOCK_PC))
 uptodate = 1; /* FIXME */
 else if (pc->c[0] == GPCMD_REQUEST_SENSE) {

@@ -98,7 +98,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 "Aborting request!\n");
 }

-if (blk_special_request(rq))
+if (rq->cmd_type == REQ_TYPE_SPECIAL)
 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;

 return uptodate;
@@ -247,14 +247,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 } else
 printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

-if (blk_special_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_SPECIAL) {
 rq->errors = 0;
 ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 return ide_stopped;
 } else
 goto out_end;
 }
-if (blk_fs_request(rq)) {
+
+switch (rq->cmd_type) {
+case REQ_TYPE_FS:
 if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
 (blk_rq_sectors(rq) % floppy->bs_factor)) {
 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -263,13 +265,18 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 }
 pc = &floppy->queued_pc;
 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
-} else if (blk_special_request(rq) || blk_sense_request(rq)) {
+break;
+case REQ_TYPE_SPECIAL:
+case REQ_TYPE_SENSE:
 pc = (struct ide_atapi_pc *)rq->special;
-} else if (blk_pc_request(rq)) {
+break;
+case REQ_TYPE_BLOCK_PC:
 pc = &floppy->queued_pc;
 idefloppy_blockpc_cmd(floppy, pc, rq);
-} else
+break;
+default:
 BUG();
+}

 ide_prep_sense(drive, rq);

@@ -280,7 +287,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,

 cmd.rq = rq;

-if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 ide_map_sg(drive, &cmd);
 }
@@ -290,7 +297,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 return ide_floppy_issue_pc(drive, &cmd, pc);
 out_end:
 drive->failed_pc = NULL;
-if (blk_fs_request(rq) == 0 && rq->errors == 0)
+if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 rq->errors = -EIO;
 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 return ide_stopped;

@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);

 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
 u8 media = drive->media;

 drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 } else {
 if (media == ide_tape)
 rq->errors = IDE_DRV_ERROR_GENERAL;
-else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 rq->errors = -EIO;
 }

@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 ide_startstop_t startstop;

-BUG_ON(!blk_rq_started(rq));
+BUG_ON(!(rq->cmd_flags & REQ_STARTED));

 #ifdef DEBUG
 printk("%s: start_request: current=0x%08lx\n",
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 pm->pm_step == IDE_PM_COMPLETED)
 ide_complete_pm_rq(drive, rq);
 return startstop;
-} else if (!rq->rq_disk && blk_special_request(rq))
+} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL) {
 /*
 * TODO: Once all ULDs have been modified to
 * check for specific op codes rather than

@@ -191,10 +191,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)

 #ifdef DEBUG_PM
 printk("%s: completing PM request, %s\n", drive->name,
-blk_pm_suspend_request(rq) ? "suspend" : "resume");
+(rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
 #endif
 spin_lock_irqsave(q->queue_lock, flags);
-if (blk_pm_suspend_request(rq))
+if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
 blk_stop_queue(q);
 else
 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -210,11 +210,11 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
 struct request_pm_state *pm = rq->special;

-if (blk_pm_suspend_request(rq) &&
+if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
 pm->pm_step == IDE_PM_START_SUSPEND)
 /* Mark drive blocked when starting the suspend sequence. */
 drive->dev_flags |= IDE_DFLAG_BLOCKED;
-else if (blk_pm_resume_request(rq) &&
+else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
 pm->pm_step == IDE_PM_START_RESUME) {
 /*
 * The first thing we do on wakeup is to wait for BSY bit to

@@ -577,7 +577,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
 blk_rq_sectors(rq));

-BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq)));
+BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL ||
+rq->cmd_type == REQ_TYPE_SENSE));

 /* Retry a failed packet command */
 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {

@@ -792,12 +792,12 @@ static void dm_end_request(struct request *clone, int error)
 {
 int rw = rq_data_dir(clone);
 int run_queue = 1;
-bool is_barrier = blk_barrier_rq(clone);
+bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct mapped_device *md = tio->md;
 struct request *rq = tio->orig;

-if (blk_pc_request(rq) && !is_barrier) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
 rq->errors = clone->errors;
 rq->resid_len = clone->resid_len;

@@ -844,7 +844,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 struct request_queue *q = rq->q;
 unsigned long flags;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request.
 * Leave it to dm_end_request(), which handles this special
@@ -943,7 +943,7 @@ static void dm_complete_request(struct request *clone, int error)
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct request *rq = tio->orig;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request. So can't use
 * softirq_done with the original.
@@ -972,7 +972,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
 struct dm_rq_target_io *tio = clone->end_io_data;
 struct request *rq = tio->orig;

-if (unlikely(blk_barrier_rq(clone))) {
+if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 /*
 * Barrier clones share an original request.
 * Leave it to dm_end_request(), which handles this special

@@ -805,7 +805,8 @@ static void mspro_block_start(struct memstick_dev *card)

 static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
 {
-if (!blk_fs_request(req) && !blk_pc_request(req)) {
+if (req->cmd_type != REQ_TYPE_FS &&
+req->cmd_type != REQ_TYPE_BLOCK_PC) {
 blk_dump_rq_flags(req, "MSPro unsupported request");
 return BLKPREP_KILL;
 }

@@ -883,7 +883,7 @@ static void i2o_block_request_fn(struct request_queue *q)
 if (!req)
 break;

-if (blk_fs_request(req)) {
+if (req->cmd_type == REQ_TYPE_FS) {
 struct i2o_block_delayed_request *dreq;
 struct i2o_block_request *ireq = req->special;
 unsigned int queue_depth;

@@ -32,7 +32,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 /*
 * We only like normal block requests.
 */
-if (!blk_fs_request(req)) {
+if (req->cmd_type != REQ_TYPE_FS) {
 blk_dump_rq_flags(req, "MMC bad request");
 return BLKPREP_KILL;
 }

@@ -73,14 +73,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,

 buf = req->buffer;

-if (!blk_fs_request(req))
+if (req->cmd_type != REQ_TYPE_FS)
 return -EIO;

 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
 get_capacity(req->rq_disk))
 return -EIO;

-if (blk_discard_rq(req))
+if (req->cmd_flags & REQ_DISCARD)
 return tr->discard(dev, block, nsect);

 switch(rq_data_dir(req)) {

@@ -307,7 +307,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
 return FAILED;

-if (blk_barrier_rq(scmd->request))
+if (scmd->request->cmd_flags & REQ_HARDBARRIER)
 /*
 * barrier requests should always retry on UA
 * otherwise block will get a spurious error
@@ -1318,16 +1318,16 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
 case DID_OK:
 break;
 case DID_BUS_BUSY:
-return blk_failfast_transport(scmd->request);
+return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);
 case DID_PARITY:
-return blk_failfast_dev(scmd->request);
+return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
 case DID_ERROR:
 if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
 status_byte(scmd->result) == RESERVATION_CONFLICT)
 return 0;
 /* fall through */
 case DID_SOFT_ERROR:
-return blk_failfast_driver(scmd->request);
+return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);
 }

 switch (status_byte(scmd->result)) {
@@ -1336,7 +1336,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)
 * assume caller has checked sense and determinted
 * the check condition was retryable.
 */
-return blk_failfast_dev(scmd->request);
+return (scmd->request->cmd_flags & REQ_FAILFAST_DEV);
 }

 return 0;

@@ -722,7 +722,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 sense_deferred = scsi_sense_is_deferred(&sshdr);
 }

-if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
+if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
 req->errors = result;
 if (result) {
 if (sense_valid && req->sense) {
@@ -757,7 +757,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 }
 }

-BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
+/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+BUG_ON(blk_bidi_rq(req));

 /*
 * Next deal with any sectors which we were able to correctly

@@ -485,7 +485,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 * Discard request come in as REQ_TYPE_FS but we turn them into
 * block PC requests to make life easier.
 */
-if (blk_discard_rq(rq))
+if (rq->cmd_flags & REQ_DISCARD)
 ret = sd_prepare_discard(rq);

 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -636,7 +636,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD;
 SCpnt->cmnd[7] = 0x18;
 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32;
-SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);

 /* LBA */
 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -661,7 +661,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
 } else if (block > 0xffffffff) {
 SCpnt->cmnd[0] += READ_16 - READ_6;
-SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0;
 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0;
@@ -682,7 +682,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 this_count = 0xffff;

 SCpnt->cmnd[0] += READ_10 - READ_6;
-SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0);
+SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff;
 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff;
@@ -691,7 +691,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff;
 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff;
 } else {
-if (unlikely(blk_fua_rq(rq))) {
+if (unlikely(rq->cmd_flags & REQ_FUA)) {
 /*
 * This happens only if this drive failed
 * 10byte rw command with ILLEGAL_REQUEST
@@ -1112,7 +1112,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 u64 bad_lba;
 int info_valid;

-if (!blk_fs_request(scmd->request))
+if (scmd->request->cmd_type != REQ_TYPE_FS)
 return 0;

 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,

@@ -2022,7 +2022,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done
 != cmd))
 {
-if(blk_fs_request(cmd->request)) {
+if (cmd->request->cmd_type == REQ_TYPE_FS) {
 sun3scsi_dma_setup(d, count,
 rq_data_dir(cmd->request));
 sun3_dma_setup_done = cmd;

@@ -524,7 +524,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
 struct scsi_cmnd *cmd,
 int write_flag)
 {
-if(blk_fs_request(cmd->request))
+if (cmd->request->cmd_type == REQ_TYPE_FS)
 return wanted;
 else
 return 0;

@@ -458,7 +458,7 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
 struct scsi_cmnd *cmd,
 int write_flag)
 {
-if(blk_fs_request(cmd->request))
+if (cmd->request->cmd_type == REQ_TYPE_FS)
 return wanted;
 else
 return 0;

@@ -823,7 +823,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
 blkvsc_req->cmnd[0] = READ_16;
 }

-blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+blkvsc_req->cmnd[1] |=
+(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

 *(unsigned long long *)&blkvsc_req->cmnd[2] =
 cpu_to_be64(blkvsc_req->sector_start);
@@ -839,7 +840,8 @@ static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
 blkvsc_req->cmnd[0] = READ_10;
 }

-blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
+blkvsc_req->cmnd[1] |=
+(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

 *(unsigned int *)&blkvsc_req->cmnd[2] =
 cpu_to_be32(blkvsc_req->sector_start);
@@ -1286,7 +1288,7 @@ static void blkvsc_request(struct request_queue *queue)
 DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

 blkdev = req->rq_disk->private_data;
-if (blkdev->shutting_down || !blk_fs_request(req) ||
+if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
 blkdev->media_not_present) {
 __blk_end_request_cur(req, 0);
 continue;

@@ -604,33 +604,20 @@ enum {
 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)

-#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
-#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
-#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
-#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
+#define blk_noretry_request(rq) \
+((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+REQ_FAILFAST_DRIVER))

-#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
-#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
-#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
-#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
-blk_failfast_transport(rq) || \
-blk_failfast_driver(rq))
-#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
-#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
-#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET)
+#define blk_account_rq(rq) \
+(((rq)->cmd_flags & REQ_STARTED) && \
+((rq)->cmd_type == REQ_TYPE_FS || \
+((rq)->cmd_flags & REQ_DISCARD)))

-#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
-
-#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
-#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq) \
-(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
+((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+(rq)->cmd_type == REQ_TYPE_PM_RESUME)

 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
-#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
-#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
-#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
@@ -652,9 +639,6 @@ static inline bool rq_is_sync(struct request *rq)
 return rw_is_sync(rq->cmd_flags);
 }

-#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
-#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
-
 static inline int blk_queue_full(struct request_queue *q, int sync)
 {
 if (sync)
@@ -687,7 +671,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq) \
 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
-(blk_discard_rq(rq) || blk_fs_request((rq))))
+(((rq)->cmd_flags & REQ_DISCARD) || \
+(rq)->cmd_type == REQ_TYPE_FS))

 /*
 * q->prep_rq_fn return values

@@ -224,7 +224,7 @@ static inline int blk_trace_init_sysfs(struct device *dev)

 static inline int blk_cmd_buf_len(struct request *rq)
 {
-return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
 }

 extern void blk_dump_cmd(char *buf, struct request *rq);

@@ -25,8 +25,10 @@ DECLARE_EVENT_CLASS(block_rq_with_error,

 TP_fast_assign(
 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-__entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
-__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+0 : blk_rq_pos(rq);
+__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+0 : blk_rq_sectors(rq);
 __entry->errors = rq->errors;

 blk_fill_rwbs_rq(__entry->rwbs, rq);
@@ -109,9 +111,12 @@ DECLARE_EVENT_CLASS(block_rq,

 TP_fast_assign(
 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
-__entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
-__entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
-__entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
+__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+0 : blk_rq_pos(rq);
+__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+0 : blk_rq_sectors(rq);
+__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+blk_rq_bytes(rq) : 0;

 blk_fill_rwbs_rq(__entry->rwbs, rq);
 blk_dump_cmd(__get_str(cmd), rq);

@@ -661,10 +661,10 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 if (likely(!bt))
 return;

-if (blk_discard_rq(rq))
+if (rq->cmd_flags & REQ_DISCARD)
 rw |= (1 << BIO_RW_DISCARD);

-if (blk_pc_request(rq)) {
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 what |= BLK_TC_ACT(BLK_TC_PC);
 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
 what, rq->errors, rq->cmd_len, rq->cmd);
@@ -925,7 +925,7 @@ void blk_add_driver_data(struct request_queue *q,
 if (likely(!bt))
 return;

-if (blk_pc_request(rq))
+if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
 BLK_TA_DRV_DATA, rq->errors, len, data);
 else
@@ -1730,7 +1730,7 @@ void blk_dump_cmd(char *buf, struct request *rq)
 int len = rq->cmd_len;
 unsigned char *cmd = rq->cmd;

-if (!blk_pc_request(rq)) {
+if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 buf[0] = '\0';
 return;
 }
@@ -1779,7 +1779,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
 int rw = rq->cmd_flags & 0x03;
 int bytes;

-if (blk_discard_rq(rq))
+if (rq->cmd_flags & REQ_DISCARD)
 rw |= (1 << BIO_RW_DISCARD);

 bytes = blk_rq_bytes(rq);