for-5.13/block-2021-04-27
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmCIJW0QHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpr8sD/4qP+MsFTB1IFUu8fW7BjBPdduoK8Vq9o3S
HB8iF/yhJZ73nLecMMdn/jTO8SCW0Iw+okywW3BugGnNPbwXo0UQ4jLhzbTts76P
JvZaguZFhBsF3ceFOt3CRCQDOeoDfMp3sitLUVivkN+2vwMs9vJpVNaEeUjcCC1Z
8QjlpqYSMuakTwEn7QhlnKxVWn1V2B6PDjZMcf48ONRZGsCkoOXH1SE4Ge8nxjqa
KHKO5bvwgRzGhKpvdHEIl8dmFL9WEWElBVoY3vE2EHL0SPE32zHlxtYLS0NAhY2M
aprkJ0QP0Rgl8HpYiCstwAnJGKDg4a0ArWhf/CJTuLAWmTNFR7v5n7vw2SilJHTG
0FtiFiOnpvvBmUC0B1PUEQX8AiFcdXueLb6xboExcp2WtxIAe8wPoGFl6T1tobBY
qsfWggGs/vD1RVrJISPC+20cJemcRyeakMV48w+n3Lt/ES3IEv/LXx6PO/PbXvOo
B7HJXTofkoaX52A/1+NxraGapwzhYouhi6Sb6Fc++X59/a/oBuOUGuur0eZ+/oWA
9787mUUDmW/sahfZUgZh5AxqKo2jJULjeggANCICW9/RN6duV8TBQVOLW1/0Wddp
9lndiA9ZMveWF+J19+sjBoiYMYawLmURaOlDK77ctTCcR/ji3l4GZ+2KvBEMeIT8
O1OYEnwaIQ==
=oza6
-----END PGP SIGNATURE-----

Merge tag 'for-5.13/block-2021-04-27' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "Pretty quiet round this time, which is nice. In detail:

   - Series revamping bounce buffer support (Christoph)

   - Dead code removal (Christoph, Bart)

   - Partition iteration revamp, now using xarray (Christoph)

   - Passthrough request scheduler improvements (Lin)

   - Series of BFQ improvements (Paolo)

   - Fix ioprio task iteration (Peter)

   - Various little tweaks and fixes (Tejun, Saravanan, Bhaskar, Max,
     Nikolay)"

* tag 'for-5.13/block-2021-04-27' of git://git.kernel.dk/linux-block: (41 commits)
  blk-iocost: don't ignore vrate_min on QD contention
  blk-mq: Fix spurious debugfs directory creation during initialization
  bfq/mq-deadline: remove redundant check for passthrough request
  blk-mq: bypass IO scheduler's limit_depth for passthrough request
  block: Remove an obsolete comment from sg_io()
  block: move bio_list_copy_data to pktcdvd
  block: remove zero_fill_bio_iter
  block: add queue_to_disk() to get gendisk from request_queue
  block: remove an incorrect check from blk_rq_append_bio
  block: initialize ret in bdev_disk_changed
  block: Fix sys_ioprio_set(.which=IOPRIO_WHO_PGRP) task iteration
  block: remove disk_part_iter
  block: simplify diskstats_show
  block: simplify show_partition
  block: simplify printk_all_partitions
  block: simplify partition_overlaps
  block: simplify partition removal
  block: take bd_mutex around delete_partitions in del_gendisk
  block: refactor blk_drop_partitions
  block: move more syncing and invalidation to delete_partition
  ...
Commit 6c00292113
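Several shortlog entries above (remove disk_part_iter, simplify printk_all_partitions/show_partition/diskstats_show) move partition walking onto the gendisk's xarray. A minimal sketch of what that iteration style looks like, assuming a 5.13-era struct gendisk whose part_tbl is an xarray; locking and reference counting are deliberately elided, and the helper itself is hypothetical, not part of this diff:

    #include <linux/blkdev.h>
    #include <linux/xarray.h>

    /* hypothetical helper, for illustration only */
    static unsigned int count_partitions(struct gendisk *disk)
    {
            struct block_device *part;
            unsigned long idx;
            unsigned int nr = 0;

            /* replaces the old disk_part_iter_init()/next()/exit() walk */
            xa_for_each(&disk->part_tbl, idx, part)
                    nr++;

            return nr;
    }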
@@ -251,8 +251,6 @@ BT-445C VLB Fast SCSI-2
BT-747C EISA Fast SCSI-2
BT-757C EISA Wide Fast SCSI-2
BT-757CD EISA Wide Differential Fast SCSI-2
BT-545C ISA Fast SCSI-2
BT-540CF ISA Fast SCSI-2
======== ==== ==============================

MultiMaster "S" Series Host Adapters:

@@ -263,17 +261,13 @@ BT-747S EISA Fast SCSI-2
BT-747D EISA Differential Fast SCSI-2
BT-757S EISA Wide Fast SCSI-2
BT-757D EISA Wide Differential Fast SCSI-2
BT-545S ISA Fast SCSI-2
BT-542D ISA Differential Fast SCSI-2
BT-742A EISA SCSI-2 (742A revision H)
BT-542B ISA SCSI-2 (542B revision H)
======= ==== ==============================

MultiMaster "A" Series Host Adapters:

======= ==== ==============================
BT-742A EISA SCSI-2 (742A revisions A - G)
BT-542B ISA SCSI-2 (542B revisions A - G)
======= ==== ==============================

AMI FastDisk Host Adapters that are true BusLogic MultiMaster clones are also

@@ -400,26 +394,11 @@ selected host adapter.

The BusLogic Driver Probing Options comprise the following:

IO:<integer>

The "IO:" option specifies an ISA I/O Address to be probed for a non-PCI
MultiMaster Host Adapter. If neither "IO:" nor "NoProbeISA" options are
specified, then the standard list of BusLogic MultiMaster ISA I/O Addresses
will be probed (0x330, 0x334, 0x230, 0x234, 0x130, and 0x134). Multiple
"IO:" options may be specified to precisely determine the I/O Addresses to
be probed, but the probe order will always follow the standard list.

NoProbe

The "NoProbe" option disables all probing and therefore no BusLogic Host
Adapters will be detected.

NoProbeISA

The "NoProbeISA" option disables probing of the standard BusLogic ISA I/O
Addresses and therefore only PCI MultiMaster and FlashPoint Host Adapters
will be detected.

NoProbePCI

The "NoProbePCI" options disables the interrogation of PCI Configuration

@@ -464,10 +443,7 @@ QueueDepth:<integer>
Depth for devices that do not support Tagged Queuing. If no Queue Depth
option is provided, the Queue Depth will be determined automatically based
on the Host Adapter's Total Queue Depth and the number, type, speed, and
capabilities of the detected Target Devices. For Host Adapters that
require ISA Bounce Buffers, the Queue Depth is automatically set by default
to BusLogic_TaggedQueueDepthBB or BusLogic_UntaggedQueueDepthBB to avoid
excessive preallocation of DMA Bounce Buffer memory. Target Devices that
capabilities of the detected Target Devices. Target Devices that
do not support Tagged Queuing always have their Queue Depth set to
BusLogic_UntaggedQueueDepth or BusLogic_UntaggedQueueDepthBB, unless a
lower Queue Depth option is provided. A Queue Depth of 1 automatically
@@ -1095,10 +1095,6 @@ of interest:
- maximum number of commands that can be queued on devices
controlled by the host. Overridden by LLD calls to
scsi_change_queue_depth().
unchecked_isa_dma
- 1=>only use bottom 16 MB of ram (ISA DMA addressing
restriction), 0=>can use full 32 bit (or better) DMA
address space
no_async_abort
- 1=>Asynchronous aborts are not supported
- 0=>Timed-out commands will be aborted asynchronously
@@ -547,6 +547,8 @@ static void bfq_pd_init(struct blkg_policy_data *pd)

entity->orig_weight = entity->weight = entity->new_weight = d->weight;
entity->my_sched_data = &bfqg->sched_data;
entity->last_bfqq_created = NULL;

bfqg->my_entity = entity; /*
* the root_group's will be set to NULL
* in bfq_init_queue()
@@ -1012,7 +1012,7 @@ static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
struct bfq_io_cq *bic, bool bfq_already_existing)
{
unsigned int old_wr_coeff = bfqq->wr_coeff;
unsigned int old_wr_coeff = 1;
bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

if (bic->saved_has_short_ttime)

@@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
bfqq->ttime = bic->saved_ttime;
bfqq->io_start_time = bic->saved_io_start_time;
bfqq->tot_idle_time = bic->saved_tot_idle_time;
bfqq->wr_coeff = bic->saved_wr_coeff;
/*
* Restore weight coefficient only if low_latency is on
*/
if (bfqd->low_latency) {
old_wr_coeff = bfqq->wr_coeff;
bfqq->wr_coeff = bic->saved_wr_coeff;
}
bfqq->service_from_wr = bic->saved_service_from_wr;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;

@@ -1069,7 +1075,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
(bfqq->weight_counter != NULL);
(bfqq->weight_counter != NULL) - bfqq->stable_ref;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */

@@ -2622,6 +2628,11 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
return true;
}

static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
struct bfq_queue *bfqq);

static void bfq_put_stable_ref(struct bfq_queue *bfqq);

/*
* Attempt to schedule a merge of bfqq with the currently in-service
* queue or with a close queue among the scheduled queues. Return
@@ -2644,10 +2655,49 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
*/
static struct bfq_queue *
bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
void *io_struct, bool request)
void *io_struct, bool request, struct bfq_io_cq *bic)
{
struct bfq_queue *in_service_bfqq, *new_bfqq;

/*
* Check delayed stable merge for rotational or non-queueing
* devs. For this branch to be executed, bfqq must not be
* currently merged with some other queue (i.e., bfqq->bic
* must be non null). If we considered also merged queues,
* then we should also check whether bfqq has already been
* merged with bic->stable_merge_bfqq. But this would be
* costly and complicated.
*/
if (unlikely(!bfqd->nonrot_with_queueing)) {
if (bic->stable_merge_bfqq &&
!bfq_bfqq_just_created(bfqq) &&
time_is_after_jiffies(bfqq->split_time +
msecs_to_jiffies(200))) {
struct bfq_queue *stable_merge_bfqq =
bic->stable_merge_bfqq;
int proc_ref = min(bfqq_process_refs(bfqq),
bfqq_process_refs(stable_merge_bfqq));

/* deschedule stable merge, because done or aborted here */
bfq_put_stable_ref(stable_merge_bfqq);

bic->stable_merge_bfqq = NULL;

if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
proc_ref > 0) {
/* next function will take at least one ref */
struct bfq_queue *new_bfqq =
bfq_setup_merge(bfqq, stable_merge_bfqq);

bic->stably_merged = true;
if (new_bfqq && new_bfqq->bic)
new_bfqq->bic->stably_merged = true;
return new_bfqq;
} else
return NULL;
}
}

/*
* Do not perform queue merging if the device is non
* rotational and performs internal queueing. In fact, such a
@@ -2789,6 +2839,17 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
}
}

static void
bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
{
if (cur_bfqq->entity.parent &&
cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
cur_bfqq->entity.parent->last_bfqq_created = new_bfqq;
else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
}

void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
/*

@@ -2806,6 +2867,8 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq != bfqd->in_service_queue)
bfq_del_bfqq_busy(bfqd, bfqq, false);

bfq_reassign_last_bfqq(bfqq, NULL);

bfq_put_queue(bfqq);
}
@@ -2822,6 +2885,29 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
bfq_mark_bfqq_IO_bound(new_bfqq);
bfq_clear_bfqq_IO_bound(bfqq);

/*
* The processes associated with bfqq are cooperators of the
* processes associated with new_bfqq. So, if bfqq has a
* waker, then assume that all these processes will be happy
* to let bfqq's waker freely inject I/O when they have no
* I/O.
*/
if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
bfqq->waker_bfqq != new_bfqq) {
new_bfqq->waker_bfqq = bfqq->waker_bfqq;
new_bfqq->tentative_waker_bfqq = NULL;

/*
* If the waker queue disappears, then
* new_bfqq->waker_bfqq must be reset. So insert
* new_bfqq into the woken_list of the waker. See
* bfq_check_waker for details.
*/
hlist_add_head(&new_bfqq->woken_list_node,
&new_bfqq->waker_bfqq->woken_list);

}

/*
* If bfqq is weight-raised, then let new_bfqq inherit
* weight-raising. To reduce false positives, neglect the case

@@ -2879,6 +2965,9 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
*/
new_bfqq->pid = -1;
bfqq->bic = NULL;

bfq_reassign_last_bfqq(bfqq, new_bfqq);

bfq_release_process_ref(bfqd, bfqq);
}

@@ -2906,7 +2995,7 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
* We take advantage of this function to perform an early merge
* of the queues of possible cooperating processes.
*/
new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
if (new_bfqq) {
/*
* bic still points to bfqq, then it has not yet been
@@ -4491,9 +4580,15 @@ check_queue:
bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
bfqq->bic->bfqq[0]->next_rq ?
bfqq->bic->bfqq[0] : NULL;
struct bfq_queue *blocked_bfqq =
!hlist_empty(&bfqq->woken_list) ?
container_of(bfqq->woken_list.first,
struct bfq_queue,
woken_list_node)
: NULL;

/*
* The next three mutually-exclusive ifs decide
* The next four mutually-exclusive ifs decide
* whether to try injection, and choose the queue to
* pick an I/O request from.
*

@@ -4526,7 +4621,15 @@ check_queue:
* next bfqq's I/O is brought forward dramatically,
* for it is not blocked for milliseconds.
*
* The third if checks whether bfqq is a queue for
* The third if checks whether there is a queue woken
* by bfqq, and currently with pending I/O. Such a
* woken queue does not steal bandwidth from bfqq,
* because it remains soon without I/O if bfqq is not
* served. So there is virtually no risk of loss of
* bandwidth for bfqq if this woken queue has I/O
* dispatched while bfqq is waiting for new I/O.
*
* The fourth if checks whether bfqq is a queue for
* which it is better to avoid injection. It is so if
* bfqq delivers more throughput when served without
* any further I/O from other queues in the middle, or

@@ -4546,11 +4649,11 @@ check_queue:
* bfq_update_has_short_ttime(), it is rather likely
* that, if I/O is being plugged for bfqq and the
* waker queue has pending I/O requests that are
* blocking bfqq's I/O, then the third alternative
* blocking bfqq's I/O, then the fourth alternative
* above lets the waker queue get served before the
* I/O-plugging timeout fires. So one may deem the
* second alternative superfluous. It is not, because
* the third alternative may be way less effective in
* the fourth alternative may be way less effective in
* case of a synchronization. For two main
* reasons. First, throughput may be low because the
* inject limit may be too low to guarantee the same

@@ -4559,7 +4662,7 @@ check_queue:
* guarantees (the second alternative unconditionally
* injects a pending I/O request of the waker queue
* for each bfq_dispatch_request()). Second, with the
* third alternative, the duration of the plugging,
* fourth alternative, the duration of the plugging,
* i.e., the time before bfqq finally receives new I/O,
* may not be minimized, because the waker queue may
* happen to be served only after other queues.

@@ -4577,6 +4680,14 @@ check_queue:
bfq_bfqq_budget_left(bfqq->waker_bfqq)
)
bfqq = bfqq->waker_bfqq;
else if (blocked_bfqq &&
bfq_bfqq_busy(blocked_bfqq) &&
blocked_bfqq->next_rq &&
bfq_serv_to_charge(blocked_bfqq->next_rq,
blocked_bfqq) <=
bfq_bfqq_budget_left(blocked_bfqq)
)
bfqq = blocked_bfqq;
else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
(bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
!bfq_bfqq_has_short_ttime(bfqq)))
@@ -4983,6 +5094,12 @@ void bfq_put_queue(struct bfq_queue *bfqq)
bfqg_and_blkg_put(bfqg);
}

static void bfq_put_stable_ref(struct bfq_queue *bfqq)
{
bfqq->stable_ref--;
bfq_put_queue(bfqq);
}

static void bfq_put_cooperator(struct bfq_queue *bfqq)
{
struct bfq_queue *__bfqq, *next;

@@ -5039,6 +5156,24 @@ static void bfq_exit_icq(struct io_cq *icq)
{
struct bfq_io_cq *bic = icq_to_bic(icq);

if (bic->stable_merge_bfqq) {
struct bfq_data *bfqd = bic->stable_merge_bfqq->bfqd;

/*
* bfqd is NULL if scheduler already exited, and in
* that case this is the last time bfqq is accessed.
*/
if (bfqd) {
unsigned long flags;

spin_lock_irqsave(&bfqd->lock, flags);
bfq_put_stable_ref(bic->stable_merge_bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
bfq_put_stable_ref(bic->stable_merge_bfqq);
}
}

bfq_exit_icq_bfqq(bic, true);
bfq_exit_icq_bfqq(bic, false);
}

@@ -5099,7 +5234,8 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)

static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bio *bio, bool is_sync,
struct bfq_io_cq *bic);
struct bfq_io_cq *bic,
bool respawn);

static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
{

@@ -5119,7 +5255,7 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
bfqq = bic_to_bfqq(bic, false);
if (bfqq) {
bfq_release_process_ref(bfqd, bfqq);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic, true);
bic_set_bfqq(bic, bfqq, false);
}

@@ -5162,6 +5298,8 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/* set end request to minus infinity from now */
bfqq->ttime.last_end_request = now_ns + 1;

bfqq->creation_time = jiffies;

bfqq->io_start_time = now_ns;

bfq_mark_bfqq_IO_bound(bfqq);
@@ -5211,9 +5349,156 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
}
}

static struct bfq_queue *
bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_io_cq *bic,
struct bfq_queue *last_bfqq_created)
{
struct bfq_queue *new_bfqq =
bfq_setup_merge(bfqq, last_bfqq_created);

if (!new_bfqq)
return bfqq;

if (new_bfqq->bic)
new_bfqq->bic->stably_merged = true;
bic->stably_merged = true;

/*
* Reusing merge functions. This implies that
* bfqq->bic must be set too, for
* bfq_merge_bfqqs to correctly save bfqq's
* state before killing it.
*/
bfqq->bic = bic;
bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);

return new_bfqq;
}

/*
* Many throughput-sensitive workloads are made of several parallel
* I/O flows, with all flows generated by the same application, or
* more generically by the same task (e.g., system boot). The most
* counterproductive action with these workloads is plugging I/O
* dispatch when one of the bfq_queues associated with these flows
* remains temporarily empty.
*
* To avoid this plugging, BFQ has been using a burst-handling
* mechanism for years now. This mechanism has proven effective for
* throughput, and not detrimental for service guarantees. The
* following function pushes this mechanism a little bit further,
* basing on the following two facts.
*
* First, all the I/O flows of a the same application or task
* contribute to the execution/completion of that common application
* or task. So the performance figures that matter are total
* throughput of the flows and task-wide I/O latency. In particular,
* these flows do not need to be protected from each other, in terms
* of individual bandwidth or latency.
*
* Second, the above fact holds regardless of the number of flows.
*
* Putting these two facts together, this commits merges stably the
* bfq_queues associated with these I/O flows, i.e., with the
* processes that generate these IO/ flows, regardless of how many the
* involved processes are.
*
* To decide whether a set of bfq_queues is actually associated with
* the I/O flows of a common application or task, and to merge these
* queues stably, this function operates as follows: given a bfq_queue,
* say Q2, currently being created, and the last bfq_queue, say Q1,
* created before Q2, Q2 is merged stably with Q1 if
* - very little time has elapsed since when Q1 was created
* - Q2 has the same ioprio as Q1
* - Q2 belongs to the same group as Q1
*
* Merging bfq_queues also reduces scheduling overhead. A fio test
* with ten random readers on /dev/nullb shows a throughput boost of
* 40%, with a quadcore. Since BFQ's execution time amounts to ~50% of
* the total per-request processing time, the above throughput boost
* implies that BFQ's overhead is reduced by more than 50%.
*
* This new mechanism most certainly obsoletes the current
* burst-handling heuristics. We keep those heuristics for the moment.
*/
static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
struct bfq_io_cq *bic)
{
struct bfq_queue **source_bfqq = bfqq->entity.parent ?
&bfqq->entity.parent->last_bfqq_created :
&bfqd->last_bfqq_created;

struct bfq_queue *last_bfqq_created = *source_bfqq;

/*
* If last_bfqq_created has not been set yet, then init it. If
* it has been set already, but too long ago, then move it
* forward to bfqq. Finally, move also if bfqq belongs to a
* different group than last_bfqq_created, or if bfqq has a
* different ioprio or ioprio_class. If none of these
* conditions holds true, then try an early stable merge or
* schedule a delayed stable merge.
*
* A delayed merge is scheduled (instead of performing an
* early merge), in case bfqq might soon prove to be more
* throughput-beneficial if not merged. Currently this is
* possible only if bfqd is rotational with no queueing. For
* such a drive, not merging bfqq is better for throughput if
* bfqq happens to contain sequential I/O. So, we wait a
* little bit for enough I/O to flow through bfqq. After that,
* if such an I/O is sequential, then the merge is
* canceled. Otherwise the merge is finally performed.
*/
if (!last_bfqq_created ||
time_before(last_bfqq_created->creation_time +
bfqd->bfq_burst_interval,
bfqq->creation_time) ||
bfqq->entity.parent != last_bfqq_created->entity.parent ||
bfqq->ioprio != last_bfqq_created->ioprio ||
bfqq->ioprio_class != last_bfqq_created->ioprio_class)
*source_bfqq = bfqq;
else if (time_after_eq(last_bfqq_created->creation_time +
bfqd->bfq_burst_interval,
bfqq->creation_time)) {
if (likely(bfqd->nonrot_with_queueing))
/*
* With this type of drive, leaving
* bfqq alone may provide no
* throughput benefits compared with
* merging bfqq. So merge bfqq now.
*/
bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
bic,
last_bfqq_created);
else { /* schedule tentative stable merge */
/*
* get reference on last_bfqq_created,
* to prevent it from being freed,
* until we decide whether to merge
*/
last_bfqq_created->ref++;
/*
* need to keep track of stable refs, to
* compute process refs correctly
*/
last_bfqq_created->stable_ref++;
/*
* Record the bfqq to merge to.
*/
bic->stable_merge_bfqq = last_bfqq_created;
}
}

return bfqq;
}

static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bio *bio, bool is_sync,
struct bfq_io_cq *bic)
struct bfq_io_cq *bic,
bool respawn)
{
const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);

@@ -5271,7 +5556,10 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,

out:
bfqq->ref++; /* get a process reference to this queue */
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);

if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);

rcu_read_unlock();
return bfqq;
}
@@ -5521,7 +5809,8 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
{
struct bfq_queue *bfqq = RQ_BFQQ(rq),
*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
RQ_BIC(rq));
bool waiting, idle_timer_disabled = false;

if (new_bfqq) {
@@ -5627,7 +5916,48 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,

spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {

/*
* Reqs with at_head or passthrough flags set are to be put
* directly into dispatch list. Additional case for putting rq
* directly into the dispatch queue: the only active
* bfq_queues are bfqq and either its waker bfq_queue or one
* of its woken bfq_queues. The rationale behind this
* additional condition is as follows:
* - consider a bfq_queue, say Q1, detected as a waker of
* another bfq_queue, say Q2
* - by definition of a waker, Q1 blocks the I/O of Q2, i.e.,
* some I/O of Q1 needs to be completed for new I/O of Q2
* to arrive. A notable example of waker is journald
* - so, Q1 and Q2 are in any respect the queues of two
* cooperating processes (or of two cooperating sets of
* processes): the goal of Q1's I/O is doing what needs to
* be done so that new Q2's I/O can finally be
* issued. Therefore, if the service of Q1's I/O is delayed,
* then Q2's I/O is delayed too. Conversely, if Q2's I/O is
* delayed, the goal of Q1's I/O is hindered.
* - as a consequence, if some I/O of Q1/Q2 arrives while
* Q2/Q1 is the only queue in service, there is absolutely
* no point in delaying the service of such an I/O. The
* only possible result is a throughput loss
* - so, when the above condition holds, the best option is to
* have the new I/O dispatched as soon as possible
* - the most effective and efficient way to attain the above
* goal is to put the new I/O directly in the dispatch
* list
* - as an additional restriction, Q1 and Q2 must be the only
* busy queues for this commit to put the I/O of Q2/Q1 in
* the dispatch list. This is necessary, because, if also
* other queues are waiting for service, then putting new
* I/O directly in the dispatch list may evidently cause a
* violation of service guarantees for the other queues
*/
if (!bfqq ||
(bfqq != bfqd->in_service_queue &&
bfqd->in_service_queue != NULL &&
bfq_tot_busy_queues(bfqd) == 1 + bfq_bfqq_busy(bfqq) &&
(bfqq->waker_bfqq == bfqd->in_service_queue ||
bfqd->in_service_queue->waker_bfqq == bfqq)) || at_head) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);
else
@@ -5767,7 +6097,17 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
1UL<<(BFQ_RATE_SHIFT - 10))
bfq_update_rate_reset(bfqd, NULL);
bfqd->last_completion = now_ns;
bfqd->last_completed_rq_bfqq = bfqq;
/*
* Shared queues are likely to receive I/O at a high
* rate. This may deceptively let them be considered as wakers
* of other queues. But a false waker will unjustly steal
* bandwidth to its supposedly woken queue. So considering
* also shared queues in the waking mechanism may cause more
* control troubles than throughput benefits. Then do not set
* last_completed_rq_bfqq to bfqq if bfqq is a shared queue.
*/
if (!bfq_bfqq_coop(bfqq))
bfqd->last_completed_rq_bfqq = bfqq;

/*
* If we are waiting to discover whether the request pattern
@@ -6124,7 +6464,7 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,

if (bfqq)
bfq_put_queue(bfqq);
bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);

bic_set_bfqq(bic, bfqq, is_sync);
if (split && is_sync) {

@@ -6245,8 +6585,9 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)

if (likely(!new_queue)) {
/* If the queue was seeky for too long, break it apart. */
if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
!bic->stably_merged) {
struct bfq_queue *old_bfqq = bfqq;

/* Update bic before losing reference to bfqq */
if (bfq_bfqq_in_large_burst(bfqq))

@@ -6255,11 +6596,24 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
bfqq = bfq_split_bfqq(bic, bfqq);
split = true;

if (!bfqq)
if (!bfqq) {
bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
true, is_sync,
NULL);
else
bfqq->waker_bfqq = old_bfqq->waker_bfqq;
bfqq->tentative_waker_bfqq = NULL;

/*
* If the waker queue disappears, then
* new_bfqq->waker_bfqq must be
* reset. So insert new_bfqq into the
* woken_list of the waker. See
* bfq_check_waker for details.
*/
if (bfqq->waker_bfqq)
hlist_add_head(&bfqq->woken_list_node,
&bfqq->waker_bfqq->woken_list);
} else
bfqq_already_existing = true;
}
}
@@ -197,6 +197,9 @@ struct bfq_entity {

/* flag, set if the entity is counted in groups_with_pending_reqs */
bool in_groups_with_pending_reqs;

/* last child queue of entity created (for non-leaf entities) */
struct bfq_queue *last_bfqq_created;
};

struct bfq_group;

@@ -230,6 +233,8 @@ struct bfq_ttime {
struct bfq_queue {
/* reference counter */
int ref;
/* counter of references from other queues for delayed stable merge */
int stable_ref;
/* parent bfq_data */
struct bfq_data *bfqd;

@@ -365,6 +370,8 @@ struct bfq_queue {

unsigned long first_IO_time; /* time of first I/O for this queue */

unsigned long creation_time; /* when this queue is created */

/* max service rate measured so far */
u32 max_service_rate;

@@ -454,6 +461,11 @@ struct bfq_io_cq {
u64 saved_last_serv_time_ns;
unsigned int saved_inject_limit;
unsigned long saved_decrease_time_jif;

/* candidate queue for a stable merge (due to close creation time) */
struct bfq_queue *stable_merge_bfqq;

bool stably_merged; /* non splittable if true */
};

/**

@@ -578,6 +590,9 @@ struct bfq_data {
/* bfqq owning the last completed rq */
struct bfq_queue *last_completed_rq_bfqq;

/* last bfqq created, among those in the root group */
struct bfq_queue *last_bfqq_created;

/* time of last transition from empty to non-empty (ns) */
u64 last_empty_occupied_ns;

@@ -1706,4 +1706,12 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)

if (bfqq->wr_coeff > 1)
bfqd->wr_busy_queues++;

/* Move bfqq to the head of the woken list of its waker */
if (!hlist_unhashed(&bfqq->woken_list_node) &&
&bfqq->woken_list_node != bfqq->waker_bfqq->woken_list.first) {
hlist_del_init(&bfqq->woken_list_node);
hlist_add_head(&bfqq->woken_list_node,
&bfqq->waker_bfqq->woken_list);
}
}
@@ -204,7 +204,6 @@ bool bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct request_queue *q = bio->bi_bdev->bd_disk->queue;
void *buf;
unsigned long start, end;
unsigned int len, nr_pages;

@@ -238,7 +237,7 @@ bool bio_integrity_prep(struct bio *bio)

/* Allocate kernel buffer for protection data */
len = intervals * bi->tuple_size;
buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
buf = kmalloc(len, GFP_NOIO);
status = BLK_STS_RESOURCE;
if (unlikely(buf == NULL)) {
printk(KERN_ERR "could not allocate integrity buffer\n");

block/bio.c | 43
@@ -493,20 +493,20 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
struct bio_vec bv;
struct bvec_iter iter;

__bio_for_each_segment(bv, bio, iter, start) {
bio_for_each_segment(bv, bio, iter) {
char *data = bvec_kmap_irq(&bv, &flags);
memset(data, 0, bv.bv_len);
flush_dcache_page(bv.bv_page);
bvec_kunmap_irq(data, &flags);
}
}
EXPORT_SYMBOL(zero_fill_bio_iter);
EXPORT_SYMBOL(zero_fill_bio);

/**
* bio_truncate - truncate the bio to small size of @new_size

@@ -1236,43 +1236,6 @@ void bio_copy_data(struct bio *dst, struct bio *src)
}
EXPORT_SYMBOL(bio_copy_data);

/**
* bio_list_copy_data - copy contents of data buffers from one chain of bios to
* another
* @src: source bio list
* @dst: destination bio list
*
* Stops when it reaches the end of either the @src list or @dst list - that is,
* copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
* bios).
*/
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
struct bvec_iter src_iter = src->bi_iter;
struct bvec_iter dst_iter = dst->bi_iter;

while (1) {
if (!src_iter.bi_size) {
src = src->bi_next;
if (!src)
break;

src_iter = src->bi_iter;
}

if (!dst_iter.bi_size) {
dst = dst->bi_next;
if (!dst)
break;

dst_iter = dst->bi_iter;
}

bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
}
EXPORT_SYMBOL(bio_list_copy_data);

void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
@@ -1161,10 +1161,8 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
}

/*
* queue's settings related to segment counting like q->bounce_pfn
* may differ from that of other stacking queues.
* Recalculate it to check the request correctly on this queue's
* limitation.
* The queue settings related to segment counting may differ from the
* original queue.
*/
rq->nr_phys_segments = blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
@@ -987,10 +987,6 @@ static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
return;
}

/* rq_wait signal is always reliable, ignore user vrate_min */
if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
vrate_min = VRATE_MIN;

/*
* If vrate is out of bounds, apply clamp gradually as the
* bounds can change abruptly. Otherwise, apply busy_level

block/blk-map.c | 119
@@ -123,7 +123,6 @@ static int bio_uncopy_user(struct bio *bio)
bio_free_pages(bio);
}
kfree(bmd);
bio_put(bio);
return ret;
}

@@ -132,7 +131,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
{
struct bio_map_data *bmd;
struct page *page;
struct bio *bio, *bounce_bio;
struct bio *bio;
int i = 0, ret;
int nr_pages;
unsigned int len = iter->count;

@@ -181,7 +180,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,

i++;
} else {
page = alloc_page(rq->q->bounce_gfp | gfp_mask);
page = alloc_page(GFP_NOIO | gfp_mask);
if (!page) {
ret = -ENOMEM;
goto cleanup;

@@ -218,16 +217,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,

bio->bi_private = bmd;

bounce_bio = bio;
ret = blk_rq_append_bio(rq, &bounce_bio);
ret = blk_rq_append_bio(rq, bio);
if (ret)
goto cleanup;

/*
* We link the bounce buffer in and could have to traverse it later, so
* we have to get a ref to prevent it from being freed
*/
bio_get(bounce_bio);
return 0;
cleanup:
if (!map_data)

@@ -242,7 +234,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
gfp_t gfp_mask)
{
unsigned int max_sectors = queue_max_hw_sectors(rq->q);
struct bio *bio, *bounce_bio;
struct bio *bio;
int ret;
int j;
@@ -304,49 +296,17 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
break;
}

/*
* Subtle: if we end up needing to bounce a bio, it would normally
* disappear when its bi_end_io is run. However, we need the original
* bio for the unmap, so grab an extra reference to it
*/
bio_get(bio);

bounce_bio = bio;
ret = blk_rq_append_bio(rq, &bounce_bio);
ret = blk_rq_append_bio(rq, bio);
if (ret)
goto out_put_orig;

/*
* We link the bounce buffer in and could have to traverse it
* later, so we have to get a ref to prevent it from being freed
*/
bio_get(bounce_bio);
goto out_unmap;
return 0;

out_put_orig:
bio_put(bio);
out_unmap:
bio_release_pages(bio, false);
bio_put(bio);
return ret;
}

/**
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
*
* Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
* process context.
*
* bio_unmap_user() may sleep.
*/
static void bio_unmap_user(struct bio *bio)
{
bio_release_pages(bio, bio_data_dir(bio) == READ);
bio_put(bio);
bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
@@ -486,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;

page = alloc_page(q->bounce_gfp | gfp_mask);
page = alloc_page(GFP_NOIO | gfp_mask);
if (!page)
goto cleanup;

@@ -519,33 +479,24 @@ cleanup:
* Append a bio to a passthrough request. Only works if the bio can be merged
* into the request based on the driver constraints.
*/
int blk_rq_append_bio(struct request *rq, struct bio **bio)
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
struct bio *orig_bio = *bio;
struct bvec_iter iter;
struct bio_vec bv;
unsigned int nr_segs = 0;

blk_queue_bounce(rq->q, bio);

bio_for_each_bvec(bv, *bio, iter)
bio_for_each_bvec(bv, bio, iter)
nr_segs++;

if (!rq->bio) {
blk_rq_bio_prep(rq, *bio, nr_segs);
blk_rq_bio_prep(rq, bio, nr_segs);
} else {
if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
if (orig_bio != *bio) {
bio_put(*bio);
*bio = orig_bio;
}
if (!ll_back_merge_fn(rq, bio, nr_segs))
return -EINVAL;
}

rq->biotail->bi_next = *bio;
rq->biotail = *bio;
rq->__data_len += (*bio)->bi_iter.bi_size;
bio_crypt_free_ctx(*bio);
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->__data_len += (bio)->bi_iter.bi_size;
bio_crypt_free_ctx(bio);
}

return 0;
|
@ -566,12 +517,6 @@ EXPORT_SYMBOL(blk_rq_append_bio);
|
|||
*
|
||||
* A matching blk_rq_unmap_user() must be issued at the end of I/O, while
|
||||
* still in process context.
|
||||
*
|
||||
* Note: The mapped bio may need to be bounced through blk_queue_bounce()
|
||||
* before being submitted to the device, as pages mapped may be out of
|
||||
* reach. It's the callers responsibility to make sure this happens. The
|
||||
* original bio must be passed back in to blk_rq_unmap_user() for proper
|
||||
* unmapping.
|
||||
*/
|
||||
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
||||
struct rq_map_data *map_data,
|
||||
|
@ -588,6 +533,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
|
|||
|
||||
if (map_data)
|
||||
copy = true;
|
||||
else if (blk_queue_may_bounce(q))
|
||||
copy = true;
|
||||
else if (iov_iter_alignment(iter) & align)
|
||||
copy = true;
|
||||
else if (queue_virt_boundary(q))
|
||||
|
@ -641,25 +588,21 @@ EXPORT_SYMBOL(blk_rq_map_user);
|
|||
*/
|
||||
int blk_rq_unmap_user(struct bio *bio)
|
||||
{
|
||||
struct bio *mapped_bio;
|
||||
struct bio *next_bio;
|
||||
int ret = 0, ret2;
|
||||
|
||||
while (bio) {
|
||||
mapped_bio = bio;
|
||||
if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
|
||||
mapped_bio = bio->bi_private;
|
||||
|
||||
if (bio->bi_private) {
|
||||
ret2 = bio_uncopy_user(mapped_bio);
|
||||
ret2 = bio_uncopy_user(bio);
|
||||
if (ret2 && !ret)
|
||||
ret = ret2;
|
||||
} else {
|
||||
bio_unmap_user(mapped_bio);
|
||||
bio_release_pages(bio, bio_data_dir(bio) == READ);
|
||||
}
|
||||
|
||||
mapped_bio = bio;
|
||||
next_bio = bio;
|
||||
bio = bio->bi_next;
|
||||
bio_put(mapped_bio);
|
||||
bio_put(next_bio);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -684,7 +627,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
|||
{
|
||||
int reading = rq_data_dir(rq) == READ;
|
||||
unsigned long addr = (unsigned long) kbuf;
|
||||
struct bio *bio, *orig_bio;
|
||||
struct bio *bio;
|
||||
int ret;
|
||||
|
||||
if (len > (queue_max_hw_sectors(q) << 9))
|
||||
|
@ -692,7 +635,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
|||
if (!len || !kbuf)
|
||||
return -EINVAL;
|
||||
|
||||
if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
|
||||
if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
|
||||
blk_queue_may_bounce(q))
|
||||
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
|
||||
else
|
||||
bio = bio_map_kern(q, kbuf, len, gfp_mask);
|
||||
|
@ -703,14 +647,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
|
|||
bio->bi_opf &= ~REQ_OP_MASK;
|
||||
bio->bi_opf |= req_op(rq);
|
||||
|
||||
orig_bio = bio;
|
||||
ret = blk_rq_append_bio(rq, &bio);
|
||||
if (unlikely(ret)) {
|
||||
/* request is too big */
|
||||
bio_put(orig_bio);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
ret = blk_rq_append_bio(rq, bio);
|
||||
if (unlikely(ret))
|
||||
bio_put(bio);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(blk_rq_map_kern);
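With the blk_rq_append_bio() change above, callers pass the bio directly instead of a pointer-to-pointer, since the function can no longer swap in a bounce bio. A minimal caller sketch of the new convention (the surrounding request and bio setup are assumed, and the helper name is hypothetical), mirroring what blk_rq_map_kern() now does:

    /* hypothetical caller, for illustration only */
    static int attach_bio_to_passthrough_rq(struct request *rq, struct bio *bio)
    {
            int ret;

            ret = blk_rq_append_bio(rq, bio);   /* takes struct bio *, not struct bio ** */
            if (unlikely(ret))
                    bio_put(bio);               /* on failure the caller still owns the bio */
            return ret;
    }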
@@ -972,6 +972,14 @@ void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
{
struct elevator_type *e = q->elevator->type;

/*
* If the parent debugfs directory has not been created yet, return;
* We will be called again later on with appropriate parent debugfs
* directory from blk_register_queue()
*/
if (!hctx->debugfs_dir)
return;

if (!e->hctx_debugfs_attrs)
return;
@@ -373,8 +373,8 @@ static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
}

/**
* blk_mq_tagset_wait_completed_request - wait until all completed req's
* complete funtion is run
* blk_mq_tagset_wait_completed_request - Wait until all scheduled request
* completions have finished.
* @tagset: Tag set to drain completed request
*
* Note: This function has to be run after all IO queues are shutdown

@@ -517,7 +517,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;

if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
if (blk_mq_is_sbitmap_shared(flags))
return tags;

if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {

@@ -529,7 +529,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
if (!blk_mq_is_sbitmap_shared(flags)) {
sbitmap_queue_free(tags->bitmap_tags);
sbitmap_queue_free(tags->breserved_tags);
}
@@ -361,11 +361,12 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)

if (e) {
/*
* Flush requests are special and go directly to the
* Flush/passthrough requests are special and go directly to the
* dispatch list. Don't include reserved tags in the
* limiting, as it isn't useful.
*/
if (!op_is_flush(data->cmd_flags) &&
!blk_op_is_passthrough(data->cmd_flags) &&
e->type->ops.limit_depth &&
!(data->flags & BLK_MQ_REQ_RESERVED))
e->type->ops.limit_depth(data->cmd_flags, data);
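The hunk above makes passthrough commands skip the elevator's limit_depth() hook, just like flush requests. A rough sketch of an allocation that takes this path; the REQ_OP_DRV_IN opcode and the missing driver-specific setup and submission are assumptions for illustration only:

    /* hypothetical example, not taken from this diff */
    static struct request *alloc_passthrough_request(struct request_queue *q)
    {
            struct request *rq;

            /* blk_op_is_passthrough(REQ_OP_DRV_IN) is true, so the I/O
             * scheduler's limit_depth() is bypassed for this allocation. */
            rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
            if (IS_ERR(rq))
                    return NULL;

            return rq;
    }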
@@ -7,7 +7,6 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>

@@ -17,11 +16,6 @@
#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;

@@ -55,7 +49,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->discard_alignment = 0;
lim->discard_misaligned = 0;
lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
lim->bounce = BLK_BOUNCE_NONE;
lim->alignment_offset = 0;
lim->io_opt = 0;
lim->misaligned = 0;

@@ -92,39 +86,16 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
/**
* blk_queue_bounce_limit - set bounce buffer limit for queue
* @q: the request queue for the device
* @max_addr: the maximum address the device can handle
* @bounce: bounce limit to enforce
*
* Description:
* Different hardware can have different requirements as to what pages
* it can do I/O directly to. A low level driver can call
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @max_addr.
* Force bouncing for ISA DMA ranges or highmem.
*
* DEPRECATED, don't use in new code.
**/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
unsigned long b_pfn = max_addr >> PAGE_SHIFT;
int dma = 0;

q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
/*
* Assume anything <= 4GB can be handled by IOMMU. Actually
* some IOMMUs can handle everything, but I don't know of a
* way to test this here.
*/
if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
if (b_pfn < blk_max_low_pfn)
dma = 1;
q->limits.bounce_pfn = b_pfn;
#endif
if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
q->limits.bounce_pfn = b_pfn;
}
q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
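After this hunk, blk_queue_bounce_limit() takes an enum blk_bounce instead of a physical address, and blk_set_default_limits() now defaults to BLK_BOUNCE_NONE. A hedged sketch of the only remaining use case, a driver that cannot DMA to highmem pages; the driver and function names are made up, and most drivers simply keep the default:

    /* hypothetical legacy driver setup, for illustration only */
    static void legacy_driver_init_queue(struct request_queue *q)
    {
            /* old API: blk_queue_bounce_limit(q, <u64 max DMA address>);
             * new API: pick a bounce policy directly. */
            blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
    }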
@@ -547,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_write_zeroes_sectors);
t->max_zone_append_sectors = min(t->max_zone_append_sectors,
b->max_zone_append_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->bounce = max(t->bounce, b->bounce);

t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);

@@ -927,11 +898,3 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
}
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);

static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
blk_max_pfn = max_pfn - 1;
return 0;
}
subsys_initcall(blk_settings_init);

@@ -60,7 +60,7 @@ static ssize_t queue_var_store64(s64 *var, const char *page)

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
return queue_var_show(q->nr_requests, (page));
return queue_var_show(q->nr_requests, page);
}

static ssize_t

@@ -264,6 +264,11 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
return queue_var_show(max_hw_sectors_kb, (page));
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
return queue_var_show(q->limits.virt_boundary_mask, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_##name##_show(struct request_queue *q, char *page) \

@@ -610,6 +615,7 @@ QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");

@@ -670,6 +676,7 @@ static struct attribute *queue_attrs[] = {
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&blk_throtl_sample_time_entry.attr,
#endif
&queue_virt_boundary_mask_entry.attr,
NULL,
};
@@ -52,14 +52,6 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
{
sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

return sector & ~zone_mask;
}

/*
* Return true if a request is a write requests that needs zone write locking.
*/

block/blk.h | 18
@@ -6,6 +6,7 @@
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"

@@ -311,18 +312,20 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
return 0;
return IS_ENABLED(CONFIG_BOUNCE) &&
q->limits.bounce == BLK_BOUNCE_HIGH &&
max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
__blk_queue_bounce(q, bio);
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);

@@ -346,7 +349,6 @@ char *disk_name(struct gendisk *hd, int partno, char *buf);
#define ADDPART_FLAG_NONE 0
#define ADDPART_FLAG_RAID 1
#define ADDPART_FLAG_WHOLEDISK 2
void delete_partition(struct block_device *part);
int bdev_add_partition(struct block_device *bdev, int partno,
sector_t start, sector_t length);
int bdev_del_partition(struct block_device *bdev, int partno);

block/bounce.c | 138
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

@@ -29,7 +28,7 @@
#define ISA_POOL_SIZE 16

static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;
static mempool_t page_pool;

static void init_bounce_bioset(void)
{

@@ -49,11 +48,11 @@ static void init_bounce_bioset(void)
bounce_bs_setup = true;
}

#if defined(CONFIG_HIGHMEM)
static __init int init_emergency_pool(void)
{
int ret;
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)

#ifndef CONFIG_MEMORY_HOTPLUG
if (max_pfn <= max_low_pfn)
return 0;
#endif

@@ -67,9 +66,7 @@ static __init int init_emergency_pool(void)
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
* highmem version, map in to vec
*/

@@ -82,48 +79,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
kunmap_atomic(vto);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom) \
memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
* allocate pages in the DMA region for the ISA pool
*/
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static DEFINE_MUTEX(isa_mutex);

/*
* gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
* as the max address, so check if the pool has already been created.
*/
int init_emergency_isa_pool(void)
{
int ret;

mutex_lock(&isa_mutex);

if (mempool_initialized(&isa_page_pool)) {
mutex_unlock(&isa_mutex);
return 0;
}

ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
BUG_ON(ret);

pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
init_bounce_bioset();
mutex_unlock(&isa_mutex);
return 0;
}

/*
* Simple bounce buffer support for highmem pages. Depending on the
* queue gfp mask set, *to may or may not be a highmem page. kmap it
@ -159,7 +114,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
|
|||
}
|
||||
}
|
||||
|
||||
static void bounce_end_io(struct bio *bio, mempool_t *pool)
|
||||
static void bounce_end_io(struct bio *bio)
|
||||
{
|
||||
struct bio *bio_orig = bio->bi_private;
|
||||
struct bio_vec *bvec, orig_vec;
|
||||
|
@ -173,7 +128,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
|
|||
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
|
||||
if (bvec->bv_page != orig_vec.bv_page) {
|
||||
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
|
||||
mempool_free(bvec->bv_page, pool);
|
||||
mempool_free(bvec->bv_page, &page_pool);
|
||||
}
|
||||
bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
|
||||
}
|
||||
|
@ -185,33 +140,17 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
|
|||
|
||||
static void bounce_end_io_write(struct bio *bio)
|
||||
{
|
||||
bounce_end_io(bio, &page_pool);
|
||||
bounce_end_io(bio);
|
||||
}
|
||||
|
||||
static void bounce_end_io_write_isa(struct bio *bio)
|
||||
{
|
||||
|
||||
bounce_end_io(bio, &isa_page_pool);
|
||||
}
|
||||
|
||||
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
|
||||
static void bounce_end_io_read(struct bio *bio)
|
||||
{
|
||||
struct bio *bio_orig = bio->bi_private;
|
||||
|
||||
if (!bio->bi_status)
|
||||
copy_to_high_bio_irq(bio_orig, bio);
|
||||
|
||||
bounce_end_io(bio, pool);
|
||||
}
|
||||
|
||||
static void bounce_end_io_read(struct bio *bio)
|
||||
{
|
||||
__bounce_end_io_read(bio, &page_pool);
|
||||
}
|
||||
|
||||
static void bounce_end_io_read_isa(struct bio *bio)
|
||||
{
|
||||
__bounce_end_io_read(bio, &isa_page_pool);
|
||||
bounce_end_io(bio);
|
||||
}
|
||||
|
||||
static struct bio *bounce_clone_bio(struct bio *bio_src)
|
||||
|
@ -241,12 +180,8 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
|
|||
* asking for trouble and would force extra work on
|
||||
* __bio_clone_fast() anyways.
|
||||
*/
|
||||
if (bio_is_passthrough(bio_src))
|
||||
bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
|
||||
bio_segments(bio_src));
|
||||
else
|
||||
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
|
||||
&bounce_bio_set);
|
||||
bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
|
||||
&bounce_bio_set);
|
||||
bio->bi_bdev = bio_src->bi_bdev;
|
||||
if (bio_flagged(bio_src, BIO_REMAPPED))
|
||||
bio_set_flag(bio, BIO_REMAPPED);
|
||||
|
@ -287,8 +222,7 @@ err_put:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
||||
mempool_t *pool)
|
||||
void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
|
||||
{
|
||||
struct bio *bio;
|
||||
int rw = bio_data_dir(*bio_orig);
|
||||
|
@ -301,14 +235,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
|||
bio_for_each_segment(from, *bio_orig, iter) {
|
||||
if (i++ < BIO_MAX_VECS)
|
||||
sectors += from.bv_len >> 9;
|
||||
if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
|
||||
if (PageHighMem(from.bv_page))
|
||||
bounce = true;
|
||||
}
|
||||
if (!bounce)
|
||||
return;
|
||||
|
||||
if (!bio_is_passthrough(*bio_orig) &&
|
||||
sectors < bio_sectors(*bio_orig)) {
|
||||
if (sectors < bio_sectors(*bio_orig)) {
|
||||
bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
|
||||
bio_chain(bio, *bio_orig);
|
||||
submit_bio_noacct(*bio_orig);
|
||||
|
@ -324,10 +257,10 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
|||
for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
|
||||
struct page *page = to->bv_page;
|
||||
|
||||
if (page_to_pfn(page) <= q->limits.bounce_pfn)
|
||||
if (!PageHighMem(page))
|
||||
continue;
|
||||
|
||||
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
|
||||
to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
|
||||
inc_zone_page_state(to->bv_page, NR_BOUNCE);
|
||||
|
||||
if (rw == WRITE) {
|
||||
|
@ -346,46 +279,11 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
|
|||
|
||||
bio->bi_flags |= (1 << BIO_BOUNCED);
|
||||
|
||||
if (pool == &page_pool) {
|
||||
if (rw == READ)
|
||||
bio->bi_end_io = bounce_end_io_read;
|
||||
else
|
||||
bio->bi_end_io = bounce_end_io_write;
|
||||
if (rw == READ)
|
||||
bio->bi_end_io = bounce_end_io_read;
|
||||
} else {
|
||||
bio->bi_end_io = bounce_end_io_write_isa;
|
||||
if (rw == READ)
|
||||
bio->bi_end_io = bounce_end_io_read_isa;
|
||||
}
|
||||
|
||||
bio->bi_private = *bio_orig;
|
||||
*bio_orig = bio;
|
||||
}
|
||||
|
||||
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
|
||||
{
|
||||
mempool_t *pool;
|
||||
|
||||
/*
|
||||
* Data-less bio, nothing to bounce
|
||||
*/
|
||||
if (!bio_has_data(*bio_orig))
|
||||
return;
|
||||
|
||||
/*
|
||||
* for non-isa bounce case, just check if the bounce pfn is equal
|
||||
* to or bigger than the highest pfn in the system -- in that case,
|
||||
* don't waste time iterating over bio segments
|
||||
*/
|
||||
if (!(q->bounce_gfp & GFP_DMA)) {
|
||||
if (q->limits.bounce_pfn >= blk_max_pfn)
|
||||
return;
|
||||
pool = &page_pool;
|
||||
} else {
|
||||
BUG_ON(!mempool_initialized(&isa_page_pool));
|
||||
pool = &isa_page_pool;
|
||||
}
|
||||
|
||||
/*
|
||||
* slow path
|
||||
*/
|
||||
__blk_queue_bounce(q, bio_orig, pool);
|
||||
}
|
||||
|
|
|
block/elevator.c

@@ -621,7 +621,8 @@ static inline bool elv_support_iosched(struct request_queue *q)
  */
 static struct elevator_type *elevator_get_default(struct request_queue *q)
 {
-	if (q->nr_hw_queues != 1)
+	if (q->nr_hw_queues != 1 &&
+	    !blk_mq_is_sbitmap_shared(q->tag_set->flags))
 		return NULL;

 	return elevator_get(q, "mq-deadline", false);
block/genhd.c (183 changed lines)

@@ -161,81 +161,6 @@ static void part_in_flight_rw(struct block_device *part,
 	inflight[1] = 0;
 }

-/**
- * disk_part_iter_init - initialize partition iterator
- * @piter: iterator to initialize
- * @disk: disk to iterate over
- * @flags: DISK_PITER_* flags
- *
- * Initialize @piter so that it iterates over partitions of @disk.
- *
- * CONTEXT:
- * Don't care.
- */
-void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
-			  unsigned int flags)
-{
-	piter->disk = disk;
-	piter->part = NULL;
-	if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
-		piter->idx = 0;
-	else
-		piter->idx = 1;
-	piter->flags = flags;
-}
-
-/**
- * disk_part_iter_next - proceed iterator to the next partition and return it
- * @piter: iterator of interest
- *
- * Proceed @piter to the next partition and return it.
- *
- * CONTEXT:
- * Don't care.
- */
-struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
-{
-	struct block_device *part;
-	unsigned long idx;
-
-	/* put the last partition */
-	disk_part_iter_exit(piter);
-
-	rcu_read_lock();
-	xa_for_each_start(&piter->disk->part_tbl, idx, part, piter->idx) {
-		if (!bdev_nr_sectors(part) &&
-		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
-		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-		      piter->idx == 0))
-			continue;
-
-		piter->part = bdgrab(part);
-		if (!piter->part)
-			continue;
-		piter->idx = idx + 1;
-		break;
-	}
-	rcu_read_unlock();
-
-	return piter->part;
-}
-
-/**
- * disk_part_iter_exit - finish up partition iteration
- * @piter: iter of interest
- *
- * Called when iteration is over. Cleans up @piter.
- *
- * CONTEXT:
- * Don't care.
- */
-void disk_part_iter_exit(struct disk_part_iter *piter)
-{
-	if (piter->part)
-		bdput(piter->part);
-	piter->part = NULL;
-}
-
 /*
  * Can be deleted altogether. Later.
  *
@@ -472,13 +397,22 @@ static char *bdevt_str(dev_t devt, char *buf)

 void disk_uevent(struct gendisk *disk, enum kobject_action action)
 {
-	struct disk_part_iter piter;
 	struct block_device *part;
+	unsigned long idx;

-	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
-	while ((part = disk_part_iter_next(&piter)))
+	rcu_read_lock();
+	xa_for_each(&disk->part_tbl, idx, part) {
+		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
+			continue;
+		if (!bdgrab(part))
+			continue;
+
+		rcu_read_unlock();
 		kobject_uevent(bdev_kobj(part), action);
-	disk_part_iter_exit(&piter);
+		bdput(part);
+		rcu_read_lock();
+	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(disk_uevent);

@@ -646,18 +580,6 @@ void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
 }
 EXPORT_SYMBOL(device_add_disk_no_queue_reg);

-static void invalidate_partition(struct block_device *bdev)
-{
-	fsync_bdev(bdev);
-	__invalidate_device(bdev, true);
-
-	/*
-	 * Unhash the bdev inode for this device so that it can't be looked
-	 * up any more even if openers still hold references to it.
-	 */
-	remove_inode_hash(bdev->bd_inode);
-}
-
 /**
  * del_gendisk - remove the gendisk
  * @disk: the struct gendisk to remove
@@ -679,9 +601,6 @@ static void invalidate_partition(struct block_device *bdev)
  */
 void del_gendisk(struct gendisk *disk)
 {
-	struct disk_part_iter piter;
-	struct block_device *part;
-
 	might_sleep();

 	if (WARN_ON_ONCE(!disk->queue))
@@ -696,15 +615,19 @@ void del_gendisk(struct gendisk *disk)
 	 */
 	down_write(&bdev_lookup_sem);

-	/* invalidate stuff */
-	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
-	while ((part = disk_part_iter_next(&piter))) {
-		invalidate_partition(part);
-		delete_partition(part);
-	}
-	disk_part_iter_exit(&piter);
+	mutex_lock(&disk->part0->bd_mutex);
+	blk_drop_partitions(disk);
+	mutex_unlock(&disk->part0->bd_mutex);
+
+	fsync_bdev(disk->part0);
+	__invalidate_device(disk->part0, true);
+
+	/*
+	 * Unhash the bdev inode for this device so that it can't be looked
+	 * up any more even if openers still hold references to it.
+	 */
+	remove_inode_hash(disk->part0->bd_inode);

-	invalidate_partition(disk->part0);
 	set_capacity(disk, 0);
 	disk->flags &= ~GENHD_FL_UP;
 	up_write(&bdev_lookup_sem);
@@ -817,10 +740,10 @@ void __init printk_all_partitions(void)
 	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
 	while ((dev = class_dev_iter_next(&iter))) {
 		struct gendisk *disk = dev_to_disk(dev);
-		struct disk_part_iter piter;
 		struct block_device *part;
 		char name_buf[BDEVNAME_SIZE];
 		char devt_buf[BDEVT_SIZE];
+		unsigned long idx;

 		/*
 		 * Don't show empty devices or things that have been
@@ -831,30 +754,29 @@ void __init printk_all_partitions(void)
 			continue;

 		/*
-		 * Note, unlike /proc/partitions, I am showing the
-		 * numbers in hex - the same format as the root=
-		 * option takes.
+		 * Note, unlike /proc/partitions, I am showing the numbers in
+		 * hex - the same format as the root= option takes.
 		 */
-		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
-		while ((part = disk_part_iter_next(&piter))) {
-			bool is_part0 = part == disk->part0;
-
-			printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
+		rcu_read_lock();
+		xa_for_each(&disk->part_tbl, idx, part) {
+			if (!bdev_nr_sectors(part))
+				continue;
+			printk("%s%s %10llu %s %s",
+			       bdev_is_partition(part) ? "  " : "",
 			       bdevt_str(part->bd_dev, devt_buf),
 			       bdev_nr_sectors(part) >> 1,
 			       disk_name(disk, part->bd_partno, name_buf),
 			       part->bd_meta_info ?
					part->bd_meta_info->uuid : "");
-			if (is_part0) {
-				if (dev->parent && dev->parent->driver)
-					printk(" driver: %s\n",
-					      dev->parent->driver->name);
-				else
-					printk(" (driver?)\n");
-			} else
+			if (bdev_is_partition(part))
 				printk("\n");
+			else if (dev->parent && dev->parent->driver)
+				printk(" driver: %s\n",
+				       dev->parent->driver->name);
+			else
+				printk(" (driver?)\n");
 		}
-		disk_part_iter_exit(&piter);
+		rcu_read_unlock();
 	}
 	class_dev_iter_exit(&iter);
 }
@@ -919,8 +841,8 @@ static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
 static int show_partition(struct seq_file *seqf, void *v)
 {
 	struct gendisk *sgp = v;
-	struct disk_part_iter piter;
 	struct block_device *part;
+	unsigned long idx;
 	char buf[BDEVNAME_SIZE];

 	/* Don't show non-partitionable removeable devices or empty devices */
@@ -930,15 +852,16 @@ static int show_partition(struct seq_file *seqf, void *v)
 	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
 		return 0;

 	/* show the full disk and all non-0 size partitions of it */
-	disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
-	while ((part = disk_part_iter_next(&piter)))
+	rcu_read_lock();
+	xa_for_each(&sgp->part_tbl, idx, part) {
+		if (!bdev_nr_sectors(part))
+			continue;
 		seq_printf(seqf, "%4d  %7d %10llu %s\n",
 			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
 			   bdev_nr_sectors(part) >> 1,
 			   disk_name(sgp, part->bd_partno, buf));
-	disk_part_iter_exit(&piter);
-
+	}
+	rcu_read_unlock();
 	return 0;
 }

@@ -1247,11 +1170,11 @@ const struct device_type disk_type = {
 static int diskstats_show(struct seq_file *seqf, void *v)
 {
 	struct gendisk *gp = v;
-	struct disk_part_iter piter;
 	struct block_device *hd;
 	char buf[BDEVNAME_SIZE];
 	unsigned int inflight;
 	struct disk_stats stat;
+	unsigned long idx;

 	/*
 	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
@@ -1261,8 +1184,10 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 	"\n\n");
 	*/

-	disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
-	while ((hd = disk_part_iter_next(&piter))) {
+	rcu_read_lock();
+	xa_for_each(&gp->part_tbl, idx, hd) {
+		if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
+			continue;
 		part_stat_read_all(hd, &stat);
 		if (queue_is_mq(gp->queue))
 			inflight = blk_mq_in_flight(gp->queue, hd);
@@ -1305,7 +1230,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 				   NSEC_PER_MSEC)
 			);
 	}
-	disk_part_iter_exit(&piter);
+	rcu_read_unlock();

 	return 0;
 }
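Every genhd.c hunk above follows the same shape: walk disk->part_tbl directly with xa_for_each() under rcu_read_lock(), skip empty partitions, and, where the body has to sleep (as in disk_uevent()), pin the partition with bdgrab(), drop the RCU read lock around the work, then re-acquire it. The sketch below is only a userspace caricature of that pattern, with a plain array standing in for the xarray and a counter standing in for bdgrab()/bdput(); none of it is kernel API.

	#include <stdio.h>

	struct part { int nr; long sectors; int refs; };

	static struct part table[] = {
		{ .nr = 0, .sectors = 1L << 21 },	/* whole disk */
		{ .nr = 1, .sectors = 1L << 20 },
		{ .nr = 2, .sectors = 0 },		/* empty, should be skipped */
	};

	static void lock(void)   { /* stands in for rcu_read_lock() */ }
	static void unlock(void) { /* stands in for rcu_read_unlock() */ }

	static void emit_uevent(struct part *p)
	{
		/* the real code calls kobject_uevent(), which may sleep */
		printf("uevent for partition %d (%ld sectors)\n", p->nr, p->sectors);
	}

	int main(void)
	{
		lock();
		for (unsigned long i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			struct part *p = &table[i];

			if (p->nr != 0 && p->sectors == 0)	/* skip empty partitions */
				continue;
			p->refs++;				/* "bdgrab" */

			unlock();				/* cannot sleep while "locked" */
			emit_uevent(p);
			p->refs--;				/* "bdput" */
			lock();
		}
		unlock();
		return 0;
	}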
block/ioprio.c

@@ -119,11 +119,17 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
 			pgrp = task_pgrp(current);
 		else
 			pgrp = find_vpid(who);
+
+		read_lock(&tasklist_lock);
 		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 			ret = set_task_ioprio(p, ioprio);
-			if (ret)
-				break;
+			if (ret) {
+				read_unlock(&tasklist_lock);
+				goto out;
+			}
 		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+		read_unlock(&tasklist_lock);
+
 		break;
 	case IOPRIO_WHO_USER:
 		uid = make_kuid(current_user_ns(), who);
@@ -153,6 +159,7 @@ free_uid:
 		ret = -EINVAL;
 	}

+out:
 	rcu_read_unlock();
 	return ret;
 }
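The IOPRIO_WHO_PGRP fix above matters because do_each_pid_thread()/while_each_pid_thread() expand to two nested loops, so a bare break on error only left the inner loop and the walk kept going; jumping to a common exit label (and taking tasklist_lock around the walk) stops it for real. The toy program below demonstrates the break-versus-goto behaviour with a made-up macro pair; it is not the kernel's macro, just the same two-loop shape.

	#include <stdio.h>

	/* toy stand-in for a do_each/while_each style macro that hides two loops */
	#define for_each_pair(i, j)				\
		for (i = 0; i < 3; i++)				\
			for (j = 0; j < 3; j++)

	int main(void)
	{
		int i, j, visited;

		/* "break" only leaves the inner loop, so iteration carries on */
		visited = 0;
		for_each_pair(i, j) {
			visited++;
			if (i == 0 && j == 0)
				break;		/* meant to stop everything... */
		}
		printf("with break: visited %d pairs\n", visited);	/* prints 7, not 1 */

		/* a goto, as in the ioprio fix, really does end the whole walk */
		visited = 0;
		for_each_pair(i, j) {
			visited++;
			if (i == 0 && j == 0)
				goto out;
		}
	out:
		printf("with goto:  visited %d pairs\n", visited);	/* prints 1 */
		return 0;
	}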
block/mq-deadline.c

@@ -500,11 +500,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,

 	trace_block_rq_insert(rq);

-	if (at_head || blk_rq_is_passthrough(rq)) {
-		if (at_head)
-			list_add(&rq->queuelist, &dd->dispatch);
-		else
-			list_add_tail(&rq->queuelist, &dd->dispatch);
+	if (at_head) {
+		list_add(&rq->queuelist, &dd->dispatch);
 	} else {
 		deadline_add_rq_rb(dd, rq);
block/partitions/core.c

@@ -285,8 +285,11 @@ struct device_type part_type = {
  * Must be called either with bd_mutex held, before a disk can be opened or
  * after all disk users are gone.
  */
-void delete_partition(struct block_device *part)
+static void delete_partition(struct block_device *part)
 {
+	fsync_bdev(part);
+	__invalidate_device(part, true);
+
 	xa_erase(&part->bd_disk->part_tbl, part->bd_partno);
 	kobject_put(part->bd_holder_dir);
 	device_del(&part->bd_device);
@@ -424,21 +427,21 @@
 static bool partition_overlaps(struct gendisk *disk, sector_t start,
 		sector_t length, int skip_partno)
 {
-	struct disk_part_iter piter;
 	struct block_device *part;
 	bool overlap = false;
+	unsigned long idx;
+
+	rcu_read_lock();
+	xa_for_each_start(&disk->part_tbl, idx, part, 1) {
+		if (part->bd_partno != skip_partno &&
+		    start < part->bd_start_sect + bdev_nr_sectors(part) &&
+		    start + length > part->bd_start_sect) {
+			overlap = true;
+			break;
+		}
+	}
+	rcu_read_unlock();

-	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
-	while ((part = disk_part_iter_next(&piter))) {
-		if (part->bd_partno == skip_partno ||
-		    start >= part->bd_start_sect + bdev_nr_sectors(part) ||
-		    start + length <= part->bd_start_sect)
-			continue;
-		overlap = true;
-		break;
-	}
-
-	disk_part_iter_exit(&piter);
 	return overlap;
 }

@@ -475,9 +478,6 @@ int bdev_del_partition(struct block_device *bdev, int partno)
 	if (part->bd_openers)
 		goto out_unlock;

-	sync_blockdev(part);
-	invalidate_bdev(part);
-
 	delete_partition(part);
 	ret = 0;
 out_unlock:
@@ -533,28 +533,20 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
 	}
 }

-int blk_drop_partitions(struct block_device *bdev)
+void blk_drop_partitions(struct gendisk *disk)
 {
-	struct disk_part_iter piter;
 	struct block_device *part;
+	unsigned long idx;

-	if (bdev->bd_part_count)
-		return -EBUSY;
+	lockdep_assert_held(&disk->part0->bd_mutex);

-	sync_blockdev(bdev);
-	invalidate_bdev(bdev);
-
-	disk_part_iter_init(&piter, bdev->bd_disk, DISK_PITER_INCL_EMPTY);
-	while ((part = disk_part_iter_next(&piter)))
+	xa_for_each_start(&disk->part_tbl, idx, part, 1) {
+		if (!bdgrab(part))
+			continue;
 		delete_partition(part);
-	disk_part_iter_exit(&piter);
-
-	return 0;
+		bdput(part);
+	}
 }
-#ifdef CONFIG_S390
-/* for historic reasons in the DASD driver */
-EXPORT_SYMBOL_GPL(blk_drop_partitions);
-#endif

 static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
 		struct parsed_partitions *state, int p)
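The rewritten partition_overlaps() states the overlap test positively: the requested range [start, start + length) collides with an existing partition [bd_start_sect, bd_start_sect + size) exactly when each range starts before the other one ends. A minimal, self-contained check of that predicate (plain C, no kernel types beyond a local sector_t typedef):

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* half-open ranges [a_start, a_start+a_len) and [b_start, b_start+b_len)
	 * overlap iff each one starts strictly before the other one ends */
	static int ranges_overlap(sector_t a_start, sector_t a_len,
				  sector_t b_start, sector_t b_len)
	{
		return a_start < b_start + b_len && b_start < a_start + a_len;
	}

	int main(void)
	{
		assert(ranges_overlap(0, 100, 50, 100));	/* partial overlap */
		assert(!ranges_overlap(0, 100, 100, 100));	/* exactly adjacent */
		assert(ranges_overlap(10, 10, 0, 100));		/* fully contained */
		assert(!ranges_overlap(0, 10, 20, 10));		/* disjoint */
		printf("overlap checks passed\n");
		return 0;
	}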
block/scsi_ioctl.c

@@ -353,10 +353,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,

 	start_time = jiffies;

-	/* ignore return value. All information is passed back to caller
-	 * (if he doesn't check that is his problem).
-	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
-	 */
 	blk_execute_rq(bd_disk, rq, at_head);

 	hdr->duration = jiffies_to_msecs(jiffies - start_time);
@@ -431,7 +427,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,

 	bytes = max(in_len, out_len);
 	if (bytes) {
-		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN);
 		if (!buffer)
 			return -ENOMEM;

drivers/ata/libata-scsi.c

@@ -1043,8 +1043,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
 		blk_queue_max_segments(q, queue_max_segments(q) - 1);

 		sdev->dma_drain_len = ATAPI_MAX_DRAIN;
-		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
-					      q->bounce_gfp | GFP_KERNEL);
+		sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO);
 		if (!sdev->dma_drain_buf) {
 			ata_dev_err(dev, "drain buffer allocation failed\n");
 			return -ENOMEM;
drivers/block/pktcdvd.c

@@ -1199,6 +1199,42 @@ try_next_bio:
 	return 1;
 }

+/**
+ * bio_list_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @src: source bio list
+ * @dst: destination bio list
+ *
+ * Stops when it reaches the end of either the @src list or @dst list - that is,
+ * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
+ * bios).
+ */
+static void bio_list_copy_data(struct bio *dst, struct bio *src)
+{
+	struct bvec_iter src_iter = src->bi_iter;
+	struct bvec_iter dst_iter = dst->bi_iter;
+
+	while (1) {
+		if (!src_iter.bi_size) {
+			src = src->bi_next;
+			if (!src)
+				break;
+
+			src_iter = src->bi_iter;
+		}
+
+		if (!dst_iter.bi_size) {
+			dst = dst->bi_next;
+			if (!dst)
+				break;
+
+			dst_iter = dst->bi_iter;
+		}
+
+		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+	}
+}
+
 /*
  * Assemble a bio to write one packet and queue the bio for processing
  * by the underlying block device.
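bio_list_copy_data(), now private to pktcdvd, zips two chains of bios with independent iterators: whenever one side's current buffer is exhausted it advances to the next element of that chain, and it stops as soon as either chain ends. The standalone program below reproduces that dual-cursor copy over plain buffer lists; the names and sizes are invented for illustration only.

	#include <stdio.h>
	#include <string.h>

	struct buf { char *data; size_t len; };

	/* copy across two buffer lists chunk by chunk: advance whichever side
	 * runs out of bytes, stop when either list is exhausted */
	static void list_copy(struct buf *dst, size_t ndst, struct buf *src, size_t nsrc)
	{
		size_t di = 0, si = 0, doff = 0, soff = 0;

		while (di < ndst && si < nsrc) {
			size_t n = src[si].len - soff;

			if (dst[di].len - doff < n)
				n = dst[di].len - doff;
			memcpy(dst[di].data + doff, src[si].data + soff, n);
			doff += n;
			soff += n;
			if (doff == dst[di].len) { di++; doff = 0; }
			if (soff == src[si].len) { si++; soff = 0; }
		}
	}

	int main(void)
	{
		char s1[] = "hello ", s2[] = "world";
		char d1[4] = { 0 }, d2[16] = { 0 };
		struct buf src[] = { { s1, 6 }, { s2, 5 } };
		struct buf dst[] = { { d1, sizeof(d1) }, { d2, sizeof(d2) } };

		list_copy(dst, 2, src, 2);
		printf("%.4s|%s\n", d1, d2);	/* prints "hell|o world" */
		return 0;
	}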
drivers/nvme/host/lightnvm.c

@@ -660,7 +660,7 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q,
 	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

 	if (rqd->bio)
-		blk_rq_append_bio(rq, &rqd->bio);
+		blk_rq_append_bio(rq, rqd->bio);
 	else
 		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

drivers/s390/block/dasd_genhd.c

@@ -146,12 +146,11 @@ void dasd_destroy_partitions(struct dasd_block *block)
 	block->bdev = NULL;

 	mutex_lock(&bdev->bd_mutex);
-	blk_drop_partitions(bdev);
+	bdev_disk_changed(bdev, true);
 	mutex_unlock(&bdev->bd_mutex);

 	/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
 	blkdev_put(bdev, FMODE_READ);
-	set_capacity(block->gdp, 0);
 }

 int dasd_gendisk_init(void)
@ -561,60 +561,6 @@ done:
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
blogic_add_probeaddr_isa appends a single ISA I/O Address to the list
|
||||
of I/O Address and Bus Probe Information to be checked for potential BusLogic
|
||||
Host Adapters.
|
||||
*/
|
||||
|
||||
static void __init blogic_add_probeaddr_isa(unsigned long io_addr)
|
||||
{
|
||||
struct blogic_probeinfo *probeinfo;
|
||||
if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
|
||||
return;
|
||||
probeinfo = &blogic_probeinfo_list[blogic_probeinfo_count++];
|
||||
probeinfo->adapter_type = BLOGIC_MULTIMASTER;
|
||||
probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
|
||||
probeinfo->io_addr = io_addr;
|
||||
probeinfo->pci_device = NULL;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
blogic_init_probeinfo_isa initializes the list of I/O Address and
|
||||
Bus Probe Information to be checked for potential BusLogic SCSI Host Adapters
|
||||
only from the list of standard BusLogic MultiMaster ISA I/O Addresses.
|
||||
*/
|
||||
|
||||
static void __init blogic_init_probeinfo_isa(struct blogic_adapter *adapter)
|
||||
{
|
||||
/*
|
||||
If BusLogic Driver Options specifications requested that ISA
|
||||
Bus Probes be inhibited, do not proceed further.
|
||||
*/
|
||||
if (blogic_probe_options.noprobe_isa)
|
||||
return;
|
||||
/*
|
||||
Append the list of standard BusLogic MultiMaster ISA I/O Addresses.
|
||||
*/
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe330)
|
||||
blogic_add_probeaddr_isa(0x330);
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe334)
|
||||
blogic_add_probeaddr_isa(0x334);
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe230)
|
||||
blogic_add_probeaddr_isa(0x230);
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe234)
|
||||
blogic_add_probeaddr_isa(0x234);
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe130)
|
||||
blogic_add_probeaddr_isa(0x130);
|
||||
if (!blogic_probe_options.limited_isa || blogic_probe_options.probe134)
|
||||
blogic_add_probeaddr_isa(0x134);
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
|
||||
|
||||
/*
|
||||
blogic_sort_probeinfo sorts a section of blogic_probeinfo_list in order
|
||||
of increasing PCI Bus and Device Number.
|
||||
|
@ -667,14 +613,11 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
|
|||
int nonpr_mmcount = 0, mmcount = 0;
|
||||
bool force_scan_order = false;
|
||||
bool force_scan_order_checked = false;
|
||||
bool addr_seen[6];
|
||||
struct pci_dev *pci_device = NULL;
|
||||
int i;
|
||||
if (blogic_probeinfo_count >= BLOGIC_MAX_ADAPTERS)
|
||||
return 0;
|
||||
blogic_probeinfo_count++;
|
||||
for (i = 0; i < 6; i++)
|
||||
addr_seen[i] = false;
|
||||
/*
|
||||
Iterate over the MultiMaster PCI Host Adapters. For each
|
||||
enumerated host adapter, determine whether its ISA Compatible
|
||||
|
@ -744,11 +687,8 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
|
|||
host_adapter->io_addr = io_addr;
|
||||
blogic_intreset(host_adapter);
|
||||
if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
|
||||
&adapter_info, sizeof(adapter_info)) ==
|
||||
sizeof(adapter_info)) {
|
||||
if (adapter_info.isa_port < 6)
|
||||
addr_seen[adapter_info.isa_port] = true;
|
||||
} else
|
||||
&adapter_info, sizeof(adapter_info)) !=
|
||||
sizeof(adapter_info))
|
||||
adapter_info.isa_port = BLOGIC_IO_DISABLE;
|
||||
/*
|
||||
Issue the Modify I/O Address command to disable the
|
||||
|
@ -835,45 +775,6 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
|
|||
if (force_scan_order)
|
||||
blogic_sort_probeinfo(&blogic_probeinfo_list[nonpr_mmindex],
|
||||
nonpr_mmcount);
|
||||
/*
|
||||
If no PCI MultiMaster Host Adapter is assigned the Primary
|
||||
I/O Address, then the Primary I/O Address must be probed
|
||||
explicitly before any PCI host adapters are probed.
|
||||
*/
|
||||
if (!blogic_probe_options.noprobe_isa)
|
||||
if (pr_probeinfo->io_addr == 0 &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe330)) {
|
||||
pr_probeinfo->adapter_type = BLOGIC_MULTIMASTER;
|
||||
pr_probeinfo->adapter_bus_type = BLOGIC_ISA_BUS;
|
||||
pr_probeinfo->io_addr = 0x330;
|
||||
}
|
||||
/*
|
||||
Append the list of standard BusLogic MultiMaster ISA I/O Addresses,
|
||||
omitting the Primary I/O Address which has already been handled.
|
||||
*/
|
||||
if (!blogic_probe_options.noprobe_isa) {
|
||||
if (!addr_seen[1] &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe334))
|
||||
blogic_add_probeaddr_isa(0x334);
|
||||
if (!addr_seen[2] &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe230))
|
||||
blogic_add_probeaddr_isa(0x230);
|
||||
if (!addr_seen[3] &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe234))
|
||||
blogic_add_probeaddr_isa(0x234);
|
||||
if (!addr_seen[4] &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe130))
|
||||
blogic_add_probeaddr_isa(0x130);
|
||||
if (!addr_seen[5] &&
|
||||
(!blogic_probe_options.limited_isa ||
|
||||
blogic_probe_options.probe134))
|
||||
blogic_add_probeaddr_isa(0x134);
|
||||
}
|
||||
/*
|
||||
Iterate over the older non-compliant MultiMaster PCI Host Adapters,
|
||||
noting the PCI bus location and assigned IRQ Channel.
|
||||
|
@ -1078,18 +979,10 @@ static void __init blogic_init_probeinfo_list(struct blogic_adapter *adapter)
|
|||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
blogic_init_probeinfo_isa(adapter);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#else
|
||||
#define blogic_init_probeinfo_list(adapter) \
|
||||
blogic_init_probeinfo_isa(adapter)
|
||||
#endif /* CONFIG_PCI */
|
||||
|
||||
|
||||
/*
|
||||
blogic_failure prints a standardized error message, and then returns false.
|
||||
*/
|
||||
|
@ -1539,14 +1432,6 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
|
|||
else if (config.irq_ch15)
|
||||
adapter->irq_ch = 15;
|
||||
}
|
||||
if (adapter->adapter_bus_type == BLOGIC_ISA_BUS) {
|
||||
if (config.dma_ch5)
|
||||
adapter->dma_ch = 5;
|
||||
else if (config.dma_ch6)
|
||||
adapter->dma_ch = 6;
|
||||
else if (config.dma_ch7)
|
||||
adapter->dma_ch = 7;
|
||||
}
|
||||
/*
|
||||
Determine whether Extended Translation is enabled and save it in
|
||||
the Host Adapter structure.
|
||||
|
@ -1686,8 +1571,7 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
|
|||
if (adapter->fw_ver[0] == '5')
|
||||
adapter->adapter_qdepth = 192;
|
||||
else if (adapter->fw_ver[0] == '4')
|
||||
adapter->adapter_qdepth = (adapter->adapter_bus_type !=
|
||||
BLOGIC_ISA_BUS ? 100 : 50);
|
||||
adapter->adapter_qdepth = 100;
|
||||
else
|
||||
adapter->adapter_qdepth = 30;
|
||||
if (strcmp(adapter->fw_ver, "3.31") >= 0) {
|
||||
|
@ -1727,26 +1611,17 @@ static bool __init blogic_rdconfig(struct blogic_adapter *adapter)
|
|||
bios_addr is 0.
|
||||
*/
|
||||
adapter->bios_addr = ext_setupinfo.bios_addr << 12;
|
||||
/*
|
||||
ISA Host Adapters require Bounce Buffers if there is more than
|
||||
16MB memory.
|
||||
*/
|
||||
if (adapter->adapter_bus_type == BLOGIC_ISA_BUS &&
|
||||
(void *) high_memory > (void *) MAX_DMA_ADDRESS)
|
||||
adapter->need_bouncebuf = true;
|
||||
/*
|
||||
BusLogic BT-445S Host Adapters prior to board revision E have a
|
||||
hardware bug whereby when the BIOS is enabled, transfers to/from
|
||||
the same address range the BIOS occupies modulo 16MB are handled
|
||||
incorrectly. Only properly functioning BT-445S Host Adapters
|
||||
have firmware version 3.37, so require that ISA Bounce Buffers
|
||||
be used for the buggy BT-445S models if there is more than 16MB
|
||||
memory.
|
||||
have firmware version 3.37.
|
||||
*/
|
||||
if (adapter->bios_addr > 0 && strcmp(adapter->model, "BT-445S") == 0 &&
|
||||
strcmp(adapter->fw_ver, "3.37") < 0 &&
|
||||
(void *) high_memory > (void *) MAX_DMA_ADDRESS)
|
||||
adapter->need_bouncebuf = true;
|
||||
if (adapter->bios_addr > 0 &&
|
||||
strcmp(adapter->model, "BT-445S") == 0 &&
|
||||
strcmp(adapter->fw_ver, "3.37") < 0)
|
||||
return blogic_failure(adapter, "Too old firmware");
|
||||
/*
|
||||
Initialize parameters common to MultiMaster and FlashPoint
|
||||
Host Adapters.
|
||||
|
@ -1769,14 +1644,9 @@ common:
|
|||
if (adapter->drvr_opts != NULL &&
|
||||
adapter->drvr_opts->qdepth[tgt_id] > 0)
|
||||
qdepth = adapter->drvr_opts->qdepth[tgt_id];
|
||||
else if (adapter->need_bouncebuf)
|
||||
qdepth = BLOGIC_TAG_DEPTH_BB;
|
||||
adapter->qdepth[tgt_id] = qdepth;
|
||||
}
|
||||
if (adapter->need_bouncebuf)
|
||||
adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH_BB;
|
||||
else
|
||||
adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
|
||||
adapter->untag_qdepth = BLOGIC_UNTAG_DEPTH;
|
||||
if (adapter->drvr_opts != NULL)
|
||||
adapter->common_qdepth = adapter->drvr_opts->common_qdepth;
|
||||
if (adapter->common_qdepth > 0 &&
|
||||
|
@ -1839,11 +1709,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
|
|||
blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
|
||||
blogic_info(" Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
|
||||
if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
|
||||
blogic_info(" DMA Channel: ", adapter);
|
||||
if (adapter->dma_ch > 0)
|
||||
blogic_info("%d, ", adapter, adapter->dma_ch);
|
||||
else
|
||||
blogic_info("None, ", adapter);
|
||||
blogic_info(" DMA Channel: None, ", adapter);
|
||||
if (adapter->bios_addr > 0)
|
||||
blogic_info("BIOS Address: 0x%lX, ", adapter,
|
||||
adapter->bios_addr);
|
||||
|
@ -1995,18 +1861,6 @@ static bool __init blogic_getres(struct blogic_adapter *adapter)
|
|||
return false;
|
||||
}
|
||||
adapter->irq_acquired = true;
|
||||
/*
|
||||
Acquire exclusive access to the DMA Channel.
|
||||
*/
|
||||
if (adapter->dma_ch > 0) {
|
||||
if (request_dma(adapter->dma_ch, adapter->full_model) < 0) {
|
||||
blogic_err("UNABLE TO ACQUIRE DMA CHANNEL %d - DETACHING\n", adapter, adapter->dma_ch);
|
||||
return false;
|
||||
}
|
||||
set_dma_mode(adapter->dma_ch, DMA_MODE_CASCADE);
|
||||
enable_dma(adapter->dma_ch);
|
||||
adapter->dma_chan_acquired = true;
|
||||
}
|
||||
/*
|
||||
Indicate the System Resource Acquisition completed successfully,
|
||||
*/
|
||||
|
@ -2026,11 +1880,6 @@ static void blogic_relres(struct blogic_adapter *adapter)
|
|||
*/
|
||||
if (adapter->irq_acquired)
|
||||
free_irq(adapter->irq_ch, adapter);
|
||||
/*
|
||||
Release exclusive access to the DMA Channel.
|
||||
*/
|
||||
if (adapter->dma_chan_acquired)
|
||||
free_dma(adapter->dma_ch);
|
||||
/*
|
||||
Release any allocated memory structs not released elsewhere
|
||||
*/
|
||||
|
@ -2299,7 +2148,6 @@ static void __init blogic_inithoststruct(struct blogic_adapter *adapter,
|
|||
host->this_id = adapter->scsi_id;
|
||||
host->can_queue = adapter->drvr_qdepth;
|
||||
host->sg_tablesize = adapter->drvr_sglimit;
|
||||
host->unchecked_isa_dma = adapter->need_bouncebuf;
|
||||
host->cmd_per_lun = adapter->untag_qdepth;
|
||||
}
|
||||
|
||||
|
@ -3666,37 +3514,7 @@ static int __init blogic_parseopts(char *options)
|
|||
|
||||
memset(drvr_opts, 0, sizeof(struct blogic_drvr_options));
|
||||
while (*options != '\0' && *options != ';') {
|
||||
/* Probing Options. */
|
||||
if (blogic_parse(&options, "IO:")) {
|
||||
unsigned long io_addr = simple_strtoul(options,
|
||||
&options, 0);
|
||||
blogic_probe_options.limited_isa = true;
|
||||
switch (io_addr) {
|
||||
case 0x330:
|
||||
blogic_probe_options.probe330 = true;
|
||||
break;
|
||||
case 0x334:
|
||||
blogic_probe_options.probe334 = true;
|
||||
break;
|
||||
case 0x230:
|
||||
blogic_probe_options.probe230 = true;
|
||||
break;
|
||||
case 0x234:
|
||||
blogic_probe_options.probe234 = true;
|
||||
break;
|
||||
case 0x130:
|
||||
blogic_probe_options.probe130 = true;
|
||||
break;
|
||||
case 0x134:
|
||||
blogic_probe_options.probe134 = true;
|
||||
break;
|
||||
default:
|
||||
blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
|
||||
return 0;
|
||||
}
|
||||
} else if (blogic_parse(&options, "NoProbeISA"))
|
||||
blogic_probe_options.noprobe_isa = true;
|
||||
else if (blogic_parse(&options, "NoProbePCI"))
|
||||
if (blogic_parse(&options, "NoProbePCI"))
|
||||
blogic_probe_options.noprobe_pci = true;
|
||||
else if (blogic_parse(&options, "NoProbe"))
|
||||
blogic_probe_options.noprobe = true;
|
||||
|
@ -3851,7 +3669,6 @@ static struct scsi_host_template blogic_template = {
|
|||
#if 0
|
||||
.eh_abort_handler = blogic_abort,
|
||||
#endif
|
||||
.unchecked_isa_dma = 1,
|
||||
.max_sectors = 128,
|
||||
};
|
||||
|
||||
|
|
|
drivers/scsi/BusLogic.h

@@ -237,18 +237,10 @@ struct blogic_probeinfo {

 struct blogic_probe_options {
 	bool noprobe:1;			/* Bit 0 */
-	bool noprobe_isa:1;		/* Bit 1 */
 	bool noprobe_pci:1;		/* Bit 2 */
 	bool nosort_pci:1;		/* Bit 3 */
 	bool multimaster_first:1;	/* Bit 4 */
 	bool flashpoint_first:1;	/* Bit 5 */
-	bool limited_isa:1;		/* Bit 6 */
-	bool probe330:1;		/* Bit 7 */
-	bool probe334:1;		/* Bit 8 */
-	bool probe230:1;		/* Bit 9 */
-	bool probe234:1;		/* Bit 10 */
-	bool probe130:1;		/* Bit 11 */
-	bool probe134:1;		/* Bit 12 */
 };

 /*
@@ -997,10 +989,8 @@ struct blogic_adapter {
 	unsigned char bus;
 	unsigned char dev;
 	unsigned char irq_ch;
-	unsigned char dma_ch;
 	unsigned char scsi_id;
 	bool irq_acquired:1;
-	bool dma_chan_acquired:1;
 	bool ext_trans_enable:1;
 	bool parity:1;
 	bool reset_enabled:1;
@@ -1013,7 +1003,6 @@
 	bool terminfo_valid:1;
 	bool low_term:1;
 	bool high_term:1;
-	bool need_bouncebuf:1;
 	bool strict_rr:1;
 	bool scam_enabled:1;
 	bool scam_lev2:1;
drivers/scsi/Kconfig

@@ -497,7 +497,7 @@ config SCSI_HPTIOP

 config SCSI_BUSLOGIC
 	tristate "BusLogic SCSI support"
-	depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+	depends on PCI && SCSI && VIRT_TO_BUS
 	help
 	  This is support for BusLogic MultiMaster and FlashPoint SCSI Host
 	  Adapters. Consult the SCSI-HOWTO, available from
|
||||
#define ASC_CS_TYPE unsigned short
|
||||
|
||||
#define ASC_IS_ISA (0x0001)
|
||||
#define ASC_IS_ISAPNP (0x0081)
|
||||
#define ASC_IS_EISA (0x0002)
|
||||
#define ASC_IS_PCI (0x0004)
|
||||
#define ASC_IS_PCI_ULTRA (0x0104)
|
||||
|
@ -101,11 +99,6 @@ typedef unsigned char uchar;
|
|||
#define ASC_CHIP_MIN_VER_PCI (0x09)
|
||||
#define ASC_CHIP_MAX_VER_PCI (0x0F)
|
||||
#define ASC_CHIP_VER_PCI_BIT (0x08)
|
||||
#define ASC_CHIP_MIN_VER_ISA (0x11)
|
||||
#define ASC_CHIP_MIN_VER_ISA_PNP (0x21)
|
||||
#define ASC_CHIP_MAX_VER_ISA (0x27)
|
||||
#define ASC_CHIP_VER_ISA_BIT (0x30)
|
||||
#define ASC_CHIP_VER_ISAPNP_BIT (0x20)
|
||||
#define ASC_CHIP_VER_ASYN_BUG (0x21)
|
||||
#define ASC_CHIP_VER_PCI 0x08
|
||||
#define ASC_CHIP_VER_PCI_ULTRA_3150 (ASC_CHIP_VER_PCI | 0x02)
|
||||
|
@ -116,7 +109,6 @@ typedef unsigned char uchar;
|
|||
#define ASC_CHIP_LATEST_VER_EISA ((ASC_CHIP_MIN_VER_EISA - 1) + 3)
|
||||
#define ASC_MAX_VL_DMA_COUNT (0x07FFFFFFL)
|
||||
#define ASC_MAX_PCI_DMA_COUNT (0xFFFFFFFFL)
|
||||
#define ASC_MAX_ISA_DMA_COUNT (0x00FFFFFFL)
|
||||
|
||||
#define ASC_SCSI_ID_BITS 3
|
||||
#define ASC_SCSI_TIX_TYPE uchar
|
||||
|
@ -194,7 +186,6 @@ typedef unsigned char uchar;
|
|||
#define ASC_FLAG_SRB_LINEAR_ADDR 0x08
|
||||
#define ASC_FLAG_WIN16 0x10
|
||||
#define ASC_FLAG_WIN32 0x20
|
||||
#define ASC_FLAG_ISA_OVER_16MB 0x40
|
||||
#define ASC_FLAG_DOS_VM_CALLBACK 0x80
|
||||
#define ASC_TAG_FLAG_EXTRA_BYTES 0x10
|
||||
#define ASC_TAG_FLAG_DISABLE_DISCONNECT 0x04
|
||||
|
@ -464,8 +455,6 @@ typedef struct asc_dvc_cfg {
|
|||
ASC_SCSI_BIT_ID_TYPE disc_enable;
|
||||
ASC_SCSI_BIT_ID_TYPE sdtr_enable;
|
||||
uchar chip_scsi_id;
|
||||
uchar isa_dma_speed;
|
||||
uchar isa_dma_channel;
|
||||
uchar chip_version;
|
||||
ushort mcode_date;
|
||||
ushort mcode_version;
|
||||
|
@ -572,10 +561,8 @@ typedef struct asc_cap_info_array {
|
|||
#define ASC_EEP_MAX_RETRY 20
|
||||
|
||||
/*
|
||||
* These macros keep the chip SCSI id and ISA DMA speed
|
||||
* bitfields in board order. C bitfields aren't portable
|
||||
* between big and little-endian platforms so they are
|
||||
* not used.
|
||||
* These macros keep the chip SCSI id bitfields in board order. C bitfields
|
||||
* aren't portable between big and little-endian platforms so they are not used.
|
||||
*/
|
||||
|
||||
#define ASC_EEP_GET_CHIP_ID(cfg) ((cfg)->id_speed & 0x0f)
|
||||
|
@ -2340,9 +2327,8 @@ static void asc_prt_asc_dvc_cfg(ASC_DVC_CFG *h)
|
|||
printk(" disc_enable 0x%x, sdtr_enable 0x%x,\n",
|
||||
h->disc_enable, h->sdtr_enable);
|
||||
|
||||
printk(" chip_scsi_id %d, isa_dma_speed %d, isa_dma_channel %d, "
|
||||
"chip_version %d,\n", h->chip_scsi_id, h->isa_dma_speed,
|
||||
h->isa_dma_channel, h->chip_version);
|
||||
printk(" chip_scsi_id %d, chip_version %d,\n",
|
||||
h->chip_scsi_id, h->chip_version);
|
||||
|
||||
printk(" mcode_date 0x%x, mcode_version %d\n",
|
||||
h->mcode_date, h->mcode_version);
|
||||
|
@ -2415,8 +2401,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
|
|||
printk(" dma_channel %d, this_id %d, can_queue %d,\n",
|
||||
s->dma_channel, s->this_id, s->can_queue);
|
||||
|
||||
printk(" cmd_per_lun %d, sg_tablesize %d, unchecked_isa_dma %d\n",
|
||||
s->cmd_per_lun, s->sg_tablesize, s->unchecked_isa_dma);
|
||||
printk(" cmd_per_lun %d, sg_tablesize %d\n",
|
||||
s->cmd_per_lun, s->sg_tablesize);
|
||||
|
||||
if (ASC_NARROW_BOARD(boardp)) {
|
||||
asc_prt_asc_dvc_var(&boardp->dvc_var.asc_dvc_var);
|
||||
|
@ -2632,42 +2618,28 @@ static const char *advansys_info(struct Scsi_Host *shost)
|
|||
if (ASC_NARROW_BOARD(boardp)) {
|
||||
asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
|
||||
ASC_DBG(1, "begin\n");
|
||||
if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
|
||||
if ((asc_dvc_varp->bus_type & ASC_IS_ISAPNP) ==
|
||||
ASC_IS_ISAPNP) {
|
||||
busname = "ISA PnP";
|
||||
|
||||
if (asc_dvc_varp->bus_type & ASC_IS_VL) {
|
||||
busname = "VL";
|
||||
} else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
|
||||
busname = "EISA";
|
||||
} else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
|
||||
if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
|
||||
== ASC_IS_PCI_ULTRA) {
|
||||
busname = "PCI Ultra";
|
||||
} else {
|
||||
busname = "ISA";
|
||||
busname = "PCI";
|
||||
}
|
||||
sprintf(info,
|
||||
"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X, DMA 0x%X",
|
||||
ASC_VERSION, busname,
|
||||
(ulong)shost->io_port,
|
||||
(ulong)shost->io_port + ASC_IOADR_GAP - 1,
|
||||
boardp->irq, shost->dma_channel);
|
||||
} else {
|
||||
if (asc_dvc_varp->bus_type & ASC_IS_VL) {
|
||||
busname = "VL";
|
||||
} else if (asc_dvc_varp->bus_type & ASC_IS_EISA) {
|
||||
busname = "EISA";
|
||||
} else if (asc_dvc_varp->bus_type & ASC_IS_PCI) {
|
||||
if ((asc_dvc_varp->bus_type & ASC_IS_PCI_ULTRA)
|
||||
== ASC_IS_PCI_ULTRA) {
|
||||
busname = "PCI Ultra";
|
||||
} else {
|
||||
busname = "PCI";
|
||||
}
|
||||
} else {
|
||||
busname = "?";
|
||||
shost_printk(KERN_ERR, shost, "unknown bus "
|
||||
"type %d\n", asc_dvc_varp->bus_type);
|
||||
}
|
||||
sprintf(info,
|
||||
"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
|
||||
ASC_VERSION, busname, (ulong)shost->io_port,
|
||||
(ulong)shost->io_port + ASC_IOADR_GAP - 1,
|
||||
boardp->irq);
|
||||
busname = "?";
|
||||
shost_printk(KERN_ERR, shost, "unknown bus "
|
||||
"type %d\n", asc_dvc_varp->bus_type);
|
||||
}
|
||||
sprintf(info,
|
||||
"AdvanSys SCSI %s: %s: IO 0x%lX-0x%lX, IRQ 0x%X",
|
||||
ASC_VERSION, busname, (ulong)shost->io_port,
|
||||
(ulong)shost->io_port + ASC_IOADR_GAP - 1,
|
||||
boardp->irq);
|
||||
} else {
|
||||
/*
|
||||
* Wide Adapter Information
|
||||
|
@ -2873,12 +2845,7 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
|
|||
ASCEEP_CONFIG *ep;
|
||||
int i;
|
||||
uchar serialstr[13];
|
||||
#ifdef CONFIG_ISA
|
||||
ASC_DVC_VAR *asc_dvc_varp;
|
||||
int isa_dma_speed[] = { 10, 8, 7, 6, 5, 4, 3, 2 };
|
||||
|
||||
asc_dvc_varp = &boardp->dvc_var.asc_dvc_var;
|
||||
#endif /* CONFIG_ISA */
|
||||
ep = &boardp->eep_config.asc_eep;
|
||||
|
||||
seq_printf(m,
|
||||
|
@ -2926,14 +2893,6 @@ static void asc_prt_asc_board_eeprom(struct seq_file *m, struct Scsi_Host *shost
|
|||
seq_printf(m, " %c",
|
||||
(ep->init_sdtr & ADV_TID_TO_TIDMASK(i)) ? 'Y' : 'N');
|
||||
seq_putc(m, '\n');
|
||||
|
||||
#ifdef CONFIG_ISA
|
||||
if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
|
||||
seq_printf(m,
|
||||
" Host ISA DMA speed: %d MB/S\n",
|
||||
isa_dma_speed[ASC_EEP_GET_DMA_SPD(ep)]);
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3180,10 +3139,6 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
|
|||
shost->unique_id, shost->can_queue, shost->this_id,
|
||||
shost->sg_tablesize, shost->cmd_per_lun);
|
||||
|
||||
seq_printf(m,
|
||||
" unchecked_isa_dma %d\n",
|
||||
shost->unchecked_isa_dma);
|
||||
|
||||
seq_printf(m,
|
||||
" flags 0x%x, last_reset 0x%lx, jiffies 0x%lx, asc_n_io_port 0x%x\n",
|
||||
boardp->flags, shost->last_reset, jiffies,
|
||||
|
@ -8563,12 +8518,6 @@ static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
|
|||
}
|
||||
|
||||
cfg_lsw = AscGetChipCfgLsw(iop_base);
|
||||
|
||||
/*
|
||||
* ISA PnP uses the top bit as the 32K BIOS flag
|
||||
*/
|
||||
if (bus_type == ASC_IS_ISAPNP)
|
||||
cfg_lsw &= 0x7FFF;
|
||||
bios_addr = ASC_BIOS_MIN_ADDR + (cfg_lsw >> 12) * ASC_BIOS_BANK_SIZE;
|
||||
return bios_addr;
|
||||
}
|
||||
|
@ -8611,19 +8560,6 @@ static unsigned char AscGetChipVersion(PortAddr iop_base,
|
|||
return AscGetChipVerNo(iop_base);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ISA
|
||||
static void AscEnableIsaDma(uchar dma_channel)
|
||||
{
|
||||
if (dma_channel < 4) {
|
||||
outp(0x000B, (ushort)(0xC0 | dma_channel));
|
||||
outp(0x000A, dma_channel);
|
||||
} else if (dma_channel < 8) {
|
||||
outp(0x00D6, (ushort)(0xC0 | (dma_channel - 4)));
|
||||
outp(0x00D4, (ushort)(dma_channel - 4));
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
|
||||
static int AscStopQueueExe(PortAddr iop_base)
|
||||
{
|
||||
int count = 0;
|
||||
|
@ -8644,65 +8580,11 @@ static int AscStopQueueExe(PortAddr iop_base)
|
|||
|
||||
static unsigned int AscGetMaxDmaCount(ushort bus_type)
|
||||
{
|
||||
if (bus_type & ASC_IS_ISA)
|
||||
return ASC_MAX_ISA_DMA_COUNT;
|
||||
else if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
|
||||
if (bus_type & (ASC_IS_EISA | ASC_IS_VL))
|
||||
return ASC_MAX_VL_DMA_COUNT;
|
||||
return ASC_MAX_PCI_DMA_COUNT;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ISA
|
||||
static ushort AscGetIsaDmaChannel(PortAddr iop_base)
|
||||
{
|
||||
ushort channel;
|
||||
|
||||
channel = AscGetChipCfgLsw(iop_base) & 0x0003;
|
||||
if (channel == 0x03)
|
||||
return (0);
|
||||
else if (channel == 0x00)
|
||||
return (7);
|
||||
return (channel + 4);
|
||||
}
|
||||
|
||||
static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
|
||||
{
|
||||
ushort cfg_lsw;
|
||||
uchar value;
|
||||
|
||||
if ((dma_channel >= 5) && (dma_channel <= 7)) {
|
||||
if (dma_channel == 7)
|
||||
value = 0x00;
|
||||
else
|
||||
value = dma_channel - 4;
|
||||
cfg_lsw = AscGetChipCfgLsw(iop_base) & 0xFFFC;
|
||||
cfg_lsw |= value;
|
||||
AscSetChipCfgLsw(iop_base, cfg_lsw);
|
||||
return (AscGetIsaDmaChannel(iop_base));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
|
||||
{
|
||||
uchar speed_value;
|
||||
|
||||
AscSetBank(iop_base, 1);
|
||||
speed_value = AscReadChipDmaSpeed(iop_base);
|
||||
speed_value &= 0x07;
|
||||
AscSetBank(iop_base, 0);
|
||||
return speed_value;
|
||||
}
|
||||
|
||||
static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
|
||||
{
|
||||
speed_value &= 0x07;
|
||||
AscSetBank(iop_base, 1);
|
||||
AscWriteChipDmaSpeed(iop_base, speed_value);
|
||||
AscSetBank(iop_base, 0);
|
||||
return AscGetIsaDmaSpeed(iop_base);
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
|
||||
static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
|
||||
{
|
||||
int i;
|
||||
|
@ -8712,7 +8594,7 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
|
|||
iop_base = asc_dvc->iop_base;
|
||||
asc_dvc->err_code = 0;
|
||||
if ((asc_dvc->bus_type &
|
||||
(ASC_IS_ISA | ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
|
||||
(ASC_IS_PCI | ASC_IS_EISA | ASC_IS_VL)) == 0) {
|
||||
asc_dvc->err_code |= ASC_IERR_NO_BUS_TYPE;
|
||||
}
|
||||
AscSetChipControl(iop_base, CC_HALT);
|
||||
|
@ -8767,17 +8649,6 @@ static void AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
|
|||
(SEC_ACTIVE_NEGATE | SEC_SLEW_RATE));
|
||||
}
|
||||
|
||||
asc_dvc->cfg->isa_dma_speed = ASC_DEF_ISA_DMA_SPEED;
|
||||
#ifdef CONFIG_ISA
|
||||
if ((asc_dvc->bus_type & ASC_IS_ISA) != 0) {
|
||||
if (chip_version >= ASC_CHIP_MIN_VER_ISA_PNP) {
|
||||
AscSetChipIFC(iop_base, IFC_INIT_DEFAULT);
|
||||
asc_dvc->bus_type = ASC_IS_ISAPNP;
|
||||
}
|
||||
asc_dvc->cfg->isa_dma_channel =
|
||||
(uchar)AscGetIsaDmaChannel(iop_base);
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
for (i = 0; i <= ASC_MAX_TID; i++) {
|
||||
asc_dvc->cur_dvc_qng[i] = 0;
|
||||
asc_dvc->max_dvc_qng[i] = ASC_MAX_SCSI1_QNG;
|
||||
|
@ -9141,7 +9012,6 @@ static int AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
|
|||
asc_dvc->cfg->sdtr_enable = eep_config->init_sdtr;
|
||||
asc_dvc->cfg->disc_enable = eep_config->disc_enable;
|
||||
asc_dvc->cfg->cmd_qng_enabled = eep_config->use_cmd_qng;
|
||||
asc_dvc->cfg->isa_dma_speed = ASC_EEP_GET_DMA_SPD(eep_config);
|
||||
asc_dvc->start_motor = eep_config->start_motor;
|
||||
asc_dvc->dvc_cntl = eep_config->cntl;
|
||||
asc_dvc->no_scam = eep_config->no_scam;
|
||||
|
@ -9314,22 +9184,10 @@ static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
|
|||
}
|
||||
} else
|
||||
#endif /* CONFIG_PCI */
|
||||
if (asc_dvc->bus_type == ASC_IS_ISAPNP) {
|
||||
if (AscGetChipVersion(iop_base, asc_dvc->bus_type)
|
||||
== ASC_CHIP_VER_ASYN_BUG) {
|
||||
asc_dvc->bug_fix_cntl |= ASC_BUG_FIX_ASYN_USE_SYN;
|
||||
}
|
||||
}
|
||||
if (AscSetChipScsiID(iop_base, asc_dvc->cfg->chip_scsi_id) !=
|
||||
asc_dvc->cfg->chip_scsi_id) {
|
||||
asc_dvc->err_code |= ASC_IERR_SET_SCSI_ID;
|
||||
}
|
||||
#ifdef CONFIG_ISA
|
||||
if (asc_dvc->bus_type & ASC_IS_ISA) {
|
||||
AscSetIsaDmaChannel(iop_base, asc_dvc->cfg->isa_dma_channel);
|
||||
AscSetIsaDmaSpeed(iop_base, asc_dvc->cfg->isa_dma_speed);
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
|
||||
asc_dvc->init_state |= ASC_INIT_STATE_END_SET_CFG;
|
||||
|
||||
|
@ -10752,12 +10610,6 @@ static struct scsi_host_template advansys_template = {
|
|||
.eh_host_reset_handler = advansys_reset,
|
||||
.bios_param = advansys_biosparam,
|
||||
.slave_configure = advansys_slave_configure,
|
||||
/*
|
||||
* Because the driver may control an ISA adapter 'unchecked_isa_dma'
|
||||
* must be set. The flag will be cleared in advansys_board_found
|
||||
* for non-ISA adapters.
|
||||
*/
|
||||
.unchecked_isa_dma = true,
|
||||
};
|
||||
|
||||
static int advansys_wide_init_chip(struct Scsi_Host *shost)
|
||||
|
@ -10923,29 +10775,21 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
*/
|
||||
switch (asc_dvc_varp->bus_type) {
|
||||
#ifdef CONFIG_ISA
|
||||
case ASC_IS_ISA:
|
||||
shost->unchecked_isa_dma = true;
|
||||
share_irq = 0;
|
||||
break;
|
||||
case ASC_IS_VL:
|
||||
shost->unchecked_isa_dma = false;
|
||||
share_irq = 0;
|
||||
break;
|
||||
case ASC_IS_EISA:
|
||||
shost->unchecked_isa_dma = false;
|
||||
share_irq = IRQF_SHARED;
|
||||
break;
|
||||
#endif /* CONFIG_ISA */
|
||||
#ifdef CONFIG_PCI
|
||||
case ASC_IS_PCI:
|
||||
shost->unchecked_isa_dma = false;
|
||||
share_irq = IRQF_SHARED;
|
||||
break;
|
||||
#endif /* CONFIG_PCI */
|
||||
default:
|
||||
shost_printk(KERN_ERR, shost, "unknown adapter type: "
|
||||
"%d\n", asc_dvc_varp->bus_type);
|
||||
shost->unchecked_isa_dma = false;
|
||||
share_irq = 0;
|
||||
break;
|
||||
}
|
||||
|
@ -10964,7 +10808,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
* For Wide boards set PCI information before calling
|
||||
* AdvInitGetConfig().
|
||||
*/
|
||||
shost->unchecked_isa_dma = false;
|
||||
share_irq = IRQF_SHARED;
|
||||
ASC_DBG(2, "AdvInitGetConfig()\n");
|
||||
|
||||
|
@ -11000,7 +10843,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
ep->init_sdtr = asc_dvc_varp->cfg->sdtr_enable;
|
||||
ep->disc_enable = asc_dvc_varp->cfg->disc_enable;
|
||||
ep->use_cmd_qng = asc_dvc_varp->cfg->cmd_qng_enabled;
|
||||
ASC_EEP_SET_DMA_SPD(ep, asc_dvc_varp->cfg->isa_dma_speed);
|
||||
ASC_EEP_SET_DMA_SPD(ep, ASC_DEF_ISA_DMA_SPEED);
|
||||
ep->start_motor = asc_dvc_varp->start_motor;
|
||||
ep->cntl = asc_dvc_varp->dvc_cntl;
|
||||
ep->no_scam = asc_dvc_varp->no_scam;
|
||||
|
@ -11228,22 +11071,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
|
||||
/* Register DMA Channel for Narrow boards. */
|
||||
shost->dma_channel = NO_ISA_DMA; /* Default to no ISA DMA. */
|
||||
#ifdef CONFIG_ISA
|
||||
if (ASC_NARROW_BOARD(boardp)) {
|
||||
/* Register DMA channel for ISA bus. */
|
||||
if (asc_dvc_varp->bus_type & ASC_IS_ISA) {
|
||||
shost->dma_channel = asc_dvc_varp->cfg->isa_dma_channel;
|
||||
ret = request_dma(shost->dma_channel, DRV_NAME);
|
||||
if (ret) {
|
||||
shost_printk(KERN_ERR, shost, "request_dma() "
|
||||
"%d failed %d\n",
|
||||
shost->dma_channel, ret);
|
||||
goto err_unmap;
|
||||
}
|
||||
AscEnableIsaDma(shost->dma_channel);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_ISA */
|
||||
|
||||
/* Register IRQ Number. */
|
||||
ASC_DBG(2, "request_irq(%d, %p)\n", boardp->irq, shost);
|
||||
|
@ -11262,7 +11089,7 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
shost_printk(KERN_ERR, shost, "request_irq(): IRQ 0x%x "
|
||||
"failed with %d\n", boardp->irq, ret);
|
||||
}
|
||||
goto err_free_dma;
|
||||
goto err_unmap;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -11314,11 +11141,6 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
|
|||
advansys_wide_free_mem(boardp);
|
||||
err_free_irq:
|
||||
free_irq(boardp->irq, shost);
|
||||
err_free_dma:
|
||||
#ifdef CONFIG_ISA
|
||||
if (shost->dma_channel != NO_ISA_DMA)
|
||||
free_dma(shost->dma_channel);
|
||||
#endif
|
||||
err_unmap:
|
||||
if (boardp->ioremap_addr)
|
||||
iounmap(boardp->ioremap_addr);
|
||||
|
@ -11339,12 +11161,7 @@ static int advansys_release(struct Scsi_Host *shost)
|
|||
ASC_DBG(1, "begin\n");
|
||||
scsi_remove_host(shost);
|
||||
free_irq(board->irq, shost);
|
||||
#ifdef CONFIG_ISA
|
||||
if (shost->dma_channel != NO_ISA_DMA) {
|
||||
ASC_DBG(1, "free_dma()\n");
|
||||
free_dma(shost->dma_channel);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (ASC_NARROW_BOARD(board)) {
|
||||
dma_unmap_single(board->dev,
|
||||
board->dvc_var.asc_dvc_var.overrun_dma,
|
||||
|
@ -11366,79 +11183,13 @@ static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
|
|||
0x0210, 0x0230, 0x0250, 0x0330
|
||||
};
|
||||
|
||||
/*
|
||||
* The ISA IRQ number is found in bits 2 and 3 of the CfgLsw. It decodes as:
|
||||
* 00: 10
|
||||
* 01: 11
|
||||
* 10: 12
|
||||
* 11: 15
|
||||
*/
|
||||
static unsigned int advansys_isa_irq_no(PortAddr iop_base)
|
||||
{
|
||||
unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
|
||||
unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
|
||||
if (chip_irq == 13)
|
||||
chip_irq = 15;
|
||||
return chip_irq;
|
||||
}
|
||||
|
||||
static int advansys_isa_probe(struct device *dev, unsigned int id)
|
||||
{
|
||||
int err = -ENODEV;
|
||||
PortAddr iop_base = _asc_def_iop_base[id];
|
||||
struct Scsi_Host *shost;
|
||||
struct asc_board *board;
|
||||
|
||||
if (!request_region(iop_base, ASC_IOADR_GAP, DRV_NAME)) {
|
||||
ASC_DBG(1, "I/O port 0x%x busy\n", iop_base);
|
||||
return -ENODEV;
|
||||
}
|
||||
ASC_DBG(1, "probing I/O port 0x%x\n", iop_base);
|
||||
if (!AscFindSignature(iop_base))
|
||||
goto release_region;
|
||||
if (!(AscGetChipVersion(iop_base, ASC_IS_ISA) & ASC_CHIP_VER_ISA_BIT))
|
||||
goto release_region;
|
||||
|
||||
err = -ENOMEM;
|
||||
shost = scsi_host_alloc(&advansys_template, sizeof(*board));
|
||||
if (!shost)
|
||||
goto release_region;
|
||||
|
||||
board = shost_priv(shost);
|
||||
board->irq = advansys_isa_irq_no(iop_base);
|
||||
board->dev = dev;
|
||||
board->shost = shost;
|
||||
|
||||
err = advansys_board_found(shost, iop_base, ASC_IS_ISA);
|
||||
if (err)
|
||||
goto free_host;
|
||||
|
||||
dev_set_drvdata(dev, shost);
|
||||
return 0;
|
||||
|
||||
free_host:
|
||||
scsi_host_put(shost);
|
||||
release_region:
|
||||
release_region(iop_base, ASC_IOADR_GAP);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void advansys_isa_remove(struct device *dev, unsigned int id)
|
||||
static void advansys_vlb_remove(struct device *dev, unsigned int id)
|
||||
{
|
||||
int ioport = _asc_def_iop_base[id];
|
||||
advansys_release(dev_get_drvdata(dev));
|
||||
release_region(ioport, ASC_IOADR_GAP);
|
||||
}
|
||||
|
||||
static struct isa_driver advansys_isa_driver = {
|
||||
.probe = advansys_isa_probe,
|
||||
.remove = advansys_isa_remove,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = DRV_NAME,
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* The VLB IRQ number is found in bits 2 to 4 of the CfgLsw. It decodes as:
|
||||
* 000: invalid
|
||||
|
@ -11507,7 +11258,7 @@ static int advansys_vlb_probe(struct device *dev, unsigned int id)
|
|||
|
||||
static struct isa_driver advansys_vlb_driver = {
|
||||
.probe = advansys_vlb_probe,
|
||||
.remove = advansys_isa_remove,
|
||||
.remove = advansys_vlb_remove,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "advansys_vlb",
|
||||
|
@ -11757,15 +11508,10 @@ static int __init advansys_init(void)
|
|||
{
|
||||
int error;
|
||||
|
||||
error = isa_register_driver(&advansys_isa_driver,
|
||||
ASC_IOADR_TABLE_MAX_IX);
|
||||
if (error)
|
||||
goto fail;
|
||||
|
||||
error = isa_register_driver(&advansys_vlb_driver,
|
||||
ASC_IOADR_TABLE_MAX_IX);
|
||||
if (error)
|
||||
goto unregister_isa;
|
||||
goto fail;
|
||||
|
||||
error = eisa_driver_register(&advansys_eisa_driver);
|
||||
if (error)
|
||||
|
@ -11781,8 +11527,6 @@ static int __init advansys_init(void)
|
|||
eisa_driver_unregister(&advansys_eisa_driver);
|
||||
unregister_vlb:
|
||||
isa_unregister_driver(&advansys_vlb_driver);
|
||||
unregister_isa:
|
||||
isa_unregister_driver(&advansys_isa_driver);
|
||||
fail:
|
||||
return error;
|
||||
}
|
||||
|
@ -11792,7 +11536,6 @@ static void __exit advansys_exit(void)
|
|||
pci_unregister_driver(&advansys_pci_driver);
|
||||
eisa_driver_unregister(&advansys_eisa_driver);
|
||||
isa_unregister_driver(&advansys_vlb_driver);
|
||||
isa_unregister_driver(&advansys_isa_driver);
|
||||
}
|
||||
|
||||
module_init(advansys_init);
|
||||
|
|
|
@@ -65,9 +65,12 @@ struct aha1542_hostdata {
	dma_addr_t ccb_handle;
};

#define AHA1542_MAX_SECTORS 16

struct aha1542_cmd {
	struct chain *chain;
	dma_addr_t chain_handle;
	/* bounce buffer */
	void *data_buffer;
	dma_addr_t data_buffer_handle;
};
|
||||
|
||||
static inline void aha1542_intr_reset(u16 base)
|
||||
|
@ -257,15 +260,19 @@ static int aha1542_test_port(struct Scsi_Host *sh)
|
|||
static void aha1542_free_cmd(struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
|
||||
struct device *dev = cmd->device->host->dma_dev;
|
||||
size_t len = scsi_sg_count(cmd) * sizeof(struct chain);
|
||||
|
||||
if (acmd->chain) {
|
||||
dma_unmap_single(dev, acmd->chain_handle, len, DMA_TO_DEVICE);
|
||||
kfree(acmd->chain);
|
||||
if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
|
||||
void *buf = acmd->data_buffer;
|
||||
struct req_iterator iter;
|
||||
struct bio_vec bv;
|
||||
|
||||
rq_for_each_segment(bv, cmd->request, iter) {
|
||||
memcpy_to_page(bv.bv_page, bv.bv_offset, buf,
|
||||
bv.bv_len);
|
||||
buf += bv.bv_len;
|
||||
}
|
||||
}
|
||||
|
||||
acmd->chain = NULL;
|
||||
scsi_dma_unmap(cmd);
|
||||
}
|
||||
|
||||
|
@ -416,7 +423,7 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
u8 lun = cmd->device->lun;
|
||||
unsigned long flags;
|
||||
int bufflen = scsi_bufflen(cmd);
|
||||
int mbo, sg_count;
|
||||
int mbo;
|
||||
struct mailbox *mb = aha1542->mb;
|
||||
struct ccb *ccb = aha1542->ccb;
|
||||
|
||||
|
@ -438,17 +445,17 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
|
||||
}
|
||||
#endif
|
||||
sg_count = scsi_dma_map(cmd);
|
||||
if (sg_count) {
|
||||
size_t len = sg_count * sizeof(struct chain);
|
||||
|
||||
acmd->chain = kmalloc(len, GFP_DMA);
|
||||
if (!acmd->chain)
|
||||
goto out_unmap;
|
||||
acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
|
||||
len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
|
||||
goto out_free_chain;
|
||||
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
|
||||
void *buf = acmd->data_buffer;
|
||||
struct req_iterator iter;
|
||||
struct bio_vec bv;
|
||||
|
||||
rq_for_each_segment(bv, cmd->request, iter) {
|
||||
memcpy_from_page(buf, bv.bv_page, bv.bv_offset,
|
||||
bv.bv_len);
|
||||
buf += bv.bv_len;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -496,27 +503,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
direction = 16;
|
||||
|
||||
memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);
|
||||
|
||||
if (bufflen) {
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
|
||||
scsi_for_each_sg(cmd, sg, sg_count, i) {
|
||||
any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
|
||||
any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
|
||||
};
|
||||
any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
|
||||
any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
|
||||
#ifdef DEBUG
|
||||
shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
|
||||
print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
|
||||
#endif
|
||||
} else {
|
||||
ccb[mbo].op = 0; /* SCSI Initiator Command */
|
||||
any2scsi(ccb[mbo].datalen, 0);
|
||||
ccb[mbo].op = 0; /* SCSI Initiator Command */
|
||||
any2scsi(ccb[mbo].datalen, bufflen);
|
||||
if (bufflen)
|
||||
any2scsi(ccb[mbo].dataptr, acmd->data_buffer_handle);
|
||||
else
|
||||
any2scsi(ccb[mbo].dataptr, 0);
|
||||
};
|
||||
ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */
|
||||
ccb[mbo].rsalen = 16;
|
||||
ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
|
||||
|
@ -531,12 +523,6 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
|
|||
spin_unlock_irqrestore(sh->host_lock, flags);
|
||||
|
||||
return 0;
|
||||
out_free_chain:
|
||||
kfree(acmd->chain);
|
||||
acmd->chain = NULL;
|
||||
out_unmap:
|
||||
scsi_dma_unmap(cmd);
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
}
|
||||
|
||||
/* Initialize mailboxes */
|
||||
|
@ -1027,6 +1013,27 @@ static int aha1542_biosparam(struct scsi_device *sdev,
|
|||
}
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static int aha1542_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
|
||||
|
||||
acmd->data_buffer = dma_alloc_coherent(shost->dma_dev,
|
||||
SECTOR_SIZE * AHA1542_MAX_SECTORS,
|
||||
&acmd->data_buffer_handle, GFP_KERNEL);
|
||||
if (!acmd->data_buffer)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aha1542_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
|
||||
|
||||
dma_free_coherent(shost->dma_dev, SECTOR_SIZE * AHA1542_MAX_SECTORS,
|
||||
acmd->data_buffer, acmd->data_buffer_handle);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.proc_name = "aha1542",
@@ -1037,10 +1044,12 @@ static struct scsi_host_template driver_template = {
	.eh_bus_reset_handler = aha1542_bus_reset,
	.eh_host_reset_handler = aha1542_host_reset,
	.bios_param = aha1542_biosparam,
	.init_cmd_priv = aha1542_init_cmd_priv,
	.exit_cmd_priv = aha1542_exit_cmd_priv,
	.can_queue = AHA1542_MAILBOXES,
	.this_id = 7,
	.sg_tablesize = 16,
	.unchecked_isa_dma = 1,
	.max_sectors = AHA1542_MAX_SECTORS,
	.sg_tablesize = SG_ALL,
};
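One consequence worth spelling out (an illustrative calculation, not text from the patch): with .max_sectors capped at AHA1542_MAX_SECTORS (16) and SECTOR_SIZE being 512 bytes, the per-command coherent buffer allocated in aha1542_init_cmd_priv() is 16 * 512 = 8 KiB, so any request the block layer can now send this driver is guaranteed to fit in that command's bounce buffer.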
|
||||
|
||||
static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
|
||||
|
|
|
@ -249,7 +249,6 @@ static struct scsi_host_template driver_template = {
|
|||
.cmd_per_lun =
|
||||
ESAS2R_DEFAULT_CMD_PER_LUN,
|
||||
.present = 0,
|
||||
.unchecked_isa_dma = 0,
|
||||
.emulated = 0,
|
||||
.proc_name = ESAS2R_DRVR_NAME,
|
||||
.change_queue_depth = scsi_change_queue_depth,
|
||||
|
|
|
@ -371,13 +371,9 @@ static struct device_type scsi_host_type = {
|
|||
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
|
||||
{
|
||||
struct Scsi_Host *shost;
|
||||
gfp_t gfp_mask = GFP_KERNEL;
|
||||
int index;
|
||||
|
||||
if (sht->unchecked_isa_dma && privsize)
|
||||
gfp_mask |= __GFP_DMA;
|
||||
|
||||
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
|
||||
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
|
||||
if (!shost)
|
||||
return NULL;
|
||||
|
||||
|
@ -419,7 +415,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
|
|||
shost->sg_tablesize = sht->sg_tablesize;
|
||||
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
|
||||
shost->cmd_per_lun = sht->cmd_per_lun;
|
||||
shost->unchecked_isa_dma = sht->unchecked_isa_dma;
|
||||
shost->no_write_same = sht->no_write_same;
|
||||
shost->host_tagset = sht->host_tagset;
|
||||
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#define SCSI_CMD_FLAG_NAME(name)[const_ilog2(SCMD_##name)] = #name
|
||||
static const char *const scsi_cmd_flags[] = {
|
||||
SCSI_CMD_FLAG_NAME(TAGGED),
|
||||
SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA),
|
||||
SCSI_CMD_FLAG_NAME(INITIALIZED),
|
||||
};
|
||||
#undef SCSI_CMD_FLAG_NAME
|
||||
|
|
|
@ -53,49 +53,16 @@
|
|||
#endif
|
||||
|
||||
static struct kmem_cache *scsi_sense_cache;
|
||||
static struct kmem_cache *scsi_sense_isadma_cache;
|
||||
static DEFINE_MUTEX(scsi_sense_cache_mutex);
|
||||
|
||||
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);
|
||||
|
||||
static inline struct kmem_cache *
|
||||
scsi_select_sense_cache(bool unchecked_isa_dma)
|
||||
{
|
||||
return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
|
||||
}
|
||||
|
||||
static void scsi_free_sense_buffer(bool unchecked_isa_dma,
|
||||
unsigned char *sense_buffer)
|
||||
{
|
||||
kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
|
||||
sense_buffer);
|
||||
}
|
||||
|
||||
static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
|
||||
gfp_t gfp_mask, int numa_node)
|
||||
{
|
||||
return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
|
||||
gfp_mask, numa_node);
|
||||
}
|
||||
|
||||
int scsi_init_sense_cache(struct Scsi_Host *shost)
|
||||
{
|
||||
struct kmem_cache *cache;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&scsi_sense_cache_mutex);
|
||||
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
|
||||
if (cache)
|
||||
goto exit;
|
||||
|
||||
if (shost->unchecked_isa_dma) {
|
||||
scsi_sense_isadma_cache =
|
||||
kmem_cache_create("scsi_sense_cache(DMA)",
|
||||
SCSI_SENSE_BUFFERSIZE, 0,
|
||||
SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
|
||||
if (!scsi_sense_isadma_cache)
|
||||
ret = -ENOMEM;
|
||||
} else {
|
||||
if (!scsi_sense_cache) {
|
||||
scsi_sense_cache =
|
||||
kmem_cache_create_usercopy("scsi_sense_cache",
|
||||
SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
|
||||
|
@ -103,7 +70,6 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
|
|||
if (!scsi_sense_cache)
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
exit:
|
||||
mutex_unlock(&scsi_sense_cache_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1748,15 +1714,12 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
|||
unsigned int hctx_idx, unsigned int numa_node)
|
||||
{
|
||||
struct Scsi_Host *shost = set->driver_data;
|
||||
const bool unchecked_isa_dma = shost->unchecked_isa_dma;
|
||||
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
|
||||
struct scatterlist *sg;
|
||||
int ret = 0;
|
||||
|
||||
if (unchecked_isa_dma)
|
||||
cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
|
||||
cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
|
||||
GFP_KERNEL, numa_node);
|
||||
cmd->sense_buffer =
|
||||
kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
|
||||
if (!cmd->sense_buffer)
|
||||
return -ENOMEM;
|
||||
cmd->req.sense = cmd->sense_buffer;
|
||||
|
@ -1770,8 +1733,7 @@ static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
|
|||
if (shost->hostt->init_cmd_priv) {
|
||||
ret = shost->hostt->init_cmd_priv(shost, cmd);
|
||||
if (ret < 0)
|
||||
scsi_free_sense_buffer(unchecked_isa_dma,
|
||||
cmd->sense_buffer);
|
||||
kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -1785,8 +1747,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
|
|||
|
||||
if (shost->hostt->exit_cmd_priv)
|
||||
shost->hostt->exit_cmd_priv(shost, cmd);
|
||||
scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
|
||||
cmd->sense_buffer);
|
||||
kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
|
||||
}
|
||||
|
||||
static int scsi_map_queues(struct blk_mq_tag_set *set)
|
||||
|
@ -1821,8 +1782,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
|
|||
dma_max_mapping_size(dev) >> SECTOR_SHIFT);
|
||||
}
|
||||
blk_queue_max_hw_sectors(q, shost->max_sectors);
|
||||
if (shost->unchecked_isa_dma)
|
||||
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
|
||||
blk_queue_segment_boundary(q, shost->dma_boundary);
|
||||
dma_set_seg_boundary(dev, shost->dma_boundary);
|
||||
|
||||
|
@ -1988,7 +1947,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
|
|||
void scsi_exit_queue(void)
|
||||
{
|
||||
kmem_cache_destroy(scsi_sense_cache);
|
||||
kmem_cache_destroy(scsi_sense_isadma_cache);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1078,8 +1078,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
|
|||
if (!sdev)
|
||||
goto out;
|
||||
|
||||
result = kmalloc(result_len, GFP_KERNEL |
|
||||
((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
|
||||
result = kmalloc(result_len, GFP_KERNEL);
|
||||
if (!result)
|
||||
goto out_free_sdev;
|
||||
|
||||
|
@ -1336,8 +1335,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
|
|||
*/
|
||||
length = (511 + 1) * sizeof(struct scsi_lun);
|
||||
retry:
|
||||
lun_data = kmalloc(length, GFP_KERNEL |
|
||||
(sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
|
||||
lun_data = kmalloc(length, GFP_KERNEL);
|
||||
if (!lun_data) {
|
||||
printk(ALLOC_FAILURE_MSG, __func__);
|
||||
goto out;
|
||||
|
|
|
@ -373,7 +373,6 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
|
|||
shost_rd_attr(can_queue, "%d\n");
|
||||
shost_rd_attr(sg_tablesize, "%hu\n");
|
||||
shost_rd_attr(sg_prot_tablesize, "%hu\n");
|
||||
shost_rd_attr(unchecked_isa_dma, "%d\n");
|
||||
shost_rd_attr(prot_capabilities, "%u\n");
|
||||
shost_rd_attr(prot_guard_type, "%hd\n");
|
||||
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
|
||||
|
@ -411,7 +410,6 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
|
|||
&dev_attr_can_queue.attr,
|
||||
&dev_attr_sg_tablesize.attr,
|
||||
&dev_attr_sg_prot_tablesize.attr,
|
||||
&dev_attr_unchecked_isa_dma.attr,
|
||||
&dev_attr_proc_name.attr,
|
||||
&dev_attr_scan.attr,
|
||||
&dev_attr_hstate.attr,
|
||||
|
|
|
@ -974,7 +974,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
|
|||
*/
|
||||
return 0;
|
||||
case SG_GET_LOW_DMA:
|
||||
return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
|
||||
return put_user(0, ip);
|
||||
case SG_GET_SCSI_ID:
|
||||
{
|
||||
sg_scsi_id_t v;
|
||||
|
@ -1777,7 +1777,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
|
|||
|
||||
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
|
||||
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
|
||||
!sfp->parentdp->device->host->unchecked_isa_dma &&
|
||||
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
|
||||
md = NULL;
|
||||
else
|
||||
|
@ -1893,7 +1892,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
|||
int sg_tablesize = sfp->parentdp->sg_tablesize;
|
||||
int blk_size = buff_size, order;
|
||||
gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
|
||||
struct sg_device *sdp = sfp->parentdp;
|
||||
|
||||
if (blk_size < 0)
|
||||
return -EFAULT;
|
||||
|
@ -1919,9 +1917,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
|
|||
scatter_elem_sz_prev = num;
|
||||
}
|
||||
|
||||
if (sdp->device->host->unchecked_isa_dma)
|
||||
gfp_mask |= GFP_DMA;
|
||||
|
||||
order = get_order(num);
|
||||
retry:
|
||||
ret_sz = 1 << (PAGE_SHIFT + order);
|
||||
|
@ -2547,8 +2542,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
|
|||
"(res)sgat=%d low_dma=%d\n", k,
|
||||
jiffies_to_msecs(fp->timeout),
|
||||
fp->reserve.bufflen,
|
||||
(int) fp->reserve.k_use_sg,
|
||||
(int) sdp->device->host->unchecked_isa_dma);
|
||||
(int) fp->reserve.k_use_sg, 0);
|
||||
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
|
||||
(int) fp->cmd_q, (int) fp->force_packid,
|
||||
(int) fp->keep_orphan);
|
||||
|
|
|
@ -33,10 +33,6 @@ static int xa_test = 0;
|
|||
|
||||
module_param(xa_test, int, S_IRUGO | S_IWUSR);
|
||||
|
||||
/* primitive to determine whether we need to have GFP_DMA set based on
|
||||
* the status of the unchecked_isa_dma flag in the host structure */
|
||||
#define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0)
|
||||
|
||||
static int sr_read_tochdr(struct cdrom_device_info *cdi,
|
||||
struct cdrom_tochdr *tochdr)
|
||||
{
|
||||
|
@ -45,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
|
|||
int result;
|
||||
unsigned char *buffer;
|
||||
|
||||
buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
|
||||
buffer = kmalloc(32, GFP_KERNEL);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -75,7 +71,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
|
|||
int result;
|
||||
unsigned char *buffer;
|
||||
|
||||
buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
|
||||
buffer = kmalloc(32, GFP_KERNEL);
|
||||
if (!buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -384,7 +380,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
|
|||
{
|
||||
Scsi_CD *cd = cdi->handle;
|
||||
struct packet_command cgc;
|
||||
char *buffer = kmalloc(32, GFP_KERNEL | SR_GFP_DMA(cd));
|
||||
char *buffer = kmalloc(32, GFP_KERNEL);
|
||||
int result;
|
||||
|
||||
if (!buffer)
|
||||
|
@ -567,7 +563,7 @@ int sr_is_xa(Scsi_CD *cd)
|
|||
if (!xa_test)
|
||||
return 0;
|
||||
|
||||
raw_sector = kmalloc(2048, GFP_KERNEL | SR_GFP_DMA(cd));
|
||||
raw_sector = kmalloc(2048, GFP_KERNEL);
|
||||
if (!raw_sector)
|
||||
return -ENOMEM;
|
||||
if (0 == sr_read_sector(cd, cd->ms_offset + 16,
|
||||
|
|
|
@ -188,7 +188,7 @@ static int st_max_sg_segs = ST_MAX_SG;
|
|||
|
||||
static int modes_defined;
|
||||
|
||||
static int enlarge_buffer(struct st_buffer *, int, int);
|
||||
static int enlarge_buffer(struct st_buffer *, int);
|
||||
static void clear_buffer(struct st_buffer *);
|
||||
static void normalize_buffer(struct st_buffer *);
|
||||
static int append_to_buffer(const char __user *, struct st_buffer *, int);
|
||||
|
@ -1289,7 +1289,7 @@ static int st_open(struct inode *inode, struct file *filp)
|
|||
}
|
||||
|
||||
/* See that we have at least a one page buffer available */
|
||||
if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
|
||||
if (!enlarge_buffer(STp->buffer, PAGE_SIZE)) {
|
||||
st_printk(KERN_WARNING, STp,
|
||||
"Can't allocate one page tape buffer.\n");
|
||||
retval = (-EOVERFLOW);
|
||||
|
@ -1586,7 +1586,7 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
|
|||
}
|
||||
|
||||
if (bufsize > STbp->buffer_size &&
|
||||
!enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
|
||||
!enlarge_buffer(STbp, bufsize)) {
|
||||
st_printk(KERN_WARNING, STp,
|
||||
"Can't allocate %d byte tape buffer.\n",
|
||||
bufsize);
|
||||
|
@ -3894,7 +3894,7 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned lon
|
|||
|
||||
/* Try to allocate a new tape buffer. Calling function must not hold
|
||||
dev_arr_lock. */
|
||||
static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
|
||||
static struct st_buffer *new_tape_buffer(int max_sg)
|
||||
{
|
||||
struct st_buffer *tb;
|
||||
|
||||
|
@ -3905,7 +3905,6 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
|
|||
}
|
||||
tb->frp_segs = 0;
|
||||
tb->use_sg = max_sg;
|
||||
tb->dma = need_dma;
|
||||
tb->buffer_size = 0;
|
||||
|
||||
tb->reserved_pages = kcalloc(max_sg, sizeof(struct page *),
|
||||
|
@ -3922,7 +3921,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)
|
|||
/* Try to allocate enough space in the tape buffer */
|
||||
#define ST_MAX_ORDER 6
|
||||
|
||||
static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
|
||||
static int enlarge_buffer(struct st_buffer * STbuffer, int new_size)
|
||||
{
|
||||
int segs, max_segs, b_size, order, got;
|
||||
gfp_t priority;
|
||||
|
@ -3936,8 +3935,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
|
|||
max_segs = STbuffer->use_sg;
|
||||
|
||||
priority = GFP_KERNEL | __GFP_NOWARN;
|
||||
if (need_dma)
|
||||
priority |= GFP_DMA;
|
||||
|
||||
if (STbuffer->cleared)
|
||||
priority |= __GFP_ZERO;
|
||||
|
@ -3957,7 +3954,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
|
|||
if (order == ST_MAX_ORDER)
|
||||
return 0;
|
||||
normalize_buffer(STbuffer);
|
||||
return enlarge_buffer(STbuffer, new_size, need_dma);
|
||||
return enlarge_buffer(STbuffer, new_size);
|
||||
}
|
||||
|
||||
for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
|
||||
|
@ -4296,7 +4293,7 @@ static int st_probe(struct device *dev)
|
|||
i = queue_max_segments(SDp->request_queue);
|
||||
if (st_max_sg_segs < i)
|
||||
i = st_max_sg_segs;
|
||||
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
|
||||
buffer = new_tape_buffer(i);
|
||||
if (buffer == NULL) {
|
||||
sdev_printk(KERN_ERR, SDp,
|
||||
"st: Can't allocate new tape buffer. "
|
||||
|
@ -4340,7 +4337,6 @@ static int st_probe(struct device *dev)
|
|||
tpnt->dirty = 0;
|
||||
tpnt->in_use = 0;
|
||||
tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
|
||||
tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
|
||||
tpnt->use_pf = (SDp->scsi_level >= SCSI_2);
|
||||
tpnt->density = 0;
|
||||
tpnt->do_auto_lock = ST_AUTO_LOCK;
|
||||
|
@ -4358,7 +4354,7 @@ static int st_probe(struct device *dev)
|
|||
tpnt->nbr_partitions = 0;
|
||||
blk_queue_rq_timeout(tpnt->device->request_queue, ST_TIMEOUT);
|
||||
tpnt->long_timeout = ST_LONG_TIMEOUT;
|
||||
tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
|
||||
tpnt->try_dio = try_direct_io;
|
||||
|
||||
for (i = 0; i < ST_NBR_MODES; i++) {
|
||||
STm = &(tpnt->modes[i]);
|
||||
|
|
|
@ -35,7 +35,6 @@ struct st_request {
|
|||
|
||||
/* The tape buffer descriptor. */
|
||||
struct st_buffer {
|
||||
unsigned char dma; /* DMA-able buffer */
|
||||
unsigned char cleared; /* internal buffer cleared after open? */
|
||||
unsigned short do_dio; /* direct i/o set up? */
|
||||
int buffer_size;
|
||||
|
@ -133,7 +132,6 @@ struct scsi_tape {
|
|||
unsigned char two_fm;
|
||||
unsigned char fast_mteom;
|
||||
unsigned char immediate;
|
||||
unsigned char restr_dma;
|
||||
unsigned char scsi2_logical;
|
||||
unsigned char default_drvbuffer; /* 0xff = don't touch, value 3 bits */
|
||||
unsigned char cln_mode; /* 0 = none, otherwise sense byte nbr */
|
||||
|
|
|
@ -910,7 +910,7 @@ new_bio:
|
|||
" %d i: %d bio: %p, allocating another"
|
||||
" bio\n", bio->bi_vcnt, i, bio);
|
||||
|
||||
rc = blk_rq_append_bio(req, &bio);
|
||||
rc = blk_rq_append_bio(req, bio);
|
||||
if (rc) {
|
||||
pr_err("pSCSI: failed to append bio\n");
|
||||
goto fail;
|
||||
|
@ -929,7 +929,7 @@ new_bio:
|
|||
}
|
||||
|
||||
if (bio) {
|
||||
rc = blk_rq_append_bio(req, &bio);
|
||||
rc = blk_rq_append_bio(req, bio);
|
||||
if (rc) {
|
||||
pr_err("pSCSI: failed to append bio\n");
|
||||
goto fail;
|
||||
|
|
|
@@ -1240,14 +1240,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
int bdev_disk_changed(struct block_device *bdev, bool invalidate)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;
	int ret = 0;

	lockdep_assert_held(&bdev->bd_mutex);

rescan:
	ret = blk_drop_partitions(bdev);
	if (ret)
		return ret;
	if (bdev->bd_part_count)
		return -EBUSY;
	sync_blockdev(bdev);
	invalidate_bdev(bdev);
	blk_drop_partitions(disk);

	clear_bit(GD_NEED_PART_SCAN, &disk->state);
|
||||
|
||||
|
|
|
@ -483,16 +483,10 @@ extern void bio_check_pages_dirty(struct bio *bio);
|
|||
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
|
||||
struct bio *src, struct bvec_iter *src_iter);
|
||||
extern void bio_copy_data(struct bio *dst, struct bio *src);
|
||||
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
|
||||
extern void bio_free_pages(struct bio *bio);
|
||||
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
|
||||
void bio_truncate(struct bio *bio, unsigned new_size);
|
||||
void guard_bio_eod(struct bio *bio);
|
||||
|
||||
static inline void zero_fill_bio(struct bio *bio)
|
||||
{
|
||||
zero_fill_bio_iter(bio, bio->bi_iter);
|
||||
}
|
||||
void zero_fill_bio(struct bio *bio);
|
||||
|
||||
extern const char *bio_devname(struct bio *bio, char *buffer);
|
||||
|
||||
|
|
|
@@ -272,6 +272,12 @@ static inline bool bio_is_passthrough(struct bio *bio)
	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline bool blk_op_is_passthrough(unsigned int op)
{
	return (blk_op_is_scsi(op & REQ_OP_MASK) ||
		blk_op_is_private(op & REQ_OP_MASK));
}
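A minimal sketch of how an I/O scheduler hook could consume this helper to leave passthrough requests unthrottled. The hook name, the use of struct blk_mq_alloc_data and the depth value are assumptions for illustration only, not code from this diff:

/* Illustrative sketch only: skip depth limiting for passthrough requests. */
static void example_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	if (blk_op_is_passthrough(op))
		return;		/* leave passthrough at full queue depth */

	data->shallow_depth = 64;	/* arbitrary example depth */
}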
|
||||
|
||||
static inline unsigned short req_get_ioprio(struct request *req)
|
||||
{
|
||||
return req->ioprio;
|
||||
|
@@ -311,8 +317,17 @@ enum blk_zoned_model {
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};
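A short usage sketch for the new enum (assumed driver-side code, not part of the patch); it relies only on the reworked blk_queue_bounce_limit() prototype that appears further down in this diff:

/* Illustrative only: pick a bounce policy instead of a PFN-based limit. */
static void example_setup_queue(struct request_queue *q, bool cannot_reach_highmem)
{
	blk_queue_bounce_limit(q, cannot_reach_highmem ?
				  BLK_BOUNCE_HIGH : BLK_BOUNCE_NONE);
}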

struct queue_limits {
	unsigned long		bounce_pfn;
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;
|
||||
|
||||
|
@ -434,11 +449,6 @@ struct request_queue {
|
|||
*/
|
||||
int id;
|
||||
|
||||
/*
|
||||
* queue needs bounce pages for pages above this limit
|
||||
*/
|
||||
gfp_t bounce_gfp;
|
||||
|
||||
spinlock_t queue_lock;
|
||||
|
||||
/*
|
||||
|
@ -683,6 +693,8 @@ static inline bool blk_account_rq(struct request *rq)
|
|||
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
|
||||
(dir), (attrs))
|
||||
|
||||
#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
|
||||
|
||||
static inline bool queue_is_mq(struct request_queue *q)
|
||||
{
|
||||
return q->mq_ops;
|
||||
|
@ -838,24 +850,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
|
|||
return q->nr_requests;
|
||||
}
|
||||
|
||||
extern unsigned long blk_max_low_pfn, blk_max_pfn;
|
||||
|
||||
/*
|
||||
* standard bounce addresses:
|
||||
*
|
||||
* BLK_BOUNCE_HIGH : bounce all highmem pages
|
||||
* BLK_BOUNCE_ANY : don't bounce anything
|
||||
* BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
|
||||
*/
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
|
||||
#else
|
||||
#define BLK_BOUNCE_HIGH -1ULL
|
||||
#endif
|
||||
#define BLK_BOUNCE_ANY (-1ULL)
|
||||
#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
|
||||
|
||||
/*
|
||||
* default timeout for SG_IO if none specified
|
||||
*/
|
||||
|
@ -921,7 +915,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
|
|||
extern void blk_rq_unprep_clone(struct request *rq);
|
||||
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
|
||||
struct request *rq);
|
||||
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
|
||||
int blk_rq_append_bio(struct request *rq, struct bio *bio);
|
||||
extern void blk_queue_split(struct bio **);
|
||||
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
|
||||
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
|
||||
|
@ -1139,7 +1133,7 @@ extern void blk_abort_request(struct request *);
|
|||
* Access functions for manipulating queue properties
|
||||
*/
|
||||
extern void blk_cleanup_queue(struct request_queue *);
|
||||
extern void blk_queue_bounce_limit(struct request_queue *, u64);
|
||||
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
|
||||
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
|
||||
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
|
||||
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
|
||||
|
|
|
@ -204,25 +204,6 @@ static inline dev_t disk_devt(struct gendisk *disk)
|
|||
|
||||
void disk_uevent(struct gendisk *disk, enum kobject_action action);
|
||||
|
||||
/*
|
||||
* Smarter partition iterator without context limits.
|
||||
*/
|
||||
#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
|
||||
#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
|
||||
#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
|
||||
|
||||
struct disk_part_iter {
|
||||
struct gendisk *disk;
|
||||
struct block_device *part;
|
||||
unsigned long idx;
|
||||
unsigned int flags;
|
||||
};
|
||||
|
||||
extern void disk_part_iter_init(struct disk_part_iter *piter,
|
||||
struct gendisk *disk, unsigned int flags);
|
||||
struct block_device *disk_part_iter_next(struct disk_part_iter *piter);
|
||||
extern void disk_part_iter_exit(struct disk_part_iter *piter);
|
||||
|
||||
/* block/genhd.c */
|
||||
extern void device_add_disk(struct device *parent, struct gendisk *disk,
|
||||
const struct attribute_group **groups);
|
||||
|
@ -273,7 +254,7 @@ static inline sector_t get_capacity(struct gendisk *disk)
|
|||
|
||||
int bdev_disk_changed(struct block_device *bdev, bool invalidate);
|
||||
int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
|
||||
int blk_drop_partitions(struct block_device *bdev);
|
||||
void blk_drop_partitions(struct gendisk *disk);
|
||||
|
||||
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
|
||||
extern void put_disk(struct gendisk *disk);
|
||||
|
|
|
@@ -55,11 +55,10 @@ struct scsi_pointer {

/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
#define SCMD_INITIALIZED (1 << 2)
#define SCMD_LAST (1 << 3)
#define SCMD_INITIALIZED (1 << 1)
#define SCMD_LAST (1 << 2)
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED)

/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
|
||||
|
|
|
@ -424,11 +424,6 @@ struct scsi_host_template {
|
|||
*/
|
||||
unsigned supported_mode:2;
|
||||
|
||||
/*
|
||||
* True if this host adapter uses unchecked DMA onto an ISA bus.
|
||||
*/
|
||||
unsigned unchecked_isa_dma:1;
|
||||
|
||||
/*
|
||||
* True for emulated SCSI host adapters (e.g. ATAPI).
|
||||
*/
|
||||
|
@ -617,7 +612,6 @@ struct Scsi_Host {
|
|||
*/
|
||||
unsigned nr_hw_queues;
|
||||
unsigned active_mode:2;
|
||||
unsigned unchecked_isa_dma:1;
|
||||
|
||||
/*
|
||||
* Host has requested that no further requests come through for the
|
||||
|
|
|
@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
|
||||
__entry->dev = disk_devt(queue_to_disk(q));
|
||||
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
|
||||
strlcpy(__entry->type, type, sizeof(__entry->type));
|
||||
__entry->percentile = percentile;
|
||||
|
@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
|
||||
__entry->dev = disk_devt(queue_to_disk(q));
|
||||
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
|
||||
__entry->depth = depth;
|
||||
),
|
||||
|
@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
|
|||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
|
||||
__entry->dev = disk_devt(queue_to_disk(q));
|
||||
strlcpy(__entry->domain, domain, sizeof(__entry->domain));
|
||||
),
|
||||
|
||||
|
|
|
@@ -283,12 +283,11 @@ config PHYS_ADDR_T_64BIT
config BOUNCE
	bool "Enable bounce buffers"
	default y
	depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
	depends on BLOCK && MMU && HIGHMEM
	help
	  Enable bounce buffers for devices that cannot access
	  the full range of memory available to the CPU. Enabled
	  by default when ZONE_DMA or HIGHMEM is selected, but you
	  may say n to override this.
	  Enable bounce buffers for devices that cannot access the full range of
	  memory available to the CPU. Enabled by default when HIGHMEM is
	  selected, but you may say n to override this.

config VIRT_TO_BUS
	bool