if (bio_list_empty(&qn->bios_bps) &&
bio_list_empty(&qn->bios_iops)) {
@@ -553,6 +564,11 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}
+static unsigned int sq_queued(struct throtl_service_queue *sq, int type)
+{
+ return sq->nr_queued_bps[type] + sq->nr_queued_iops[type];
+}
+
static unsigned int calculate_io_allowed(u32 iops_limit,
unsigned long jiffy_elapsed)
{
@@ -682,9 +698,9 @@ static void tg_update_carryover(struct throtl_grp *tg)
long long bytes[2] = {0};
int ios[2] = {0};
- if (tg->service_queue.nr_queued[READ])
+ if (sq_queued(&tg->service_queue, READ))
__tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
- if (tg->service_queue.nr_queued[WRITE])
+ if (sq_queued(&tg->service_queue, WRITE))
__tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
/* see comments in struct throtl_grp for meaning of these fields. */
@@ -776,7 +792,8 @@ static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
*/
static void tg_update_slice(struct throtl_grp *tg, bool rw)
{
- if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
+ if (throtl_slice_used(tg, rw) &&
+ sq_queued(&tg->service_queue, rw) == 0)
throtl_start_new_slice(tg, rw, true);
else
throtl_extend_slice(tg, rw, jiffies + tg->td->throtl_slice);
@@ -832,7 +849,7 @@ static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
* this function with a different bio if there are other bios
* queued.
*/
- BUG_ON(tg->service_queue.nr_queued[rw] &&
+ BUG_ON(sq_queued(&tg->service_queue, rw) &&
bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
wait = tg_dispatch_bps_time(tg, bio);
@@ -872,12 +889,11 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
* dispatched. Mark that @tg was empty. This is automatically
* cleared on the next tg_update_disptime().
*/
- if (!sq->nr_queued[rw])
+ if (sq_queued(sq, rw) == 0)
tg->flags |= THROTL_TG_WAS_EMPTY;
- throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
- sq->nr_queued[rw]++;
+ throtl_qnode_add_bio(bio, qn, sq);
throtl_enqueue_tg(tg);
}
@@ -931,8 +947,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
* getting released prematurely. Remember the tg to put and put it
* after @bio is transferred to @parent_sq.
*/
- bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
- sq->nr_queued[rw]--;
+ bio = throtl_pop_queued(sq, &tg_to_put, rw);
throtl_charge_iops_bio(tg, bio);
@@ -949,7 +964,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
} else {
bio_set_flag(bio, BIO_BPS_THROTTLED);
throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
- &parent_sq->queued[rw]);
+ parent_sq);
BUG_ON(tg->td->nr_queued[rw] <= 0);
tg->td->nr_queued[rw]--;
}
@@ -1014,7 +1029,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
nr_disp += throtl_dispatch_tg(tg);
sq = &tg->service_queue;
- if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
+ if (sq_queued(sq, READ) || sq_queued(sq, WRITE))
tg_update_disptime(tg);
else
throtl_dequeue_tg(tg);
@@ -1067,9 +1082,11 @@ static void throtl_pending_timer_fn(struct timer_list *t)
dispatched = false;
while (true) {
+ unsigned int bio_cnt_r = sq_queued(sq, READ);
+ unsigned int bio_cnt_w = sq_queued(sq, WRITE);
+
throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
- sq->nr_queued[READ] + sq->nr_queued[WRITE],
- sq->nr_queued[READ], sq->nr_queued[WRITE]);
+ bio_cnt_r + bio_cnt_w, bio_cnt_r, bio_cnt_w);
ret = throtl_select_dispatch(sq);
if (ret) {
@@ -1131,7 +1148,7 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
spin_lock_irq(&q->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
- while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
+ while ((bio = throtl_pop_queued(td_sq, NULL, rw)))
bio_list_add(&bio_list_on_stack, bio);
spin_unlock_irq(&q->queue_lock);
@@ -1637,7 +1654,7 @@ void blk_throtl_cancel_bios(struct gendisk *disk)
static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
{
{
/* throtl is FIFO - if bios are already queued, should queue */
- if (tg->service_queue.nr_queued[rw])
+ if (sq_queued(&tg->service_queue, rw))
return false;
return tg_dispatch_time(tg, bio) == 0;
@@ -1711,7 +1728,7 @@ bool __blk_throtl_bio(struct bio *bio)
tg->bytes_disp[rw], bio->bi_iter.bi_size,
tg_bps_limit(tg, rw),
tg->io_disp[rw], tg_iops_limit(tg, rw),
- sq->nr_queued[READ], sq->nr_queued[WRITE]);
+ sq_queued(sq, READ), sq_queued(sq, WRITE));
td->nr_queued[rw]++;
throtl_add_bio_tg(bio, qn, tg);
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 5257e5c053e6..04e92cfd0ab1 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -41,7 +41,8 @@ struct throtl_service_queue {
* children throtl_grp's.
*/
struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */
- unsigned int nr_queued[2]; /* number of queued bios */
+ unsigned int nr_queued_bps[2]; /* number of queued bps bios */
+ unsigned int nr_queued_iops[2]; /* number of queued iops bios */
/*
* RB tree of active children throtl_grp's, which are sorted by
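
For readers who want to poke at the new bookkeeping outside the kernel, below is a minimal userspace sketch of what the patch does: each qnode keeps separate lists for bps- and iops-throttled bios, the service queue counts each list per direction, and sq_queued() sums the two counters wherever the old nr_queued[rw] was read. Only sq_queued() and the field names mirror the patch; the simplified struct bio, the bio_list helpers, and the main() driver are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct bio { struct bio *next; };

struct bio_list { struct bio *head, *tail; };

/* append at the tail: each list stays FIFO, as throtl requires */
static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->next = NULL;
	if (bl->tail)
		bl->tail->next = bio;
	else
		bl->head = bio;
	bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bio->next;
		if (!bl->head)
			bl->tail = NULL;
	}
	return bio;
}

/* one qnode owns two lists, mirroring qn->bios_bps / qn->bios_iops */
struct throtl_qnode {
	struct bio_list bios_bps;	/* bios waiting on the bps limit */
	struct bio_list bios_iops;	/* bios waiting on the iops limit */
};

struct throtl_service_queue {
	unsigned int nr_queued_bps[2];	/* queued bps bios [READ/WRITE] */
	unsigned int nr_queued_iops[2];	/* queued iops bios [READ/WRITE] */
};

/* same shape as the helper the patch adds */
static unsigned int sq_queued(struct throtl_service_queue *sq, int type)
{
	return sq->nr_queued_bps[type] + sq->nr_queued_iops[type];
}

int main(void)
{
	struct throtl_service_queue sq = {0};
	struct throtl_qnode qn = {0};
	struct bio a, b, c;

	/* queue two bps-throttled reads and one iops-throttled read */
	bio_list_add(&qn.bios_bps, &a);
	sq.nr_queued_bps[READ]++;
	bio_list_add(&qn.bios_bps, &b);
	sq.nr_queued_bps[READ]++;
	bio_list_add(&qn.bios_iops, &c);
	sq.nr_queued_iops[READ]++;

	/* the totals the patch now reports via sq_queued() */
	assert(sq_queued(&sq, READ) == 3);
	assert(sq_queued(&sq, WRITE) == 0);

	/* dispatch one bps bio: pop FIFO and drop that list's counter */
	assert(bio_list_pop(&qn.bios_bps) == &a);
	sq.nr_queued_bps[READ]--;

	printf("queued reads after one dispatch: %u\n", sq_queued(&sq, READ));
	return 0;
}

The point of the split shows up in the asserts: callers that only ask "is anything queued for this direction?" keep working through sq_queued(), while the dispatch path can now tell bps-limited bios apart from iops-limited ones.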