Hi,
On 2025/07/31 15:04, Damien Le Moal wrote:
On 7/31/25 3:32 PM, Yu Kuai wrote:
Hi,
On 2025/07/31 14:22, Damien Le Moal wrote:
On 7/31/25 3:20 PM, Hannes Reinecke wrote:
On 7/30/25 10:22, Yu Kuai wrote:
From: Yu Kuai <yukuai3@xxxxxxxxxx>
Replace the internal spinlock 'dd->lock' with the new spinlock in
elevator_queue. There are no functional changes.
Signed-off-by: Yu Kuai <yukuai3@xxxxxxxxxx>
---
block/mq-deadline.c | 58 +++++++++++++++++++++------------------------
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 9ab6c6256695..2054c023e855 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -101,7 +101,7 @@ struct deadline_data {
u32 async_depth;
int prio_aging_expire;
- spinlock_t lock;
+ spinlock_t *lock;
};
/* Maps an I/O priority class to a deadline scheduler priority. */
@@ -213,7 +213,7 @@ static void dd_merged_requests(struct request_queue *q, struct request *req,
const u8 ioprio_class = dd_rq_ioclass(next);
const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
dd->per_prio[prio].stats.merged++;
@@ -253,7 +253,7 @@ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
return stats->inserted - atomic_read(&stats->completed);
}
@@ -323,7 +323,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
enum dd_prio prio;
u8 ioprio_class;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
if (!list_empty(&per_prio->dispatch)) {
rq = list_first_entry(&per_prio->dispatch, struct request,
@@ -434,7 +434,7 @@ static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
enum dd_prio prio;
int prio_cnt;
- lockdep_assert_held(&dd->lock);
+ lockdep_assert_held(dd->lock);
prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
	!!dd_queued(dd, DD_IDLE_PRIO);
@@ -466,10 +466,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
struct request *rq;
enum dd_prio prio;
- spin_lock(&dd->lock);
rq = dd_dispatch_prio_aged_requests(dd, now);
if (rq)
- goto unlock;
+ return rq;
/*
* Next, dispatch requests in priority order. Ignore lower priority
@@ -481,9 +480,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
break;
}
-unlock:
- spin_unlock(&dd->lock);
-
return rq;
}
@@ -538,9 +534,9 @@ static void dd_exit_sched(struct elevator_queue *e)
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
- spin_lock(&dd->lock);
+ spin_lock(dd->lock);
queued = dd_queued(dd, prio);
- spin_unlock(&dd->lock);
+ spin_unlock(dd->lock);
WARN_ONCE(queued != 0,
"statistics for priority %d: i %u m %u d %u c %u\n",
Do you still need 'dd->lock'? Can't you just refer to the lock from the
elevator_queue structure directly?
Indeed. Little inline helpers for locking/unlocking q->elevator->lock would be
nice.
How about a first patch that factors out inline helpers like dd_lock()
and dd_unlock(), still using dd->lock without any functional change, and
a second patch that switches them to q->elevator->lock? (same for bfq)
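(For illustration only, a minimal sketch of what that first patch might look
like in block/mq-deadline.c, under the assumption that the helpers simply
wrap the existing dd->lock; the dd_lock()/dd_unlock() names come from the
proposal above, everything else is assumed.)

/* Sketch, not the actual patch: hide the scheduler lock behind helpers so a
 * follow-up patch can redirect them to q->elevator->lock without touching
 * every call site. */
static inline void dd_lock(struct deadline_data *dd)
{
	spin_lock(&dd->lock);
}

static inline void dd_unlock(struct deadline_data *dd)
{
	spin_unlock(&dd->lock);
}

Call sites such as the dd_exit_sched() hunk quoted above would then read
dd_lock(dd); queued = dd_queued(dd, prio); dd_unlock(dd); and the follow-up
patch only has to change the two helpers.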
Patch one can introduce elv->lock and the helpers, then patch 2 can use the
helpers to replace dd->lock. Just don't say "no functional change" in the
commit message; rather explain that things keep working the same way as
before, but using a different lock. That will address Bart's comment too.
And same for bfq in patch 3.
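(Again, just a rough sketch of that two-patch split; the elv_lock()/elv_unlock()
names and the exact member layout are assumptions, not the real series.)

/* Patch 1 (sketch): a per-elevator spinlock plus trivial lock helpers. */
struct elevator_queue {
	/* ... existing members ... */
	spinlock_t lock;	/* protects the scheduler's internal state */
};

static inline void elv_lock(struct elevator_queue *e)
{
	spin_lock(&e->lock);
}

static inline void elv_unlock(struct elevator_queue *e)
{
	spin_unlock(&e->lock);
}

/* Patch 2 (sketch): mq-deadline stops embedding its own spinlock and takes
 * the elevator lock instead, e.g. the dd_exit_sched() hunk quoted above
 * becomes: */
	elv_lock(e);
	queued = dd_queued(dd, prio);
	elv_unlock(e);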
Ok, this is what I did in the first RFC version:
https://lore.kernel.org/all/20250530080355.1138759-3-yukuai1@xxxxxxxxxxxxxxx/
I somehow convinced myself that using dd->lock was better. :(
Will change this in the next version.
Thanks,
Kuai