Entering __getblk_slow() already implies that a first (fastpath)
lookup has failed, so reorder the loop to create the buffers
immediately and avoid a redundant lookup on the first iteration.
This saves 5-10% of the total cost/latency of the slowpath.

Signed-off-by: Davidlohr Bueso <dave@xxxxxxxxxxxx>
---
 fs/buffer.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 5a4342881f3b..b02cced96529 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1139,15 +1139,15 @@ __getblk_slow(struct block_device *bdev, sector_t block,
 	for (;;) {
 		struct buffer_head *bh;
 
+		if (!grow_buffers(bdev, block, size, gfp))
+			return NULL;
+
 		if (blocking)
 			bh = __find_get_block_nonatomic(bdev, block, size);
 		else
 			bh = __find_get_block(bdev, block, size);
 		if (bh)
 			return bh;
-
-		if (!grow_buffers(bdev, block, size, gfp))
-			return NULL;
 	}
 }
 
-- 
2.39.5
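
For reference, a minimal sketch of the fastpath caller, approximating
bdev_getblk() in fs/buffer.c (exact code varies across kernel versions,
so treat names and details here as illustrative rather than authoritative).
It shows why entering __getblk_slow() implies a failed lookup: the caller
only falls through to the slowpath after its own lookup returns NULL,
which is what makes repeating the lookup before grow_buffers() redundant.

	/*
	 * Sketch only -- approximates the fastpath caller; not part
	 * of this patch, and may differ from the tree it applies to.
	 */
	struct buffer_head *bdev_getblk(struct block_device *bdev,
			sector_t block, unsigned size, gfp_t gfp)
	{
		struct buffer_head *bh;

		/* Fastpath: cache lookup only, no buffer allocation. */
		if (gfpflags_allow_blocking(gfp))
			bh = __find_get_block_nonatomic(bdev, block, size);
		else
			bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		/* Lookup failed: fall back to the allocating slowpath. */
		return __getblk_slow(bdev, block, size, gfp);
	}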