buffer_migrate_folio_norefs() should avoid holding the spin lock so that we
can support large folios. The prior commit "fs/buffer: avoid races with
folio migrations on __find_get_block_slow()" ripped out the only rationale
for having the atomic context, so we can remove the spin lock call now.

Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
---
 mm/migrate.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 9d6f59cf77f8..439aaa610104 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -861,12 +861,12 @@ static int __buffer_migrate_folio(struct address_space *mapping,
 			}
 			bh = bh->b_this_page;
 		} while (bh != head);
+		spin_unlock(&mapping->i_private_lock);
 		if (busy) {
 			if (invalidated) {
 				rc = -EAGAIN;
 				goto unlock_buffers;
 			}
-			spin_unlock(&mapping->i_private_lock);
 			invalidate_bh_lrus();
 			invalidated = true;
 			goto recheck_buffers;
@@ -884,8 +884,6 @@ static int __buffer_migrate_folio(struct address_space *mapping,
 	} while (bh != head);
 
 unlock_buffers:
-	if (check_refs)
-		spin_unlock(&mapping->i_private_lock);
 	bh = head;
 	do {
 		unlock_buffer(bh);
-- 
2.47.2
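
For readers skimming the hunks: the net effect is that i_private_lock is now
dropped right after the buffer scan, instead of only on the busy/invalidate
path or at unlock_buffers, so nothing that may sleep afterwards (such as
invalidate_bh_lrus(), or buffer handling needed for large folios) runs with
the spin lock held. The following is a minimal, standalone userspace sketch
of that locking pattern only; it is not kernel code, and the helper names
(scan_busy(), flush_caches(), migrate_like()) are hypothetical stand-ins for
illustration.

/*
 * Userspace sketch of the "unlock before anything that can block" pattern
 * this patch applies to __buffer_migrate_folio(). Hypothetical helpers,
 * not kernel APIs.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the b_count scan that must run under the lock. */
static bool scan_busy(void)
{
	static int calls;
	return ++calls == 1;	/* busy on the first pass, clean on retry */
}

/* Stand-in for a blocking operation, like invalidate_bh_lrus() which may sleep. */
static void flush_caches(void)
{
	usleep(1000);
}

static int migrate_like(void)
{
	bool invalidated = false;
	bool busy;

recheck:
	pthread_mutex_lock(&lock);
	busy = scan_busy();
	/* Drop the lock right after the scan, before anything that can block. */
	pthread_mutex_unlock(&lock);

	if (busy) {
		if (invalidated)
			return -1;	/* still busy after one flush: give up */
		flush_caches();		/* safe: no lock held here */
		invalidated = true;
		goto recheck;
	}
	return 0;
}

int main(void)
{
	printf("migrate_like() -> %d\n", migrate_like());
	return 0;
}

The design point mirrored here is simply that the retry loop re-takes the
lock on each pass, so releasing it unconditionally after the scan does not
change what the scan observes; it only narrows the region that runs in
atomic context.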