Because the NFS client will already happily handle misaligned O_DIRECT IO
(by sending it out to NFSD via RPC), this commit's new capabilities are for
the benefit of LOCALIO and require the nfs modparam:
localio_O_DIRECT_align_misaligned_IO=Y

When enabled, a misaligned DIO WRITE is split into a head, middle and tail
as needed.  The large middle extent is DIO-aligned, and the head and/or
tail are misaligned (due to each being a partial page).  The misaligned
head and/or tail extents are issued using buffered IO, and the DIO-aligned
middle is issued using O_DIRECT.

Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
---
 fs/nfs/direct.c | 40 +++++++++++++++++++++++++++++++++++-----
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fc011571c5d29..3803289a94793 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -963,8 +963,15 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                 if (result < 0)
                         break;
 
-                bytes = result;
-                npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
+                /* Limit the amount of bytes serviced each iteration to aligned batches */
+                if (pos < dreq->middle_offset && dreq->start_len)
+                        bytes = min_t(size_t, dreq->start_len, result);
+                else if (pos < dreq->end_offset && dreq->middle_len)
+                        bytes = min_t(size_t, dreq->middle_len, result);
+                else
+                        bytes = result;
+                npages = (bytes + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
+
                 for (i = 0; i < npages; i++) {
                         struct nfs_page *req;
                         unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
@@ -983,6 +990,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                         }
 
                         pgbase = 0;
+                        result -= req_len;
                         bytes -= req_len;
                         requested_bytes += req_len;
                         pos += req_len;
@@ -992,9 +1000,28 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                 continue;
                         }
 
+                        /* Issue IO if this req was the end of the start or middle */
+                        if (bytes == 0) {
+                                if ((dreq->start_len &&
+                                     pos == dreq->middle_offset && result >= dreq->middle_len) ||
+                                    (dreq->end_len &&
+                                     pos == dreq->end_offset && result == dreq->end_len))
+                                        desc.pg_doio_now = 1;
+                        }
+
                         nfs_lock_request(req);
-                        if (nfs_pageio_add_request(&desc, req))
+                        if (nfs_pageio_add_request(&desc, req)) {
+                                if (desc.pg_doio_now) {
+                                        /* Reset and handle iter to next aligned boundary */
+                                        iov_iter_revert(iter, result);
+                                        desc.pg_doio_now = 0;
+                                        break;
+                                }
                                 continue;
+                        }
+
+                        if (unlikely(desc.pg_doio_now))
+                                desc.pg_doio_now = 0;
 
                         /* Exit on hard errors */
                         if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
@@ -1092,8 +1119,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
                 goto out;
 
         dreq->inode = inode;
-        dreq->max_count = count;
-        dreq->io_start = pos;
+        if (swap || !nfs_analyze_dio(pos, count, dreq)) {
+                dreq->max_count = count;
+                dreq->io_start = pos;
+        }
+
         dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
         l_ctx = nfs_get_lock_context(dreq->ctx);
         if (IS_ERR(l_ctx)) {
-- 
2.44.0
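
For reviewers who don't have the rest of the series in front of them:
nfs_analyze_dio() is introduced by an earlier patch and is not shown above.
The snippet below is only an illustrative, userspace-compilable sketch of
the head/middle/tail arithmetic the hunks above consume; the helper name,
struct and return convention are assumptions inferred from how
dreq->start_len, dreq->middle_offset, dreq->middle_len, dreq->end_offset
and dreq->end_len are used in the diff, not the actual implementation.

/*
 * Sketch only: carve a misaligned write spanning [pos, pos + count) into a
 * partial-page head, a page-aligned middle and a partial-page tail.  The
 * real helper operates on struct nfs_direct_req and PAGE_SIZE; the names
 * here are illustrative.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096ULL

struct example_dio_layout {
        uint64_t start_len;     /* misaligned head, issued as buffered IO */
        uint64_t middle_offset; /* first page-aligned byte */
        uint64_t middle_len;    /* aligned middle, issued using O_DIRECT */
        uint64_t end_offset;    /* first byte of the misaligned tail */
        uint64_t end_len;       /* misaligned tail, issued as buffered IO */
};

/* Returns true if the IO was split, false if no split is needed/possible. */
static bool example_analyze_dio(uint64_t pos, size_t count,
                                struct example_dio_layout *l)
{
        uint64_t middle_offset = (pos + EXAMPLE_PAGE_SIZE - 1) &
                                 ~(EXAMPLE_PAGE_SIZE - 1);
        uint64_t end_offset = (pos + count) & ~(EXAMPLE_PAGE_SIZE - 1);

        if (middle_offset == pos && end_offset == pos + count)
                return false;   /* already DIO-aligned: nothing to split */
        if (end_offset <= middle_offset)
                return false;   /* no aligned middle worth splitting out */

        l->start_len = middle_offset - pos;
        l->middle_offset = middle_offset;
        l->middle_len = end_offset - middle_offset;
        l->end_offset = end_offset;
        l->end_len = pos + count - end_offset;
        return true;
}

int main(void)
{
        struct example_dio_layout l;

        /* e.g. a 10000-byte write starting 100 bytes into a page */
        if (example_analyze_dio(100, 10000, &l))
                printf("head=+%llu middle=[%llu,+%llu) tail=[%llu,+%llu)\n",
                       (unsigned long long)l.start_len,
                       (unsigned long long)l.middle_offset,
                       (unsigned long long)l.middle_len,
                       (unsigned long long)l.end_offset,
                       (unsigned long long)l.end_len);
        return 0;
}

Under such a layout, the scheduling loop above caps each batch at the head
or middle boundary, sets pg_doio_now once that extent has been fully
queued, and reverts the iterator so the next pass restarts exactly at the
following aligned boundary; the partial-page head/tail are then issued as
buffered IO and the aligned middle as O_DIRECT, per the changelog.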