Factor out the core logic for updating last_used_idx to be reused by
the packed in-order implementation.

Acked-by: Eugenio Pérez <eperezma@xxxxxxxxxx>
Reviewed-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
---
 drivers/virtio/virtio_ring.c | 43 +++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index b0eb68efb535..271508203443 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1751,6 +1751,30 @@ static bool more_used_packed(const struct vring_virtqueue *vq)
 	return virtqueue_poll_packed(vq, READ_ONCE(vq->last_used_idx));
 }
 
+static void update_last_used_idx_packed(struct vring_virtqueue *vq,
+					u16 id, u16 last_used,
+					u16 used_wrap_counter)
+{
+	last_used += vq->packed.desc_state[id].num;
+	if (unlikely(last_used >= vq->packed.vring.num)) {
+		last_used -= vq->packed.vring.num;
+		used_wrap_counter ^= 1;
+	}
+
+	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+	WRITE_ONCE(vq->last_used_idx, last_used);
+
+	/*
+	 * If we expect an interrupt for the next entry, tell host
+	 * by writing event index and flush out the write before
+	 * the read in the next get_buf call.
+	 */
+	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
+		virtio_store_mb(vq->weak_barriers,
+				&vq->packed.vring.driver->off_wrap,
+				cpu_to_le16(vq->last_used_idx));
+}
+
 static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
 					  unsigned int *len,
 					  void **ctx)
@@ -1794,24 +1818,7 @@ static void *virtqueue_get_buf_ctx_packed(struct vring_virtqueue *vq,
 	ret = vq->packed.desc_state[id].data;
 	detach_buf_packed(vq, id, ctx);
 
-	last_used += vq->packed.desc_state[id].num;
-	if (unlikely(last_used >= vq->packed.vring.num)) {
-		last_used -= vq->packed.vring.num;
-		used_wrap_counter ^= 1;
-	}
-
-	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
-	WRITE_ONCE(vq->last_used_idx, last_used);
-
-	/*
-	 * If we expect an interrupt for the next entry, tell host
-	 * by writing event index and flush out the write before
-	 * the read in the next get_buf call.
-	 */
-	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
-		virtio_store_mb(vq->weak_barriers,
-				&vq->packed.vring.driver->off_wrap,
-				cpu_to_le16(vq->last_used_idx));
+	update_last_used_idx_packed(vq, id, last_used, used_wrap_counter);
 
 	LAST_ADD_TIME_INVALID(vq);
 
-- 
2.39.5
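
For readers unfamiliar with the packed ring index encoding, the sketch below
is a minimal userspace illustration (not part of the patch) of the arithmetic
that update_last_used_idx_packed() factors out: the used index wraps modulo
the ring size, the 1-bit wrap counter flips on every wrap, and both are stored
together with the counter in bit 15 (VRING_PACKED_EVENT_F_WRAP_CTR). The
helper name advance_last_used() and the ring size of 256 are made-up example
values, not kernel identifiers.

#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_EVENT_F_WRAP_CTR	15	/* wrap counter lives in bit 15 */

/* Standalone mirror of the index/wrap-counter update factored out above. */
static uint16_t advance_last_used(uint16_t last_used, uint16_t wrap_counter,
				  uint16_t nbufs, uint16_t ring_size)
{
	last_used += nbufs;
	if (last_used >= ring_size) {
		last_used -= ring_size;	/* wrap around the ring... */
		wrap_counter ^= 1;	/* ...and flip the wrap counter */
	}
	/* Index in bits 0..14, wrap counter in bit 15. */
	return last_used | (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR);
}

int main(void)
{
	uint16_t ring_size = 256;	/* example ring size */
	uint16_t idx = advance_last_used(250, 0, 10, ring_size);

	/* 250 + 10 wraps a 256-entry ring: prints "index=4 wrap=1". */
	printf("index=%u wrap=%u\n",
	       idx & ((1u << VRING_PACKED_EVENT_F_WRAP_CTR) - 1),
	       (idx >> VRING_PACKED_EVENT_F_WRAP_CTR) & 1u);
	return 0;
}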