/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/virtio/virtqueue.c,v 1.2 2012/04/14 05:48:04 grehan Exp $
 */
/*
 * Implements the virtqueue interface as basically described
 * in the original VirtIO paper.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <sys/bus.h>

#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include "virtio.h"
#include "virtqueue.h"
#include "virtio_ring.h"

#include "virtio_bus_if.h"

struct virtqueue {
        device_t                 vq_dev;
        char                     vq_name[VIRTQUEUE_MAX_NAME_SZ];
        uint16_t                 vq_queue_index;
        uint16_t                 vq_nentries;
        uint32_t                 vq_flags;

#define VIRTQUEUE_FLAG_EVENT_IDX 0x0002

        int                      vq_alignment;
        int                      vq_ring_size;
        void                    *vq_ring_mem;
        virtqueue_intr_t        *vq_intrhand;
        void                    *vq_intrhand_arg;

        struct vring             vq_ring;
        uint16_t                 vq_free_cnt;
        uint16_t                 vq_queued_cnt;
        /*
         * Head of the free chain in the descriptor table. If
         * there are no free descriptors, this will be set to
         * VQ_RING_DESC_CHAIN_END.
         */
        uint16_t                 vq_desc_head_idx;
        /*
         * Last consumed descriptor in the used table;
         * trails vq_ring.used->idx.
         */
        uint16_t                 vq_used_cons_idx;

        struct vq_desc_extra {
                void            *cookie;
                uint16_t         ndescs;
        } vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END  32768

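/*
 * Worked example: even a maximum-size ring of 32768 entries only has
 * valid descriptor indices 0 through 32767, so 32768 can never alias a
 * real descriptor and is safe to use as the chain terminator.
 */
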
#define VQASSERT(_vq, _exp, _msg, ...)                          \
    KASSERT((_exp),("%s: %s - "_msg, __func__, (_vq)->vq_name,  \
        ##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)                     \
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,                \
        "invalid ring index: %d, max: %d", (_idx),              \
        (_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)                          \
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==                  \
        VQ_RING_DESC_CHAIN_END, "full ring terminated "         \
        "incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static void     vq_ring_init(struct virtqueue *);
static void     vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t vq_ring_enqueue_segments(struct virtqueue *,
                    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int      vq_ring_must_notify_host(struct virtqueue *);
static void     vq_ring_notify_host(struct virtqueue *);
static void     vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
        uint64_t mask;

        mask = (1 << VIRTIO_TRANSPORT_F_START) - 1;
        mask |= VIRTIO_RING_F_EVENT_IDX;

        return (features & mask);
}

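/*
 * Usage sketch (hypothetical caller; "negotiated" is illustrative):
 *
 *      negotiated = virtqueue_filter_features(negotiated);
 *
 * Every feature bit at or above VIRTIO_TRANSPORT_F_START is cleared,
 * with the exception of VIRTIO_RING_F_EVENT_IDX, the only transport
 * feature this implementation understands.
 */
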
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
        struct virtqueue *vq;
        int error;

        vq = NULL;
        error = 0;

        if (size == 0) {
                device_printf(dev,
                    "virtqueue %d (%s) does not exist (size is zero)\n",
                    queue, info->vqai_name);
                return (ENODEV);
        } else if (!powerof2(size)) {
                device_printf(dev,
                    "virtqueue %d (%s) size is not a power of 2: %d\n",
                    queue, info->vqai_name, size);
                return (ENXIO);
        }

        vq = kmalloc(sizeof(struct virtqueue) +
            size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (vq == NULL) {
                device_printf(dev, "cannot allocate virtqueue\n");
                return (ENOMEM);
        }

        vq->vq_dev = dev;
        strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
        vq->vq_queue_index = queue;
        vq->vq_alignment = align;
        vq->vq_nentries = size;
        vq->vq_free_cnt = size;
        vq->vq_intrhand = info->vqai_intr;
        vq->vq_intrhand_arg = info->vqai_intr_arg;

        if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
                vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

        vq->vq_ring_size = round_page(vring_size(size, align));
        vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
        if (vq->vq_ring_mem == NULL) {
                device_printf(dev,
                    "cannot allocate memory for virtqueue ring\n");
                error = ENOMEM;
                goto fail;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        *vqp = vq;

fail:
        if (error)
                virtqueue_free(vq);

        return (error);
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
        struct vq_desc_extra *dxp;
        int i;

        if (vq->vq_nentries != size) {
                device_printf(vq->vq_dev,
                    "%s: '%s' changed size; old=%hu, new=%hu\n",
                    __func__, vq->vq_name, vq->vq_nentries, size);
                return (EINVAL);
        }

        /* Warn if the virtqueue was not properly cleaned up. */
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev,
                    "%s: warning, '%s' virtqueue not empty, "
                    "leaking %d entries\n", __func__, vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        vq->vq_desc_head_idx = 0;
        vq->vq_used_cons_idx = 0;
        vq->vq_queued_cnt = 0;
        vq->vq_free_cnt = vq->vq_nentries;

        /* To be safe, reset all our allocated memory. */
        bzero(vq->vq_ring_mem, vq->vq_ring_size);
        for (i = 0; i < vq->vq_nentries; i++) {
                dxp = &vq->vq_descx[i];
                dxp->cookie = NULL;
                dxp->ndescs = 0;
        }

        vq_ring_init(vq);
        virtqueue_disable_intr(vq);

        return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{
        if (vq->vq_free_cnt != vq->vq_nentries) {
                device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
                    "leaking %d entries\n", vq->vq_name,
                    vq->vq_nentries - vq->vq_free_cnt);
        }

        if (vq->vq_ring_mem != NULL) {
                contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
                vq->vq_ring_size = 0;
                vq->vq_ring_mem = NULL;
        }

        kfree(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{
        return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{
        return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{
        return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{
        return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq, struct spinlock *interlock)
{
        /* Ensure updated avail->idx is visible to host. */
        cpu_mfence();

        if (vq_ring_must_notify_host(vq)) {
                spin_unlock(interlock);
                vq_ring_notify_host(vq);
                spin_lock(interlock);
        }
        vq->vq_queued_cnt = 0;
}

int
virtqueue_nused(struct virtqueue *vq)
{
        uint16_t used_idx, nused;

        used_idx = vq->vq_ring.used->idx;
        nused = (uint16_t)(used_idx - vq->vq_used_cons_idx);
        VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

        return (nused);
}

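/*
 * The unsigned 16-bit subtraction above stays correct across index
 * wraparound. Example: used->idx has wrapped around to 2 while
 * vq_used_cons_idx is still 65533; (uint16_t)(2 - 65533) == 5, the
 * number of pending used entries.
 */
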
int
virtqueue_intr(struct virtqueue *vq)
{
        if (vq->vq_intrhand == NULL ||
            vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (0);

        vq->vq_intrhand(vq->vq_intrhand_arg);

        return (1);
}

/*
 * Enable interrupts on a given virtqueue. Returns 1 if there are
 * additional entries to process on the virtqueue after we return.
 */
int
virtqueue_enable_intr(struct virtqueue *vq)
{
        /*
         * Enable interrupts, making sure we get the latest
         * index of what's already been consumed.
         */
        vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx;
        } else {
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        }

        cpu_mfence();

        /*
         * Additional items may have been consumed in the time since
         * we last checked and enabled interrupts above. Let our
         * caller know so it processes the new entries.
         */
        if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
                return (1);

        return (0);
}

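/*
 * With VIRTQUEUE_FLAG_EVENT_IDX negotiated, storing vq_used_cons_idx
 * into used_event above requests an interrupt as soon as the host
 * publishes any used entry past that index; without it, clearing
 * VRING_AVAIL_F_NO_INTERRUPT simply re-enables interrupts for every
 * used entry.
 */
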
int
virtqueue_postpone_intr(struct virtqueue *vq)
{
        uint16_t ndesc;

        /*
         * Postpone until at least half of the available descriptors
         * have been consumed.
         *
         * XXX Adaptive factor? (Linux uses 3/4)
         */
        ndesc = (uint16_t)(vq->vq_ring.avail->idx - vq->vq_used_cons_idx) / 2;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX)
                vring_used_event(&vq->vq_ring) = vq->vq_used_cons_idx + ndesc;
        else
                vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

        cpu_mfence();

        /*
         * Enough items may have already been consumed to meet our
         * threshold since we last checked. Let our caller know so
         * it processes the new entries.
         */
        if (virtqueue_nused(vq) > ndesc)
                return (1);

        return (0);
}

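/*
 * Worked example: with avail->idx at 100 and vq_used_cons_idx at 80,
 * 20 descriptors are outstanding, so ndesc is 10 and used_event is set
 * to 90. The host then skips the interrupt until it has consumed at
 * least half of the backlog.
 */
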
void
virtqueue_disable_intr(struct virtqueue *vq)
{
        /*
         * Note this is only considered a hint to the host.
         */
        if ((vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) == 0)
                vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
        struct vq_desc_extra *dxp;
        int needed;
        uint16_t head_idx, idx;

        needed = readable + writable;

        VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
        VQASSERT(vq, needed == sg->sg_nseg,
            "segment count mismatch, %d, %d", needed, sg->sg_nseg);

        if (needed < 1)
                return (EINVAL);
        if (vq->vq_free_cnt == 0)
                return (ENOSPC);
        if (vq->vq_free_cnt < needed)
                return (EMSGSIZE);

        head_idx = vq->vq_desc_head_idx;
        VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
        dxp = &vq->vq_descx[head_idx];

        VQASSERT(vq, dxp->cookie == NULL,
            "cookie already exists for index %d", head_idx);
        dxp->cookie = cookie;
        dxp->ndescs = needed;

        idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
            sg, readable, writable);

        vq->vq_desc_head_idx = idx;
        vq->vq_free_cnt -= needed;
        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);
        else
                VQ_RING_ASSERT_VALID_IDX(vq, idx);

        vq_ring_update_avail(vq, head_idx);

        return (0);
}

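/*
 * Hypothetical caller sketch (names "req" and "sc_lock" are
 * illustrative, not part of this file): queue a request with one
 * readable header segment and one writable data segment, then notify:
 *
 *      struct sglist_seg segs[2];
 *      struct sglist sg;
 *
 *      sglist_init(&sg, 2, segs);
 *      sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *      sglist_append(&sg, req->data, req->datalen);
 *      if (virtqueue_enqueue(vq, req, &sg, 1, 1) == 0)
 *              virtqueue_notify(vq, &sc_lock);
 */
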
void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
        struct vring_used_elem *uep;
        void *cookie;
        uint16_t used_idx, desc_idx;

        if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
                return (NULL);

        used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
        uep = &vq->vq_ring.used->ring[used_idx];

        desc_idx = (uint16_t) uep->id;
        if (len != NULL)
                *len = uep->len;

        vq_ring_free_chain(vq, desc_idx);

        cookie = vq->vq_descx[desc_idx].cookie;
        VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
        vq->vq_descx[desc_idx].cookie = NULL;

        return (cookie);
}

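/*
 * The used ring is indexed with "& (vq_nentries - 1)", which is why
 * the queue size must be a power of 2: for a 256-entry ring, a
 * monotonically increasing vq_used_cons_idx of 260 maps to slot 4.
 */
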
void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
        void *cookie;

        /* We only poll the virtqueue when dumping to virtio-blk. */
        while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
                cpu_pause();

        return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
        void *cookie;
        int idx;

        cookie = NULL;
        idx = *last;

        while (idx < vq->vq_nentries && cookie == NULL) {
                if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
                        vq->vq_descx[idx].cookie = NULL;
                        /* Free chain to keep free count consistent. */
                        vq_ring_free_chain(vq, idx);
                }
                idx++;
        }

        *last = idx;

        return (cookie);
}

void
virtqueue_dump(struct virtqueue *vq)
{
        if (vq == NULL)
                return;

        kprintf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
            "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
            "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
            vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
            virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
            vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
            vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
            vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
        struct vring *vr;
        char *ring_mem;
        int i, size;

        ring_mem = vq->vq_ring_mem;
        size = vq->vq_nentries;
        vr = &vq->vq_ring;

        vring_init(vr, size, ring_mem, vq->vq_alignment);

        for (i = 0; i < size - 1; i++)
                vr->desc[i].next = i + 1;
        vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

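/*
 * After initialization the entire descriptor table forms one free
 * chain. For a 4-entry ring:
 *
 *      desc[0].next == 1, desc[1].next == 2, desc[2].next == 3,
 *      desc[3].next == VQ_RING_DESC_CHAIN_END
 */
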
static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
        uint16_t avail_idx;

        /*
         * Place the head of the descriptor chain into the next slot and make
         * it usable to the host. The chain is made available now rather than
         * deferring to virtqueue_notify() in the hopes that if the host is
         * currently running on another CPU, we can keep it processing the new
         * descriptor.
         */
        avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
        vq->vq_ring.avail->ring[avail_idx] = desc_idx;

        /* The ring entry must be visible before the index is updated. */
        cpu_sfence();
        vq->vq_ring.avail->idx++;

        /* Keep pending count until virtqueue_notify() for debugging. */
        vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
        struct sglist_seg *seg;
        struct vring_desc *dp;
        int i, needed;
        uint16_t idx;

        needed = readable + writable;

        for (i = 0, idx = head_idx, seg = sg->sg_segs;
             i < needed;
             i++, idx = dp->next, seg++) {
                VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
                    "premature end of free desc chain");

                dp = &desc[idx];
                dp->addr = seg->ss_paddr;
                dp->len = seg->ss_len;
                dp->flags = 0;

                if (i < needed - 1)
                        dp->flags |= VRING_DESC_F_NEXT;
                if (i >= readable)
                        dp->flags |= VRING_DESC_F_WRITE;
        }

        return (idx);
}

static int
vq_ring_must_notify_host(struct virtqueue *vq)
{
        uint16_t new_idx, prev_idx, event_idx;

        if (vq->vq_flags & VIRTQUEUE_FLAG_EVENT_IDX) {
                new_idx = vq->vq_ring.avail->idx;
                prev_idx = new_idx - vq->vq_queued_cnt;
                event_idx = vring_avail_event(&vq->vq_ring);

                return (vring_need_event(event_idx, new_idx, prev_idx) != 0);
        }

        return ((vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0);
}

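/*
 * vring_need_event() (defined in virtio_ring.h) evaluates
 *
 *      (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - prev_idx)
 *
 * i.e. whether this batch of avail entries crossed the index the host
 * asked to be woken at. Example: event_idx = 5, prev_idx = 4 and
 * new_idx = 7 gives 1 < 3, so the host must be notified.
 */
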
static void
vq_ring_notify_host(struct virtqueue *vq)
{
        VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
        struct vring_desc *dp;
        struct vq_desc_extra *dxp;

        VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
        dp = &vq->vq_ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];

        if (vq->vq_free_cnt == 0)
                VQ_RING_ASSERT_CHAIN_TERM(vq);

        vq->vq_free_cnt += dxp->ndescs;
        dxp->ndescs--;

        while (dp->flags & VRING_DESC_F_NEXT) {
                VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
                dp = &vq->vq_ring.desc[dp->next];
                dxp->ndescs--;
        }
        VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

        /*
         * We must append the existing free chain, if any, to the end of
         * the newly freed chain. If the virtqueue was completely used,
         * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
         */
        dp->next = vq->vq_desc_head_idx;
        vq->vq_desc_head_idx = desc_idx;
}
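
/*
 * Example: freeing the chain {3 -> 7 -> 9} while the free list head
 * was 12 leaves desc[9].next == 12 and vq_desc_head_idx == 3; the old
 * free chain is spliced onto the tail of the newly freed one.
 */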