
[gsoc] use of lwkt messages


From: Stéphanie Ouillon <stephanie@xxxxxxxxx>
Date: Thu, 23 Jun 2011 13:22:45 +0200

Hello,

I have been working on the virtio network device driver (see attached file).
I am using LWKT messages to deal with vioif_rx_vq_done() and vioif_populate_rx_mbufs(), but I am not sure I did it correctly. I wanted an equivalent of the software interrupts which are used in the NetBSD driver.

Each time there is a free slot, vioif_rx_vq_done() calls vioif_populate_rx_mbufs(). I created a kthread to execute vioif_populate_rx_mbufs().
Using LWKT messages should make it possible to handle several requests at a time (I read that this is better than locking and unlocking the thread on every call to vioif_populate_rx_mbufs()).
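
For reference, here is the overall shape I am aiming for -- only a sketch,
based on my reading of sys/msgport2.h, using the public lwkt_waitport()
inline (which wraps mp_waitport) instead of calling mp_waitport directly:

    static void
    vioif_rx_thread(void *arg)
    {
        struct virtio_net_softc *sc = device_get_softc((device_t)arg);
        lwkt_msg_t lmsg;

        /* block until vioif_rx_vq_done() sends a message to the port */
        while ((lmsg = lwkt_waitport(&sc->sc_port, 0)) != NULL) {
            vioif_populate_rx_mbufs(sc);
            lwkt_replymsg(lmsg, 0);
        }
    }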

1. I create one thread in virtio_net_attach. (l.481 in virtio-net.c)

    error = kthread_create(vioif_rx_thread, sc->dev, &sc->sc_td,
            device_get_name(dev));
    if (error) {
        kprintf("unable to create rx event thread.\n");
        goto err;
    }
    /* lwkt_initport_thread(&sc->sc_port, sc->sc_td); not declared in any header ? */
    lwkt_waitport(&sc->sc_port, 0);


2. Here is how I declare vioif_rx_thread (l.303 in virtio-net.c). I don't really know how to declare the flags (which value?).

static void
vioif_rx_thread(void *arg){

    device_t dev = arg;
    struct virtio_net_softc *sc = device_get_softc(dev);

    // FLAGS = ?
    while (lwkt_thread_waitport(&sc->sc_port, FLAGS)) {
        vioif_populate_rx_mbufs(sc);
        // int error = 0 ?
        lwkt_replymsg(&sc->sc_lmsg, 0);
    }
}

3. Here is where the LWKT message is initialized and sent (l.286 in virtio-net.c)

static int
vioif_rx_vq_done(struct virtqueue *vq){

    struct virtio_softc *vsc = vq->vq_owner;
    struct virtio_net_softc *sc = device_get_softc(vsc->sc_child);
    int r = 0;

    // flags = 0 ?
    lwkt_initmsg(&sc->sc_lmsg, &sc->sc_port, 0);

    r = vioif_rx_deq(sc);
    if (r)
        lwkt_sendmsg(&sc->sc_port, &sc->sc_lmsg);

    return r;
}



In particular, how should I declare the flags, and with which value?
Is the use of lwkt_thread_waitport() correct?

Thank you


--
Stéphanie

/*
 * written: probe, attach
 * to be tested: probe, attach
 *
 * current: detach (entirely created)
 * next: vioif_rx_vq_done, vioif_tx_vq_done, vioif_ctrl_vq_done
 * virtio_start_vq_intr, virtio_stop_vq_intr, vioif_deferred_init
 * ifnet functions
 *
 * check if_attach and ether_ifattach
 *
 * */




/* $NetBSD$	*/

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/fbio.h>
#include <sys/linker_set.h>
#include <sys/device.h>
#include <sys/thread2.h>
#include <sys/rman.h>
#include <sys/disk.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/condvar.h>
//#include <sys/mutex2.h>
#include <sys/sockio.h>
#include <sys/resource.h>
#include <sys/types.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>

#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/kthread.h>
#include <sys/serialize.h>
#include <sys/msgport.h>
#include <sys/msgport2.h>



#include "virtiovar.h"
#include "virtioreg.h"

//#define ether_sprintf(x) "<dummy>"
/*
 * if_vioifreg.h:
 */
/* Configuration registers */
#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */

/* Feature bits */
#define VIRTIO_NET_F_CSUM	(1<<0)
#define VIRTIO_NET_F_GUEST_CSUM	(1<<1)
#define VIRTIO_NET_F_MAC	(1<<5)
#define VIRTIO_NET_F_GSO	(1<<6)
#define VIRTIO_NET_F_GUEST_TSO4	(1<<7)
#define VIRTIO_NET_F_GUEST_TSO6	(1<<8)
#define VIRTIO_NET_F_GUEST_ECN	(1<<9)
#define VIRTIO_NET_F_GUEST_UFO	(1<<10)
#define VIRTIO_NET_F_HOST_TSO4	(1<<11)
#define VIRTIO_NET_F_HOST_TSO6	(1<<12)
#define VIRTIO_NET_F_HOST_ECN	(1<<13)
#define VIRTIO_NET_F_HOST_UFO	(1<<14)
#define VIRTIO_NET_F_MRG_RXBUF	(1<<15)
#define VIRTIO_NET_F_STATUS	(1<<16)
#define VIRTIO_NET_F_CTRL_VQ	(1<<17)
#define VIRTIO_NET_F_CTRL_RX	(1<<18)
#define VIRTIO_NET_F_CTRL_VLAN	(1<<19)

/* Status */
#define VIRTIO_NET_S_LINK_UP	1

/* Header Flags */
#define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
#define VIRTIO_NET_HDR_GSO_NONE		0 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV4	1 /* gso_type */
#define VIRTIO_NET_HDR_GSO_UDP		3 /* gso_type */
#define VIRTIO_NET_HDR_GSO_TCPV6	4 /* gso_type */
#define VIRTIO_NET_HDR_GSO_ECN		0x80 /* gso_type, |'ed */

#define VIRTIO_NET_MAX_GSO_LEN		(65536+ETHER_HDR_LEN)
#define VIRTIO_NET_TX_MAXNSEGS		(16) /* XXX */
#define VIRTIO_NET_CTRL_MAC_MAXENTRIES	(64) /* XXX */


#define RX_VQ 0
#define TX_VQ 1
#define CTRL_VQ 2

#define NDEVNAMES	(sizeof(virtio_device_name)/sizeof(char*))
#define MINSEG_INDIRECT     2 /* use indirect if nsegs >= this value */

struct virtio_net_hdr {

	uint8_t		flags;
	uint8_t 	gso_type;
	uint16_t	hdr_len;
	uint16_t	gso_size;
	uint16_t	csum_start;
	uint16_t	csum_offset;

#if 0
	uint16_t	num_buffers;	/* if VIRTIO_NET_F_MRG_RXBUF enabled */
#endif

}__packed;

struct virtio_net_softc {
	device_t dev;
	struct virtio_softc *sc_virtio;
	struct virtqueue sc_vq[3];
	int sc_readonly;
	uint32_t sc_features;
	int maxxfersize;

	/* net specific */
	short sc_ifflags;
	uint8_t sc_mac[ETHER_ADDR_LEN];
	struct arpcom sc_arpcom;



	bus_dma_segment_t	sc_hdr_segs[1];
	struct virtio_net_hdr	*sc_hdrs;
	#define sc_rx_hdrs	sc_hdrs
	struct virtio_net_hdr	*sc_tx_hdrs;
	struct virtio_net_ctrl_cmd *sc_ctrl_cmd;
	struct virtio_net_ctrl_status *sc_ctrl_status;
	struct virtio_net_ctrl_rx *sc_ctrl_rx;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_uc;
	struct virtio_net_ctrl_mac_tbl *sc_ctrl_mac_tbl_mc;

	/* kmem */
	bus_dmamap_t		*sc_arrays;
    #define sc_rxhdr_dmamaps sc_arrays
	bus_dmamap_t		*sc_txhdr_dmamaps;
	bus_dmamap_t		*sc_rx_dmamaps;
	bus_dmamap_t		*sc_tx_dmamaps;
	struct mbuf		**sc_rx_mbufs;
	struct mbuf		**sc_tx_mbufs;
	bus_dmamap_t		sc_ctrl_cmd_dmamap;
		bus_dmamap_t		sc_ctrl_status_dmamap;
		bus_dmamap_t		sc_ctrl_rx_dmamap;
		bus_dmamap_t		sc_ctrl_tbl_uc_dmamap;
		bus_dmamap_t		sc_ctrl_tbl_mc_dmamap;

		enum  {
			FREE, INUSE, DONE
		} sc_ctrl_inuse;
		//kcondvar_t		sc_ctrl_wait;
		//kmutex_t		sc_ctrl_wait_lock;
		struct spinlock sc_ctrl_wait_lock;
		struct lwkt_serialize sc_serializer;

		/* LWKT messages*/
		struct lwkt_msg	sc_lmsg;
		struct lwkt_port sc_port;
		thread_t sc_td;

};

/* Declarations */

void virtio_net_identify(driver_t *driver, device_t parent);
static int virtio_net_attach(device_t dev);
static int virtio_net_detach(device_t dev);

/* ifnet interface functions */
static int	vioif_init(struct ifnet *);
static void	vioif_down(struct ifnet *, int);
static void	vioif_start(struct ifnet *);
static int	vioif_ioctl(struct ifnet *, u_long, void *);
static void	vioif_watchdog(struct ifnet *);

/* rx */
static int	vioif_add_rx_mbuf(struct virtio_net_softc *, int);
static void	vioif_free_rx_mbuf(struct virtio_net_softc *, int);
static void	vioif_populate_rx_mbufs(struct virtio_net_softc *);
static int	vioif_rx_deq(struct virtio_net_softc *);
static int	vioif_rx_vq_done(struct virtqueue *);
static void	vioif_rx_thread(void *);
static void	vioif_rx_drain(struct virtio_net_softc *);

/* tx */
static int	vioif_tx_vq_done(struct virtqueue *);
static void	vioif_tx_drain(struct virtio_net_softc *);

/* other control */
static int	vioif_updown(struct virtio_net_softc *, bool);
static int	vioif_ctrl_rx(struct virtio_net_softc *, int, bool);
static int	vioif_set_promisc(struct virtio_net_softc *, bool);
static int	vioif_set_allmulti(struct virtio_net_softc *, bool);
static int	vioif_set_rx_filter(struct virtio_net_softc *);
static int	vioif_rx_filter(struct virtio_net_softc *);
static int	vioif_ctrl_vq_done(struct virtqueue *);
static int	vioif_destroy_vq(struct virtio_net_softc *, struct virtio_softc *, int);



static int
vioif_init(struct ifnet *ifp){

	struct virtio_net_softc *sc = ifp->if_softc;

	vioif_down(ifp, 0);
	vioif_populate_rx_mbufs(sc);
	vioif_updown(sc, true);
	kprintf("%s\n",__FUNCTION__);

	return 0;
}

static void
vioif_down(struct ifnet *ifp, int disable){

	kprintf("%s\n",__FUNCTION__);
}

static void
vioif_start(struct ifnet *ifp){

	kprintf("%s\n",__FUNCTION__);
}

static int
vioif_ioctl(struct ifnet *ifp, u_long cmd, void *data){

	kprintf("%s\n",__FUNCTION__);
	return 0;
}

static void
vioif_watchdog(struct ifnet *ifp){

    kprintf("%s\n",__FUNCTION__);
    return;
}

static int
vioif_rx_vq_done(struct virtqueue *vq){

	struct virtio_softc *vsc = vq->vq_owner;
	struct virtio_net_softc *sc = device_get_softc(vsc->sc_child);
	int r = 0;

	//flags = 0 ?
	lwkt_initmsg(&sc->sc_lmsg, &sc->sc_port, 0);

	r = vioif_rx_deq(sc);
	if (r)
		lwkt_sendmsg(&sc->sc_port, &sc->sc_lmsg);

	return r;
}

static void
vioif_rx_thread(void *arg){

	device_t dev = arg;
	struct virtio_net_softc *sc = device_get_softc(dev);

	// FLAGS = ?
	while (lwkt_thread_waitport(&sc->sc_port, FLAGS)) {
		vioif_populate_rx_mbufs(sc);
		// int error = 0 ?
		lwkt_replymsg(&sc->sc_lmsg, 0);
	}
}


static int
virtio_net_probe(device_t dev)
{
	
	device_t pdev = device_get_parent(dev);

	if(pci_read_config(dev,PCIR_SUBDEV_0,2) == PCI_PRODUCT_VIRTIO_NETWORK) {
		debug("parent:%p is net\n", pdev);
	} else {
		debug("parent:%p is not net\n", pdev);
		return 1;
	}

	return 0;

}


static int
virtio_net_attach(device_t dev)
{

	struct virtio_net_softc *sc = device_get_softc(dev);
	device_t pdev = device_get_parent(dev);
	struct virtio_softc *vsc = device_get_softc(pdev);
	uint32_t features;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int error;
	//struct resource *io;

	debug("");

//	int  qsize;

	lwkt_serialize_init(&sc->sc_serializer);
	sc->dev = dev;
	sc->sc_virtio = vsc;

	vsc->sc_vqs = &sc->sc_vq[RX_VQ];
	vsc->sc_config_change = 0;
	vsc->sc_child = dev;
	//vsc->sc_ipl = IPL_NET;
	//vsc->sc_ipl = 5 ;

	vsc->sc_config_change = 0; /* keep it?*/
	vsc->sc_intrhand = virtio_vq_intr;

	debug("sc_child is %p\n", vsc->sc_child);

	features = virtio_negotiate_features(vsc,
						(VIRTIO_NET_F_MAC |
						 VIRTIO_NET_F_STATUS |
						 VIRTIO_NET_F_CTRL_VQ |
						 VIRTIO_NET_F_CTRL_RX |
						 VIRTIO_F_NOTIFY_ON_EMPTY));
		if (features & VIRTIO_NET_F_MAC) {
			sc->sc_mac[0] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+0);
			sc->sc_mac[1] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+1);
			sc->sc_mac[2] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+2);
			sc->sc_mac[3] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+3);
			sc->sc_mac[4] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+4);
			sc->sc_mac[5] = virtio_read_device_config_1(vsc,
							    VIRTIO_NET_CONFIG_MAC+5);
		} else {
			/* code stolen from sys/net/if_tap.c */
			struct timeval tv;
			uint32_t ui;
			getmicrouptime(&tv);
			ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff;
			memcpy(sc->sc_mac+3, (uint8_t *)&ui, 3);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+0,
						     sc->sc_mac[0]);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+1,
						     sc->sc_mac[1]);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+2,
						     sc->sc_mac[2]);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+3,
						     sc->sc_mac[3]);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+4,
						     sc->sc_mac[4]);
			virtio_write_device_config_1(vsc,
						     VIRTIO_NET_CONFIG_MAC+5,
						     sc->sc_mac[5]);
		}

	kprintf(":Ethernet address %s\n", ether_sprintf(sc->sc_mac));

	kprintf("Attach started ->> %s\n",__FUNCTION__);

	/* Virtqueue allocation for the rx queue. */
	error = virtio_alloc_vq(vsc,&sc->sc_vq[RX_VQ],0,
				MCLBYTES+sizeof(struct virtio_net_hdr),2,
				"rx vq");
	if (error != 0)	{
		kprintf("Virtqueue allocation for rx failed\n");
		goto err;
	}
	vsc->sc_nvqs = 1;
	sc->sc_vq[RX_VQ].vq_done = vioif_rx_vq_done; /* rx interrupt*/

	/* Virtqueue allocation for the tx queue. */
	error = virtio_alloc_vq(vsc, &sc->sc_vq[TX_VQ], 1,
		    	(sizeof(struct virtio_net_hdr)
		    	+ (ETHER_MAX_LEN - ETHER_HDR_LEN)),
		    	VIRTIO_NET_TX_MAXNSEGS + 1,
		    	"tx vq");
	if (error != 0){
		kprintf("Virtqueue allocation for tx failed\n");
		goto err;
	}
	vsc->sc_nvqs = 2;
	sc->sc_vq[TX_VQ].vq_done = vioif_tx_vq_done; /* tx interrupt*/

	virtio_start_vq_intr(vsc, &sc->sc_vq[RX_VQ]);
	virtio_stop_vq_intr(vsc, &sc->sc_vq[TX_VQ]);


	/* Virtqueue allocation for the ctrl queue */
	if ((features & VIRTIO_NET_F_CTRL_VQ)
		&& (features & VIRTIO_NET_F_CTRL_RX)){ /* rx & ctrl queues */
		error = virtio_alloc_vq(vsc, &sc->sc_vq[CTRL_VQ], 2,
			    NBPG, 1, "control vq");

		if (error != 0){
			kprintf("Virtqueue allocation for control failed\n");
			goto err;
		}

		vsc->sc_nvqs = 3;
		sc->sc_vq[CTRL_VQ].vq_done = vioif_ctrl_vq_done;

		//cv_init(&sc->sc_ctrl_wait, "ctrl_vq");
		spin_init(&sc->sc_ctrl_wait_lock);

		sc->sc_ctrl_inuse = FREE;

		virtio_start_vq_intr(vsc, &sc->sc_vq[CTRL_VQ]);
	}

	/* Software interrupt <-> we create a kernel thread instead */

	/*sc->sc_rx_softint = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
						      vioif_rx_softint, sc);
	if (sc->sc_rx_softint == NULL) {
		kprintf("cannot establish softint\n");
		goto err;
	}*/

	error = kthread_create(vioif_rx_thread, sc->dev, &sc->sc_td,
			device_get_name(dev));
	if (error) {
		kprintf("unable to create rx event thread.\n");
		goto err;
	}
	//lwkt_initport_thread(&sc->sc_port, sc->sc_td);
	lwkt_waitport(&sc->sc_port, 0);


	/* Memory allocation for the control queue (for virtio_softc) */
	if (vioif_alloc_mems(sc) < 0)
		goto err;

	if (vsc->sc_nvqs == 3)
		config_interrupts(dev, vioif_deferred_init);

	/* Interface for the device switch */
	strlcpy(ifp->if_xname, device_get_name(dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = vioif_start;
	ifp->if_ioctl = vioif_ioctl;
	ifp->if_init = vioif_init;
	//doesn't exist in the ifnet structure
	//ifp->if_down = vioif_down;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = vioif_watchdog;

	lwkt_serialize_enter(&sc->sc_serializer);
	if_attach(ifp, &sc->sc_serializer);
	ether_ifattach(ifp, sc->sc_mac, &sc->sc_serializer);

	kprintf("%s","CONFIG_DEVICE_STATUS_DRIVER");
	virtio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);

    return 0;

err:
	kprintf("%s failure\n", __FUNCTION__);
	if (vsc->sc_nvqs == 3) {
		virtio_free_vq(vsc, &sc->sc_vq[CTRL_VQ]);
		//cv_destroy(&sc->sc_ctrl_wait);
		spin_uninit(&sc->sc_ctrl_wait_lock);
		vsc->sc_nvqs = 2;
	}
	if (vsc->sc_nvqs == 2) {
		virtio_free_vq(vsc, &sc->sc_vq[TX_VQ]);
		vsc->sc_nvqs = 1;
	}
	if (vsc->sc_nvqs == 1) {
		virtio_free_vq(vsc, &sc->sc_vq[RX_VQ]);
		vsc->sc_nvqs = 0;
	}
	vsc->sc_child = (void*)1;
	return 1;
}

static int
virtio_net_detach(device_t dev)
{
	kprintf("%s\n",__FUNCTION__);
	struct virtio_net_softc *sc = device_get_softc(dev);
	device_t pdev = device_get_parent(sc->dev);
	struct virtio_softc *vsc = device_get_softc(pdev);

	vioif_destroy_vq(sc, vsc, RX_VQ); /* destroy rx vq */
	vioif_destroy_vq(sc, vsc, TX_VQ); /* destroy tx vq */
	vioif_destroy_vq(sc, vsc, CTRL_VQ); /* destroy ctrl vq */

	/* anything else ? */
	lwkt_serialize_exit(&sc->sc_serializer);

	return 0;
}

/* Unload and free &sc->sc_vq[number] */
static int
vioif_destroy_vq(struct virtio_net_softc *sc, struct virtio_softc *vsc, int numq){

	struct virtqueue *vq = &sc->sc_vq[numq];
	int i;


	/*for (i=0; i<sc->sc_vq[number].vq_num; i++){
		struct virtio_blk_req *vr = &sc->sc_reqs[i];

		bus_dmamap_destroy(vsc->payloads_dmat, vr->payload_dmap);

		bus_dmamap_unload(vsc->requests_dmat, vr->cmd_dmap);
		bus_dmamap_destroy(vsc->requests_dmat, vr->cmd_dmap);
	}*/


	virtio_reset(vsc);
	virtio_free_vq(vsc, &sc->sc_vq[numq]);

	/* unload and free the virtqueue; vq_entries was already freed by virtio_free_vq() */
	bus_dmamap_unload(vq->vq_dmat, vq->vq_dmamap);
	bus_dmamem_free(vq->vq_dmat, vq->vq_vaddr, vq->vq_dmamap);
	bus_dma_tag_destroy(vq->vq_dmat);
	memset(vq, 0, sizeof(*vq));

	/* free net-related stuff */

	return 0;
}

static device_method_t virtio_net_methods[] = {
	DEVMETHOD(device_probe,         virtio_net_probe),
	DEVMETHOD(device_attach,        virtio_net_attach),
	DEVMETHOD(device_detach,        virtio_net_detach),
	{ 0, 0 }
};

static driver_t virtio_net_driver = {
	"virtio_net",
	virtio_net_methods,
	sizeof(struct virtio_net_softc),
};

static devclass_t virtio_net_devclass;

DRIVER_MODULE(virtio_net, virtiobus, virtio_net_driver, virtio_net_devclass, 0, 0);
MODULE_DEPEND(virtio_net, virtiobus, 0, 0, 0);

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/fbio.h>
#include <sys/linker_set.h>
#include <sys/device.h>
#include <sys/thread2.h>
#include <sys/rman.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include "virtiovar.h"
#include "virtioreg.h"

static const char *virtio_device_name[] = {
	"Unknown (0)",	/* 0 */
	"Network",	/* 1 */
	"Block",	/* 2 */
	"Console",	/* 3 */
	"Entropy",	/* 4 */
	"Memory Balloon",	/* 5 */
	"Unknown (6)",	/* 6 */
	"Unknown (7)",	/* 7 */
	"Unknown (8)",	/* 8 */
	"9P Transport"	/* 9 */ 
};

#define NDEVNAMES	(sizeof(virtio_device_name)/sizeof(char*))
#define MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
#define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))& \
				 ~(VIRTIO_PAGE_SIZE-1))
#define virtio_device_reset(sc)	virtio_set_status((sc), 0)

/*
 * Declarations
 */
static inline void      vq_sync_uring(struct virtio_softc *sc,
				      struct virtqueue *vq, int ops);
static inline void      vq_sync_aring(struct virtio_softc *sc,
				      struct virtqueue *vq, int ops);
static void             virtio_init_vq(struct virtio_softc *sc,
				       struct virtqueue *vq);
static void             virtio_helper(void *arg, bus_dma_segment_t *segs,
				      int nseg, int error);
static inline void      vq_sync_indirect(struct virtio_softc *sc,
					 struct virtqueue *vq, int slot, int ops);
static inline void      vq_sync_descs(struct virtio_softc *sc,
				      struct virtqueue *vq, int ops);
static void             vq_free_entry(struct virtqueue *vq,
				      struct vq_entry *qe);
static struct vq_entry *        vq_alloc_entry(struct virtqueue *vq); 
static int              virtio_probe(device_t dev);
static int              virtio_detach(device_t dev);
static int              virtio_intr(void *arg);
static int              virtio_attach(device_t dev);


void virtio_set_status(struct virtio_softc *sc, int status)
{
	int old = 0;

	if (status != 0)
		old = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
				       VIRTIO_CONFIG_DEVICE_STATUS);

	bus_space_write_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_STATUS,
			  status|old);
}

/*
 * Reset the device:
 * To reset the device to a known state, do following:
 *	virtio_reset(sc); this will stop the device activity
 *	<dequeue finished requests>;  virtio_dequeue() still can be called
 *	<revoke pending requests in the vqs if any>;
 *	virtio_reinit_begin(sc);      dequeue prohibited
 *	newfeatures = virtio_negotiate_features(sc, requestedfeatures);
 *	<some other initialization>;
 *	virtio_reinit_end(sc);	      device activated; enqueue allowed
 * Once attached, feature negotiation can only be allowed after virtio_reset.
 */
void
virtio_reset(struct virtio_softc *sc)
{
	virtio_device_reset(sc);
}
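
/*
 * Example (sketch only): how a child driver would follow the sequence
 * above.  virtio_reinit_begin()/virtio_reinit_end() are named in the
 * protocol description but are not implemented in this port yet, so
 * this shows the intended shape rather than working code:
 *
 *	virtio_reset(vsc);
 *	while (virtio_dequeue(vsc, vq, &slot, &len) == 0)
 *		virtio_dequeue_commit(vsc, vq, slot);
 *	virtio_reinit_begin(vsc);
 *	features = virtio_negotiate_features(vsc, wanted_features);
 *	virtio_reinit_end(vsc);
 */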

static inline void
vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->requests_dmat, vq->vq_dmamap, ops);
}

static inline void
vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->requests_dmat, vq->vq_dmamap, ops);
}

/*
 * Initialize vq structure.
 */
static void
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	int i, j;
	int vq_size = vq->vq_num;

	memset(vq->vq_vaddr, 0, vq->vq_bytesize);
	/* build the indirect descriptor chain */
	if (vq->vq_indirect != NULL) {
		struct vring_desc *vd;
		/* chain each slot's private block of indirect descriptors */
		for (i = 0; i < vq_size; i++) {
			vd = vq->vq_indirect;
			vd += vq->vq_maxnsegs * i;
			for (j = 0; j < vq->vq_maxnsegs-1; j++)
				vd[j].next = j + 1;
		}
	}

	/* free slot management */
	TAILQ_INIT(&vq->vq_freelist);
	for (i = 0; i < vq_size; i++) {
		TAILQ_INSERT_TAIL(&vq->vq_freelist, &vq->vq_entries[i],
				  qe_list); 
		vq->vq_entries[i].qe_index = i;
	} 
	spin_init(&vq->vq_freelist_lock);

	/* enqueue/dequeue status */
	vq->vq_avail_idx = 0;
	vq->vq_used_idx = 0;
	vq->vq_queued = 0;
	spin_init(&vq->vq_aring_lock);
	spin_init(&vq->vq_uring_lock);
	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
	vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
	vq->vq_queued++;
}

static void
virtio_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct virtqueue *vq = (struct virtqueue *) arg;
	debug("%s %u\n",__FUNCTION__,(uint)segs->ds_addr);

	vq->bus_addr = segs->ds_addr;
}

int
virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	struct vq_entry *qe;
	int i = 0;

	/*
	 * device must be already deactivated
	 * confirm the vq is empty
	 */ 

	TAILQ_FOREACH(qe, &vq->vq_freelist, qe_list) {
		i++;
	}

	if (i != vq->vq_num) {
		kprintf("%s: freeing non-empty vq, index %d\n", __func__,
			vq->vq_index);
		return EBUSY;
	}

	/* tell device that there's no virtqueue any longer */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
			  vq->vq_index);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS, 0); 
	kfree(vq->vq_entries, M_DEVBUF);
	spin_uninit(&vq->vq_freelist_lock);
	spin_uninit(&vq->vq_uring_lock);
	spin_uninit(&vq->vq_aring_lock);

	return 0;
}

int
virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index,
		int maxsegsize, int maxnsegs, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
	int  r;
	int error;
	debug("ind:%d, %d %d\n",index,(unsigned int)sc->sc_iot,
	      (unsigned int)sc->sc_ioh);
	memset(vq, 0, sizeof(*vq));

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_SELECT,
			  index);

	vq_size = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 
				   VIRTIO_CONFIG_QUEUE_SIZE);
	if (vq_size == 0) {
		panic( "virtqueue not exist, index %d for %s\n", index, name);
	}

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size + 
				     sizeof(uint16_t)*(2+vq_size));

	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2 + 
				     sizeof(struct vring_used_elem)*vq_size);

	/* allocsize3: indirect table */
	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
	else
		allocsize3 = 0;

	allocsize = allocsize1 + allocsize2 + allocsize3;
	debug("a1:%d a2:%d a3:%d a4:%d\n", allocsize1, allocsize2, allocsize3, 
	      allocsize);

	if (sc->virtio_dmat== NULL) {
		kprintf("dmat is null\n");
		return 1;
	}

	error = bus_dma_tag_create(sc->virtio_dmat, 
				   VIRTIO_PAGE_SIZE, 
				   0, 
				   BUS_SPACE_MAXADDR, 
				   BUS_SPACE_MAXADDR, 
				   NULL, NULL, 
				   allocsize,
				   1,
				   allocsize,
				   BUS_DMA_NOWAIT,
				   &vq->vq_dmat);

	if (error) {
		kprintf("could not allocate RX mbuf dma tag\n");
		return error;
	}


	if (bus_dmamem_alloc(vq->vq_dmat, (void **)&vq->vq_vaddr,
			     BUS_DMA_NOWAIT, &vq->vq_dmamap)) {
		kprintf("bus_dmamem_alloc failed\n");
		return(ENOMEM);
	}

	if (bus_dmamap_load(vq->vq_dmat, vq->vq_dmamap, vq->vq_vaddr, allocsize,
			    virtio_helper, vq, BUS_DMA_NOWAIT) != 0) {
		kprintf("bus_dmamap_load failed\n");
	}

	/* set the vq address */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS,
			  (vq->bus_addr / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_desc = vq->vq_vaddr;
	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
	if (allocsize3 > 0) {
		vq->vq_indirectoffset = allocsize1 + allocsize2; 
		vq->vq_indirect = (void*)(((char*)vq->vq_desc) + 
					  vq->vq_indirectoffset);
	}

	vq->vq_bytesize = allocsize;
	vq->vq_maxsegsize = maxsegsize;
	vq->vq_maxnsegs = maxnsegs;

	/* free slot management */
	vq->vq_entries = kmalloc(sizeof(struct vq_entry)*vq_size, M_DEVBUF, 
				 M_NOWAIT);
	if (vq->vq_entries == NULL) {
		r = ENOMEM;
		goto err;
	}

	virtio_init_vq(sc, vq);

	kprintf("allocated %u byte for virtqueue %d for %s, size %d\n", 
		allocsize, index, name, vq_size);
	if (allocsize3 > 0) {
		kprintf( "using %d byte (%d entries) indirect descriptors\n",
			 allocsize3, maxnsegs * vq_size);
	}
	return 0;


err:
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_QUEUE_ADDRESS, 0);
	if (vq->vq_entries) {
		kfree(vq->vq_entries,M_DEVBUF);
	}
	bus_dmamap_unload(vq->vq_dmat, vq->vq_dmamap);
	bus_dmamem_free(vq->vq_dmat, vq->vq_vaddr, vq->vq_dmamap);
	bus_dma_tag_destroy(vq->vq_dmat);
	memset(vq, 0, sizeof(*vq));

	return -1;
}

uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, int index)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh, 
				sc->sc_config_offset + index);
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, int index)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, int index)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				sc->sc_config_offset + index);
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, int index)
{
	uint64_t r;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 
			     sc->sc_config_offset + index + sizeof(uint32_t));
	r <<= 32;
	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
			      sc->sc_config_offset + index);
	return r;
}

static inline void
vq_sync_indirect(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		 int ops)
{
	bus_dmamap_sync(sc->requests_dmat, vq->vq_dmamap, ops);
}

/*
 * dmamap sync operations for a virtqueue.
 */
static inline void
vq_sync_descs(struct virtio_softc *sc, struct virtqueue *vq, int ops)
{
	bus_dmamap_sync(sc->requests_dmat, vq->vq_dmamap, ops);
}

static void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	kprintf("call of q_free_entry(): vq_num=%u", vq->vq_num);
	spin_lock(&vq->vq_freelist_lock);
	TAILQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list);
	spin_unlock(&vq->vq_freelist_lock);

	return;
}


/*
 * enqueue_commit: add it to the aring.
 */
int
virtio_enqueue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		      bool notifynow)
{
	struct vq_entry *qe1;

	if (slot < 0) {
		spin_lock(&vq->vq_aring_lock);
		goto notify;
	}
	vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE);

	qe1 = &vq->vq_entries[slot];
	if (qe1->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);

	spin_lock(&vq->vq_aring_lock);
	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;

notify:
	if (notifynow) {
		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);


		bus_space_barrier(sc->sc_iot, sc->sc_ioh, vq->vq_avail->idx, 2,
				  BUS_SPACE_BARRIER_WRITE);

		vq->vq_avail->idx = vq->vq_avail_idx;

		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);    


		bus_space_barrier(sc->sc_iot, sc->sc_ioh, vq->vq_queued, 4,
				  BUS_SPACE_BARRIER_WRITE);

		vq->vq_queued++;

		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);

		bus_space_barrier(sc->sc_iot, sc->sc_ioh, vq->vq_used->flags, 2,
				  BUS_SPACE_BARRIER_READ);

		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
					  VIRTIO_CONFIG_QUEUE_NOTIFY,
					  vq->vq_index); 
		}
	}
	spin_unlock(&vq->vq_aring_lock);

	return 0;
}

/* 
 *  Free descriptor management.
 */
static struct vq_entry *
vq_alloc_entry(struct virtqueue *vq) {
	struct vq_entry *qe;

	spin_lock(&vq->vq_freelist_lock);
	if (TAILQ_EMPTY(&vq->vq_freelist)) {
		spin_unlock(&vq->vq_freelist_lock);
		return NULL; 
	}
	qe = TAILQ_FIRST(&vq->vq_freelist);
	TAILQ_REMOVE(&vq->vq_freelist, qe, qe_list);
	spin_unlock(&vq->vq_freelist_lock);

	return qe;
}

/*
 * enqueue_reserve: allocate remaining slots and build the descriptor chain.
 */
int
virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		       int nsegs)
{
	int indirect;
	int i, s;
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vq_entry *qe;
	struct vring_desc *vd;

	KKASSERT(qe1->qe_next == -1);
	KKASSERT(1 <= nsegs && nsegs <= vq->vq_num);

	if ((vq->vq_indirect != NULL) &&
	    (nsegs >= MINSEG_INDIRECT) &&
	    (nsegs <= vq->vq_maxnsegs))
		indirect = 1;
	else
		indirect = 0;
	qe1->qe_indirect = indirect;

	if (indirect) {

		vd = &vq->vq_desc[qe1->qe_index];
		vd->addr = vq->bus_addr + vq->vq_indirectoffset;
		vd->addr += sizeof(struct vring_desc) * vq->vq_maxnsegs *
			    qe1->qe_index;
		vd->len = sizeof(struct vring_desc) * nsegs;
		vd->flags = VRING_DESC_F_INDIRECT;

		vd = vq->vq_indirect;
		vd += vq->vq_maxnsegs * qe1->qe_index;
		qe1->qe_desc_base = vd;
		for (i = 0; i < nsegs-1; i++) {
			vd[i].flags = VRING_DESC_F_NEXT;
		}
		vd[i].flags = 0;
		qe1->qe_next = 0;

		return 0;
	} else {
		vd = &vq->vq_desc[0];
		qe1->qe_desc_base = vd;
		qe1->qe_next = qe1->qe_index;
		s = slot;
		for (i = 0; i < nsegs - 1; i++) {
			qe = vq_alloc_entry(vq);
			if (qe == NULL) {
				vd[s].flags = 0;
				kprintf("here\n");
				return EAGAIN;
			}
			vd[s].flags = VRING_DESC_F_NEXT;
			vd[s].next = qe->qe_index;
			s = qe->qe_index;
		}
		vd[s].flags = 0;

		return 0;
	}
}

int
virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		 bus_addr_t ds_addr, bus_size_t ds_len, bus_dmamap_t dmamap,
		 bus_addr_t start, bus_size_t len, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int s = qe1->qe_next;

	KKASSERT(s >= 0);
	debug("ds_len:%lu, start:%lu, len:%lu\n", ds_len, start, len);
	KKASSERT((ds_len > start) && (ds_len >= start + len));

	vd[s].addr = ds_addr + start;
	vd[s].len = len;
	if (!write)
		vd[s].flags |= VRING_DESC_F_WRITE;
	qe1->qe_next = vd[s].next;

	return 0;
}

/*
 * enqueue: enqueue a single dmamap.
 */
int
virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
	       bus_dma_segment_t *segs, int nseg, bus_dmamap_t dmamap, bool write)
{
	struct vq_entry *qe1 = &vq->vq_entries[slot];
	struct vring_desc *vd = qe1->qe_desc_base;
	int i;
	int s = qe1->qe_next;

	KKASSERT(s >= 0);
	for (i = 0; i < nseg; i++) {
		vd[s].addr = segs[i].ds_addr;
		vd[s].len = segs[i].ds_len;
		if (!write)
			vd[s].flags |= VRING_DESC_F_WRITE;
		debug("s:%d addr:0x%llu len:%lu\n", s, 
		      (unsigned long long)vd[s].addr,(unsigned long) vd[s].len);
		s = vd[s].next;
	}

	qe1->qe_next = s;

	return 0;
}

/*
 * enqueue_prep: allocate a slot number
 */
int
virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp)
{
	struct vq_entry *qe1;

	KKASSERT(slotp != NULL);

	qe1 = vq_alloc_entry(vq);
	if (qe1 == NULL)
		return EAGAIN;
	/* next slot is not allocated yet */
	qe1->qe_next = -1;
	*slotp = qe1->qe_index;

	return 0;
}

/*
 * Scan vq, bus_dmamap_sync for the vqs (not for the payload),
 * and calls (*vq_done)() if some entries are consumed.
 */
int
virtio_vq_intr(struct virtio_softc *sc)
{
	struct virtqueue *vq;
	int i, r = 0;

	for (i = 0; i < sc->sc_nvqs; i++) {
		vq = &sc->sc_vqs[i];
		if (vq->vq_queued) {
			vq->vq_queued = 0;
			vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
		}
		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
		bus_space_barrier(sc->sc_iot, sc->sc_ioh, vq->vq_used_idx, 2, 
				  BUS_SPACE_BARRIER_READ);
		if (vq->vq_used_idx != vq->vq_used->idx) {
			if (vq->vq_done)
				r |= (vq->vq_done)(vq);
		}
	}

	return r;
}

/*
 * Dequeue a request: dequeue a request from uring; dmamap_sync for uring is 
 * already done in the interrupt handler.
 */
int
virtio_dequeue(struct virtio_softc *sc, struct virtqueue *vq, int *slotp,
	       int *lenp)
{
	uint16_t slot, usedidx;
	struct vq_entry *qe;

	if (vq->vq_used_idx == vq->vq_used->idx)
		return ENOENT;
	spin_lock(&vq->vq_uring_lock);
	usedidx = vq->vq_used_idx++;
	spin_unlock(&vq->vq_uring_lock);
	usedidx %= vq->vq_num;
	slot = vq->vq_used->ring[usedidx].id;
	qe = &vq->vq_entries[slot];

	if (qe->qe_indirect)
		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE);

	if (slotp)
		*slotp = slot;
	if (lenp)
		*lenp = vq->vq_used->ring[usedidx].len;

	return 0;
}

/*
 * dequeue_commit: complete dequeue; the slot is recycled for future use. 
 * 	if you forget to call this the slot will be leaked.
 */
int
virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot)
{
	struct vq_entry *qe = &vq->vq_entries[slot];
	struct vring_desc *vd = &vq->vq_desc[0];
	int s = slot;

	while (vd[s].flags & VRING_DESC_F_NEXT) {
		//kprintf("vringdescnext\n");
		s = vd[s].next;
		vq_free_entry(vq, qe);
		qe = &vq->vq_entries[s];
	}
	vq_free_entry(vq, qe);

	return 0;
}
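
/*
 * Typical slot life cycle (sketch only, mirroring the NetBSD API this
 * file is ported from); `segs', `nseg' and `dmamap' come from the
 * caller's bus_dmamap_load():
 *
 *	if (virtio_enqueue_prep(sc, vq, &slot) != 0)
 *		return;
 *	if (virtio_enqueue_reserve(sc, vq, slot, nseg) != 0)
 *		return;
 *	virtio_enqueue(sc, vq, slot, segs, nseg, dmamap, true);
 *	virtio_enqueue_commit(sc, vq, slot, true);
 *
 *	and later, from the vq_done handler:
 *	while (virtio_dequeue(sc, vq, &slot, &len) == 0)
 *		virtio_dequeue_commit(sc, vq, slot);
 */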
/*
 * Feature negotiation.
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{

	uint32_t r;

	guest_features |= VIRTIO_F_RING_INDIRECT_DESC;

	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_DEVICE_FEATURES);
	r &= guest_features;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_GUEST_FEATURES, r);
	sc->sc_features = r;
	if (r & VIRTIO_F_RING_INDIRECT_DESC) {
		sc->sc_indirect = true;
	} else {
		sc->sc_indirect = false;
	}

	return r;

}


static int 
virtio_probe(device_t dev)
{
	uint32_t id = pci_get_device(dev);
	if (id >= 0x1000  && id <= 0x103f) {
		return 0;
	}

	return 1;
}

static int 
virtio_detach(device_t dev)
{   

	struct virtio_softc *sc = device_get_softc(dev);
	debug("");


	/*destroy parent DMA tag*/
	if (sc->virtio_dmat)
		bus_dma_tag_destroy(sc->virtio_dmat);

	/* disconnect the interrupt handler */
	if (sc->virtio_intr)
		bus_teardown_intr(sc->dev, sc->res_irq, sc->virtio_intr);

	if (sc->res_irq != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res_irq);

	/* release the register window mapping */
	if (sc->io!= NULL)
		bus_release_resource(sc->dev, SYS_RES_IOPORT, PCIR_MAPS, sc->io);

	if (sc->sc_child) {
		debug("Deleting child\n");
		if (device_delete_child(sc->dev, sc->sc_child)!=0)
			debug("Couldn't delete child device\n");
	}
	return 0;
}

static int
virtio_intr(void *arg)
{ 
	struct virtio_softc *sc = arg;
	int isr, r = 0;

	/* check and ack the interrupt */
	isr = bus_space_read_1(sc->sc_iot, sc->sc_ioh, VIRTIO_CONFIG_ISR_STATUS);

	if (isr == 0)
		return 0;
	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    (sc->sc_config_change != NULL)) {
		kprintf("config change\n");
		r = (sc->sc_config_change)(sc);
	}

	if (sc->sc_intrhand != NULL) {
		r |= (sc->sc_intrhand)(sc);
	}

	return r;
}

static int 
virtio_attach(device_t dev)
{
	struct virtio_softc *sc = device_get_softc(dev);
	int rid, error;
	int virtio_type;
	device_t child;

	sc->dev = dev;
	rid = PCIR_BAR(0);
	sc->io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
				    RF_ACTIVE);
	if (!sc->io) {
		device_printf(dev, "No I/O space?!\n");
		return ENOMEM;
	}

	sc->sc_iot = rman_get_bustag(sc->io);
	sc->sc_ioh = rman_get_bushandle(sc->io);
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
	sc->sc_config_change = 0;
	sc->sc_intrhand = virtio_vq_intr;

	virtio_type = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	kprintf("Virtio %s Device (rev. 0x%02x) %p\n",
		(virtio_type<NDEVNAMES?
		 virtio_device_name[virtio_type]:"Unknown"),
		pci_read_config(dev, PCIR_REVID, 1),dev);

	sc->res_irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->rid_irq,
					     RF_SHAREABLE|RF_ACTIVE);
	if (sc->res_irq == NULL) {
		kprintf("Couldn't alloc res_irq\n");
	}
	error = bus_setup_intr(sc->dev,
			       sc->res_irq,
			       0, 
			       (driver_intr_t *)virtio_intr,
			       (void *)sc,
			       &(sc->virtio_intr), 
			       NULL);

	if (error) {
		kprintf("Couldn't setup intr\n");
		return(1);
	}

	virtio_device_reset(sc);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);

	error = bus_dma_tag_create(NULL, 1,
				   0,
				   BUS_SPACE_MAXADDR,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->virtio_dmat);
	if (error != 0) {
		goto handle_error;

	}

	if (virtio_type == PCI_PRODUCT_VIRTIO_NETWORK) {
		child = device_add_child(dev, "virtio_net",0);
	} else if (virtio_type == PCI_PRODUCT_VIRTIO_BLOCK) {
		child = device_add_child(dev, "virtio_blk",0);
	} else {
		kprintf("Dev %s not supported\n",
			virtio_device_name[virtio_type]); 
		goto handle_error;
	}
	return 0;

handle_error:
	if (sc->io) {
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->io);
	}
	if (sc->res_irq) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->res_irq);
	}
	return 1;
}

static device_method_t virtio_methods[] = {
	DEVMETHOD(device_probe,         virtio_probe),
	DEVMETHOD(device_attach,        virtio_attach),
	DEVMETHOD(device_detach,        virtio_detach),
	{ 0, 0}
};

static driver_t virtio_driver = {
	"virtiobus",
	virtio_methods,
	sizeof(struct virtio_softc),
};

static devclass_t virtio_devclass;

DRIVER_MODULE(virtiobus, pci, virtio_driver, virtio_devclass, 0, 0);
MODULE_VERSION(virtiobus, 0);

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Part of the file derived from `Virtio PCI Card Specification v0.8.6 DRAFT'
 * Appendix A.
 */
/* An interface for efficient virtio implementation.
 *
 * This header is BSD licensed so anyone can use the definitions
 * to implement compatible drivers/servers.
 *
 * Copyright 2007, 2009, IBM Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _VIRTIOREG_H_
#define _VIRTIOREG_H_

#include <sys/types.h>

/* Virtio product id (subsystem) */
#define PCI_PRODUCT_VIRTIO_NETWORK		1
#define PCI_PRODUCT_VIRTIO_BLOCK		2
#define PCI_PRODUCT_VIRTIO_CONSOLE		3
#define PCI_PRODUCT_VIRTIO_ENTROPY		4
#define PCI_PRODUCT_VIRTIO_BALLOON		5
#define PCI_PRODUCT_VIRTIO_9P			9

/* Virtio header */
#define VIRTIO_CONFIG_DEVICE_FEATURES		0	/* 32bit */
#define VIRTIO_CONFIG_GUEST_FEATURES		4	/* 32bit */
#define VIRTIO_F_NOTIFY_ON_EMPTY		(1<<24)
#define VIRTIO_F_RING_INDIRECT_DESC		(1<<28)
#define VIRTIO_F_BAD_FEATURE			(1<<30)
#define VIRTIO_CONFIG_QUEUE_ADDRESS		8	/* 32bit */
#define VIRTIO_CONFIG_QUEUE_SIZE		12	/* 16bit */
#define VIRTIO_CONFIG_QUEUE_SELECT		14	/* 16bit */
#define VIRTIO_CONFIG_QUEUE_NOTIFY		16	/* 16bit */
#define VIRTIO_CONFIG_DEVICE_STATUS		18	/* 8bit */
#define VIRTIO_CONFIG_DEVICE_STATUS_RESET	0
#define VIRTIO_CONFIG_DEVICE_STATUS_ACK		1
#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER	2
#define VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK	4
#define VIRTIO_CONFIG_DEVICE_STATUS_FAILED	128
#define VIRTIO_CONFIG_ISR_STATUS		19 /* 8bit */
#define VIRTIO_CONFIG_ISR_CONFIG_CHANGE		2
#define VIRTIO_CONFIG_CONFIG_VECTOR		20 /* 16bit, optional */
#define VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI	20
#define VIRTIO_CONFIG_DEVICE_CONFIG_MSI		22

/*
 * Virtqueue
 */

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT		1

/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE		2

/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT		4

/*
 * The Host uses this in used->flags to advise the Guest: don't kick me
 * when you add a buffer.  It's unreliable, so it's simply an
 * optimization.  Guest will still kick if it's out of buffers.
 */ 
#define VRING_USED_F_NO_NOTIFY		1

/*
 * The Guest uses this in avail->flags to advise the Host: don't
 * interrupt me when you consume a buffer.  It's unreliable, so it's
 * simply an optimization.
 */ 
#define VRING_AVAIL_F_NO_INTERRUPT	1

/*
 * Virtio ring descriptors: 16 bytes.
 * These can chain together via "next". 
 */ 
struct vring_desc {
	u_int64_t	addr;	/* Address (guest-physical). */
	u_int32_t	len;	/* Length. */
	u_int16_t	flags;	/* The flags as indicated above. */
	u_int16_t	next;	/* We chain unused descriptors via this, too */
} __packed;

struct vring_avail {
	u_int16_t	flags;
	u_int16_t	idx;
	u_int16_t	ring[0];
} __packed;

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	u_int32_t	id; /* Index of start of used descriptor chain. */
	u_int32_t	len; /* Tot len of the descriptor chain written to. */
} __packed;

struct vring_used {	
	u_int16_t	flags; 
	u_int16_t	idx; 
	struct vring_used_elem	ring[0];
} __packed;

#define VIRTIO_PAGE_SIZE	(4096)

#endif /* _VIRTIOREG_H_ */

/*
 * Copyright (c) 2010 Minoura Makoto.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Part of the file derived from `Virtio PCI Card Specification v0.8.6 DRAFT'
 * Appendix A.
 */
/* An interface for efficient virtio implementation.
 *
 * This header is BSD licensed so anyone can use the definitions
 * to implement compatible drivers/servers.
 *
 * Copyright 2007, 2009, IBM Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * 
 */

#ifndef _VIRTIOVAR_H_
#define _VIRTIOVAR_H_

/* change to VIRTIO_DEBUG for dmesg info*/
#define VIRTIO_DEBUG

#ifdef VIRTIO_DEBUG 
	#define debug(fmt, args...) do { kprintf("%s: " fmt, __func__ , ##args); } \
	while(0)
#else 
	#define debug( fmt, args...)
#endif

//#include "virtioreg.h"
struct vq_entry {
	TAILQ_ENTRY(vq_entry)	qe_list;	/* free list */ 
	uint16_t		qe_index;	/* index in vq_desc array */

	/* the following fields are used only when it is the `head' entry */
	int16_t			qe_next;	/* next enq slot */ 
	bool			qe_indirect;	/* 1 if using indirect */ 
	struct vring_desc	*qe_desc_base;
};

struct virtqueue {
	struct virtio_softc	*vq_owner; 
	u_int32_t		vq_num;	/* queue size (# of entries) */ 
	int32_t			vq_index;	/* queue number (0, 1, ...) */

	/* vring pointers (KVA) */
	struct vring_desc       *vq_desc;
	struct vring_avail      *vq_avail;
	struct vring_used       *vq_used;
	void			*vq_indirect;

	/* virtqueue allocation info */
	void			*vq_vaddr;
	int32_t			 vq_availoffset;
	int32_t			vq_usedoffset;
	int32_t			vq_indirectoffset;
	bus_dma_segment_t	vq_segs[1];
	u_int32_t		vq_bytesize;
	bus_dma_tag_t		vq_dmat;
	bus_dmamap_t		vq_dmamap;
	bus_addr_t		bus_addr;

	int32_t			vq_maxsegsize;
	int32_t			vq_maxnsegs;

	/* free entry management */
	struct vq_entry		*vq_entries;
	TAILQ_HEAD(, vq_entry)	vq_freelist;
	struct spinlock		vq_freelist_lock;

	/* enqueue/dequeue status */
	u_int16_t		vq_avail_idx;
	u_int16_t		vq_used_idx;
	int32_t			vq_queued;
	struct spinlock		vq_aring_lock;
	struct spinlock		vq_uring_lock;

	int (*vq_done)(struct virtqueue*);	/* interrupt handler */
};

struct virtio_softc {
	device_t		dev;
	int32_t			rid_ioport;
	int32_t			rid_memory;
	int32_t			rid_irq;

	int32_t			regs_rid;	/* resource id*/
	struct resource		*res_memory;	/* Resource for mem range. */
	struct resource		*res_irq;	/* Resource for irq range. */
	struct resource		*io; 

	bus_dma_tag_t		virtio_dmat;	/*Master tag*/

	int32_t			sc_config_offset;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;

	int			sc_nvqs;	/* set by child */
	struct virtqueue	*sc_vqs;

	bus_dma_tag_t		requests_dmat;
	bus_dmamap_t		cmds_dmamap;
	bus_dma_tag_t		payloads_dmat;

	vm_paddr_t		phys_next;	/* next page from mem range */
	uint32_t		sc_features;
	bool			sc_indirect;
	int			sc_childdevid;
	device_t		sc_child;	/* set by child */
	void			*virtio_intr;

	int (*sc_config_change)(struct virtio_softc*);	/* set by child */
	int (*sc_intrhand)(struct virtio_softc*);	/* set by child */
};

/* The standard layout for the ring is a continuous chunk of memory which
 * looks like this.  We assume num is a power of 2.
 *
 * struct vring {
 * The actual descriptors (16 bytes each)
 *      struct vring_desc desc[num];
 *
 *      // A ring of available descriptor heads with free-running index.
 *      __u16 avail_flags;
 *      __u16 avail_idx;
 *      __u16 available[num];
 *
 *      // Padding to the next align boundary.
 *      char pad[];
 *
 *      // A ring of used descriptor heads with free-running index.
 *      __u16 used_flags;
 *      __u16 used_idx;
 *      struct vring_used_elem used[num];
 * };
 * Note: for virtio PCI, align is 4096.
 */

/* public interface */
uint32_t	virtio_negotiate_features(struct virtio_softc*, uint32_t);
void		virtio_set_status(struct virtio_softc *sc, int32_t );
uint8_t		virtio_read_device_config_1(struct virtio_softc *sc, int index);
uint16_t	virtio_read_device_config_2(struct virtio_softc *sc, int index);
uint32_t	virtio_read_device_config_4(struct virtio_softc *sc, int index);
uint64_t	virtio_read_device_config_8(struct virtio_softc *sc, int index);
void		virtio_write_device_config_1(struct virtio_softc *sc,
					     int32_t index, uint8_t value);

int	virtio_alloc_vq(struct virtio_softc*, struct virtqueue*, int, int, int,
		    const char*);
int	virtio_free_vq(struct virtio_softc*, struct virtqueue*);
void	virtio_reset(struct virtio_softc *);

int	virtio_enqueue_prep(struct virtio_softc*, struct virtqueue*, int*);
int	virtio_enqueue_p(struct virtio_softc *sc, struct virtqueue *vq,
			 int slot, bus_addr_t ds_addr, bus_size_t ds_len,
			 bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len,
			 bool write);
int	virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot,
		       bus_dma_segment_t *segs, int nseg, bus_dmamap_t dmamap, 
		       bool write);
int	virtio_enqueue_reserve(struct virtio_softc*, struct virtqueue*, int, int);
int	virtio_enqueue_commit(struct virtio_softc*, struct virtqueue*, int, bool);

int	virtio_dequeue_commit(struct virtio_softc*, struct virtqueue*, int);
int	virtio_dequeue(struct virtio_softc*, struct virtqueue*, int *, int *);

int	virtio_vq_intr(struct virtio_softc *);
void	virtio_stop_vq_intr(struct virtio_softc *, struct virtqueue *);
void	virtio_start_vq_intr(struct virtio_softc *, struct virtqueue *);

#endif /* _VIRTIOVAR_H_ */


