Whoever originally wrote the virtio code quite rudely didn't configure their
editor to use spaces instead of tabs. This patch corrects this for all
virtio-related files.
Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>
diff --git a/qemu/hw/virtio-balloon.c b/qemu/hw/virtio-balloon.c
index 111f27d..9b34e87 100644
--- a/qemu/hw/virtio-balloon.c
+++ b/qemu/hw/virtio-balloon.c
@@ -79,26 +79,26 @@ static void virtio_balloon_handle_output(VirtIODevice
*vdev, VirtQueue *vq)
VirtQueueElement elem;
while (virtqueue_pop(vq, &elem)) {
- size_t offset = 0;
- uint32_t pfn;
+ size_t offset = 0;
+ uint32_t pfn;
- while (memcpy_from_iovector(&pfn, offset, 4,
+ while (memcpy_from_iovector(&pfn, offset, 4,
elem.out_sg, elem.out_num) == 4) {
- ram_addr_t pa;
- ram_addr_t addr;
+ ram_addr_t pa;
+ ram_addr_t addr;
- pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
- offset += 4;
+ pa = (ram_addr_t)ldl_p(&pfn) << VIRTIO_BALLOON_PFN_SHIFT;
+ offset += 4;
- addr = cpu_get_physical_page_desc(pa);
- if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
- continue;
+ addr = cpu_get_physical_page_desc(pa);
+ if ((addr & ~TARGET_PAGE_MASK) != IO_MEM_RAM)
+ continue;
- balloon_page(phys_ram_base + addr, !!(vq == s->dvq));
- }
+ balloon_page(phys_ram_base + addr, !!(vq == s->dvq));
+ }
- virtqueue_push(vq, &elem, offset);
- virtio_notify(vdev, vq);
+ virtqueue_push(vq, &elem, offset);
+ virtio_notify(vdev, vq);
}
}
@@ -114,7 +114,7 @@ static void virtio_balloon_get_config(VirtIODevice *vdev,
uint8_t *config_data)
}
static void virtio_balloon_set_config(VirtIODevice *vdev,
- const uint8_t *config_data)
+ const uint8_t *config_data)
{
VirtIOBalloon *dev = to_virtio_balloon(vdev);
struct virtio_balloon_config config;
@@ -132,11 +132,11 @@ static ram_addr_t virtio_balloon_to_target(void *opaque,
ram_addr_t target)
VirtIOBalloon *dev = opaque;
if (target > ram_size)
- target = ram_size;
+ target = ram_size;
if (target) {
- dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
- virtio_notify_config(&dev->vdev);
+ dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
+ virtio_notify_config(&dev->vdev);
}
return ram_size - (dev->actual << VIRTIO_BALLOON_PFN_SHIFT);
@@ -172,10 +172,10 @@ void *virtio_balloon_init(PCIBus *bus)
VirtIOBalloon *s;
s = (VirtIOBalloon *)virtio_init_pci(bus, "virtio-balloon",
- 6900, 0x1002,
- 0, VIRTIO_ID_BALLOON,
- 0x05, 0x00, 0x00,
- 8, sizeof(VirtIOBalloon));
+ 6900, 0x1002,
+ 0, VIRTIO_ID_BALLOON,
+ 0x05, 0x00, 0x00,
+ 8, sizeof(VirtIOBalloon));
if (s == NULL)
return NULL;
diff --git a/qemu/hw/virtio-balloon.h b/qemu/hw/virtio-balloon.h
index 0466a48..45e4b03 100644
--- a/qemu/hw/virtio-balloon.h
+++ b/qemu/hw/virtio-balloon.h
@@ -18,10 +18,10 @@
/* from Linux's linux/virtio_balloon.h */
/* The ID for virtio_balloon */
-#define VIRTIO_ID_BALLOON 5
+#define VIRTIO_ID_BALLOON 5
/* The feature bitmap for virtio balloon */
-#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming
pages */
+#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
diff --git a/qemu/hw/virtio-blk.c b/qemu/hw/virtio-blk.c
index 727119b..c9a91ba 100644
--- a/qemu/hw/virtio-blk.c
+++ b/qemu/hw/virtio-blk.c
@@ -19,13 +19,13 @@
/* from Linux's linux/virtio_blk.h */
/* The ID for virtio_block */
-#define VIRTIO_ID_BLOCK 2
+#define VIRTIO_ID_BLOCK 2
/* Feature bits */
-#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
-#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
-#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
-#define VIRTIO_BLK_F_GEOMETRY 4 /* Indicates support of legacy geometry
*/
+#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */
+#define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */
+#define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */
+#define VIRTIO_BLK_F_GEOMETRY 4 /* Indicates support of legacy
geometry */
struct virtio_blk_config
{
@@ -38,14 +38,14 @@ struct virtio_blk_config
} __attribute__((packed));
/* These two define direction. */
-#define VIRTIO_BLK_T_IN 0
-#define VIRTIO_BLK_T_OUT 1
+#define VIRTIO_BLK_T_IN 0
+#define VIRTIO_BLK_T_OUT 1
/* This bit says it's a scsi command, not an actual read or write. */
-#define VIRTIO_BLK_T_SCSI_CMD 2
+#define VIRTIO_BLK_T_SCSI_CMD 2
/* Barrier before this op. */
-#define VIRTIO_BLK_T_BARRIER 0x80000000
+#define VIRTIO_BLK_T_BARRIER 0x80000000
/* This is the first element of the read scatter-gather list. */
struct virtio_blk_outhdr
@@ -58,9 +58,9 @@ struct virtio_blk_outhdr
uint64_t sector;
};
-#define VIRTIO_BLK_S_OK 0
-#define VIRTIO_BLK_S_IOERR 1
-#define VIRTIO_BLK_S_UNSUPP 2
+#define VIRTIO_BLK_S_OK 0
+#define VIRTIO_BLK_S_IOERR 1
+#define VIRTIO_BLK_S_UNSUPP 2
/* This is the first element of the write scatter-gather list */
struct virtio_blk_inhdr
@@ -97,21 +97,21 @@ static void virtio_blk_rw_complete(void *opaque, int ret)
/* Copy read data to the guest */
if (!ret && !(req->out->type & VIRTIO_BLK_T_OUT)) {
- size_t offset = 0;
- int i;
+ size_t offset = 0;
+ int i;
- for (i = 0; i < req->elem.in_num - 1; i++) {
- size_t len;
+ for (i = 0; i < req->elem.in_num - 1; i++) {
+ size_t len;
- /* Be pretty defensive wrt malicious guests */
- len = MIN(req->elem.in_sg[i].iov_len,
- req->size - offset);
+ /* Be pretty defensive wrt malicious guests */
+ len = MIN(req->elem.in_sg[i].iov_len,
+ req->size - offset);
- memcpy(req->elem.in_sg[i].iov_base,
- req->buffer + offset,
- len);
- offset += len;
- }
+ memcpy(req->elem.in_sg[i].iov_base,
+ req->buffer + offset,
+ len);
+ offset += len;
+ }
}
req->in->status = ret ? VIRTIO_BLK_S_IOERR : VIRTIO_BLK_S_OK;
@@ -128,12 +128,12 @@ static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock
*s)
req = qemu_mallocz(sizeof(*req));
if (req == NULL)
- return NULL;
+ return NULL;
req->dev = s;
if (!virtqueue_pop(s->vq, &req->elem)) {
- qemu_free(req);
- return NULL;
+ qemu_free(req);
+ return NULL;
}
return req;
@@ -145,76 +145,76 @@ static void virtio_blk_handle_output(VirtIODevice *vdev,
VirtQueue *vq)
VirtIOBlockReq *req;
while ((req = virtio_blk_get_request(s))) {
- int i;
-
- if (req->elem.out_num < 1 || req->elem.in_num < 1) {
- fprintf(stderr, "virtio-blk missing headers\n");
- exit(1);
- }
-
- if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
- req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
- fprintf(stderr, "virtio-blk header not in correct element\n");
- exit(1);
- }
-
- req->out = (void *)req->elem.out_sg[0].iov_base;
- req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
-
- if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
- unsigned int len = sizeof(*req->in);
-
- req->in->status = VIRTIO_BLK_S_UNSUPP;
- virtqueue_push(vq, &req->elem, len);
- virtio_notify(vdev, vq);
- qemu_free(req);
- } else if (req->out->type & VIRTIO_BLK_T_OUT) {
- size_t offset;
-
- for (i = 1; i < req->elem.out_num; i++)
- req->size += req->elem.out_sg[i].iov_len;
-
- req->buffer = qemu_memalign(512, req->size);
- if (req->buffer == NULL) {
- qemu_free(req);
- break;
- }
-
- /* We copy the data from the SG list to avoid splitting up the
request. This helps
- performance a lot until we can pass full sg lists as AIO
operations */
- offset = 0;
- for (i = 1; i < req->elem.out_num; i++) {
- size_t len;
-
- len = MIN(req->elem.out_sg[i].iov_len,
- req->size - offset);
- memcpy(req->buffer + offset,
- req->elem.out_sg[i].iov_base,
- len);
- offset += len;
- }
-
- bdrv_aio_write(s->bs, req->out->sector,
- req->buffer,
- req->size / 512,
- virtio_blk_rw_complete,
- req);
- } else {
- for (i = 0; i < req->elem.in_num - 1; i++)
- req->size += req->elem.in_sg[i].iov_len;
-
- req->buffer = qemu_memalign(512, req->size);
- if (req->buffer == NULL) {
- qemu_free(req);
- break;
- }
-
- bdrv_aio_read(s->bs, req->out->sector,
- req->buffer,
- req->size / 512,
- virtio_blk_rw_complete,
- req);
- }
+ int i;
+
+ if (req->elem.out_num < 1 || req->elem.in_num < 1) {
+ fprintf(stderr, "virtio-blk missing headers\n");
+ exit(1);
+ }
+
+ if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
+ req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
+ fprintf(stderr, "virtio-blk header not in correct element\n");
+ exit(1);
+ }
+
+ req->out = (void *)req->elem.out_sg[0].iov_base;
+ req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
+
+ if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
+ unsigned int len = sizeof(*req->in);
+
+ req->in->status = VIRTIO_BLK_S_UNSUPP;
+ virtqueue_push(vq, &req->elem, len);
+ virtio_notify(vdev, vq);
+ qemu_free(req);
+ } else if (req->out->type & VIRTIO_BLK_T_OUT) {
+ size_t offset;
+
+ for (i = 1; i < req->elem.out_num; i++)
+ req->size += req->elem.out_sg[i].iov_len;
+
+ req->buffer = qemu_memalign(512, req->size);
+ if (req->buffer == NULL) {
+ qemu_free(req);
+ break;
+ }
+
+ /* We copy the data from the SG list to avoid splitting up the
request. This helps
+ performance a lot until we can pass full sg lists as AIO
operations */
+ offset = 0;
+ for (i = 1; i < req->elem.out_num; i++) {
+ size_t len;
+
+ len = MIN(req->elem.out_sg[i].iov_len,
+ req->size - offset);
+ memcpy(req->buffer + offset,
+ req->elem.out_sg[i].iov_base,
+ len);
+ offset += len;
+ }
+
+ bdrv_aio_write(s->bs, req->out->sector,
+ req->buffer,
+ req->size / 512,
+ virtio_blk_rw_complete,
+ req);
+ } else {
+ for (i = 0; i < req->elem.in_num - 1; i++)
+ req->size += req->elem.in_sg[i].iov_len;
+
+ req->buffer = qemu_memalign(512, req->size);
+ if (req->buffer == NULL) {
+ qemu_free(req);
+ break;
+ }
+
+ bdrv_aio_read(s->bs, req->out->sector,
+ req->buffer,
+ req->size / 512,
+ virtio_blk_rw_complete,
+ req);
+ }
}
/*
* FIXME: Want to check for completions before returning to guest mode,
@@ -265,7 +265,7 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int
version_id)
VirtIOBlock *s = opaque;
if (version_id != 1)
- return -EINVAL;
+ return -EINVAL;
virtio_load(&s->vdev, f);
@@ -273,18 +273,18 @@ static int virtio_blk_load(QEMUFile *f, void *opaque, int
version_id)
}
void *virtio_blk_init(PCIBus *bus, uint16_t vendor, uint16_t device,
- BlockDriverState *bs)
+ BlockDriverState *bs)
{
VirtIOBlock *s;
int cylinders, heads, secs;
static int virtio_blk_id;
s = (VirtIOBlock *)virtio_init_pci(bus, "virtio-blk", vendor, device,
- 0, VIRTIO_ID_BLOCK,
- 0x01, 0x80, 0x00,
- sizeof(struct virtio_blk_config),
sizeof(VirtIOBlock));
+ 0, VIRTIO_ID_BLOCK,
+ 0x01, 0x80, 0x00,
+ sizeof(struct virtio_blk_config),
sizeof(VirtIOBlock));
if (!s)
- return NULL;
+ return NULL;
s->vdev.get_config = virtio_blk_update_config;
s->vdev.get_features = virtio_blk_get_features;
@@ -297,7 +297,7 @@ void *virtio_blk_init(PCIBus *bus, uint16_t vendor,
uint16_t device,
s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
register_savevm("virtio-blk", virtio_blk_id++, 1,
- virtio_blk_save, virtio_blk_load, s);
+ virtio_blk_save, virtio_blk_load, s);
return s;
}
diff --git a/qemu/hw/virtio-net.c b/qemu/hw/virtio-net.c
index b5d5f9e..b179fcc 100644
--- a/qemu/hw/virtio-net.c
+++ b/qemu/hw/virtio-net.c
@@ -19,27 +19,27 @@
/* from Linux's virtio_net.h */
/* The ID for virtio_net */
-#define VIRTIO_ID_NET 1
+#define VIRTIO_ID_NET 1
/* The feature bitmap for virtio net */
-#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
-#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/
partial csum */
-#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
-#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
-#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
-#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
-#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
-#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
-#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
-#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
-#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
-#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum
*/
+#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
+#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
+#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in.
*/
+#define VIRTIO_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+#define VIRTIO_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
+#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
#define TX_TIMER_INTERVAL 150000 /* 150 us */
/* Maximum packet size we can receive from tap device: header + 64k */
-#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 << 10))
+#define VIRTIO_NET_MAX_BUFSIZE (sizeof(struct virtio_net_hdr) + (64 << 10))
/* The config defining mac address (6 bytes) */
struct virtio_net_config
@@ -51,13 +51,13 @@ struct virtio_net_config
* specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr
{
-#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
uint8_t flags;
-#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
-#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO)
-#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO)
-#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
-#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
+#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame
+#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO)
+#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO)
+#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP
+#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set
uint8_t gso_type;
uint16_t hdr_len;
uint16_t gso_size;
@@ -110,17 +110,17 @@ static uint32_t virtio_net_get_features(VirtIODevice
*vdev)
uint32_t features = (1 << VIRTIO_NET_F_MAC);
if (tap_has_vnet_hdr(host)) {
- tap_using_vnet_hdr(host, 1);
- features |= (1 << VIRTIO_NET_F_CSUM);
- features |= (1 << VIRTIO_NET_F_GUEST_CSUM);
- features |= (1 << VIRTIO_NET_F_GUEST_TSO4);
- features |= (1 << VIRTIO_NET_F_GUEST_TSO6);
- features |= (1 << VIRTIO_NET_F_GUEST_ECN);
- features |= (1 << VIRTIO_NET_F_HOST_TSO4);
- features |= (1 << VIRTIO_NET_F_HOST_TSO6);
- features |= (1 << VIRTIO_NET_F_HOST_ECN);
- features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
- /* Kernel can't actually handle UFO in software currently. */
+ tap_using_vnet_hdr(host, 1);
+ features |= (1 << VIRTIO_NET_F_CSUM);
+ features |= (1 << VIRTIO_NET_F_GUEST_CSUM);
+ features |= (1 << VIRTIO_NET_F_GUEST_TSO4);
+ features |= (1 << VIRTIO_NET_F_GUEST_TSO6);
+ features |= (1 << VIRTIO_NET_F_GUEST_ECN);
+ features |= (1 << VIRTIO_NET_F_HOST_TSO4);
+ features |= (1 << VIRTIO_NET_F_HOST_TSO6);
+ features |= (1 << VIRTIO_NET_F_HOST_ECN);
+ features |= (1 << VIRTIO_NET_F_MRG_RXBUF);
+ /* Kernel can't actually handle UFO in software currently. */
}
return features;
@@ -134,13 +134,13 @@ static void virtio_net_set_features(VirtIODevice *vdev,
uint32_t features)
n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
if (!tap_has_vnet_hdr(host) || !host->set_offload)
- return;
+ return;
host->set_offload(host,
- (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
- (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
- (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
- (features >> VIRTIO_NET_F_GUEST_ECN) & 1);
+ (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
+ (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
+ (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
+ (features >> VIRTIO_NET_F_GUEST_ECN) & 1);
}
/* RX */
@@ -150,7 +150,7 @@ static void virtio_net_handle_rx(VirtIODevice *vdev,
VirtQueue *vq)
/* We now have RX buffers, signal to the IO thread to break out of the
select to re-poll the tap file descriptor */
if (kvm_enabled())
- qemu_kvm_notify_work();
+ qemu_kvm_notify_work();
}
static int virtio_net_can_receive(void *opaque)
@@ -158,14 +158,14 @@ static int virtio_net_can_receive(void *opaque)
VirtIONet *n = opaque;
if (n->rx_vq->vring.avail == NULL ||
- !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
- return 0;
+ !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
if (n->rx_vq->vring.avail->idx == n->rx_vq->last_avail_idx ||
- (n->mergeable_rx_bufs &&
- !virtqueue_avail_bytes(n->rx_vq, VIRTIO_NET_MAX_BUFSIZE, 0))) {
- n->rx_vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
- return 0;
+ (n->mergeable_rx_bufs &&
+ !virtqueue_avail_bytes(n->rx_vq, VIRTIO_NET_MAX_BUFSIZE, 0))) {
+ n->rx_vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+ return 0;
}
n->rx_vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
@@ -206,17 +206,17 @@ static int iov_fill(struct iovec *iov, int iovcnt, const
void *buf, int count)
offset = i = 0;
while (offset < count && i < iovcnt) {
- int len = MIN(iov[i].iov_len, count - offset);
- memcpy(iov[i].iov_base, buf + offset, len);
- offset += len;
- i++;
+ int len = MIN(iov[i].iov_len, count - offset);
+ memcpy(iov[i].iov_base, buf + offset, len);
+ offset += len;
+ i++;
}
return offset;
}
static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
- const void *buf, int size, int hdr_len)
+ const void *buf, int size, int hdr_len)
{
struct virtio_net_hdr *hdr = iov[0].iov_base;
int offset;
@@ -225,9 +225,9 @@ static int receive_header(VirtIONet *n, struct iovec *iov,
int iovcnt,
hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
if (tap_has_vnet_hdr(n->vc->vlan->first_client)) {
- memcpy(hdr, buf, sizeof(*hdr));
- offset = sizeof(*hdr);
- work_around_broken_dhclient(hdr, buf + offset, size - offset);
+ memcpy(hdr, buf, sizeof(*hdr));
+ offset = sizeof(*hdr);
+ work_around_broken_dhclient(hdr, buf + offset, size - offset);
}
/* We only ever receive a struct virtio_net_hdr from the tapfd,
@@ -247,56 +247,56 @@ static void virtio_net_receive(void *opaque, const
uint8_t *buf, int size)
/* hdr_len refers to the header we supply to the guest */
hdr_len = n->mergeable_rx_bufs ?
- sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);
+ sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct
virtio_net_hdr);
offset = i = 0;
while (offset < size) {
- VirtQueueElement elem;
- int len, total;
-
- len = total = 0;
-
- if ((i != 0 && !n->mergeable_rx_bufs) ||
- virtqueue_pop(n->rx_vq, &elem) == 0) {
- if (i == 0)
- return;
- fprintf(stderr, "virtio-net truncating packet\n");
- exit(1);
- }
-
- if (elem.in_num < 1) {
- fprintf(stderr, "virtio-net receive queue contains no in
buffers\n");
- exit(1);
- }
-
- if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
- fprintf(stderr, "virtio-net header not in first element\n");
- exit(1);
- }
-
- if (i == 0) {
- if (n->mergeable_rx_bufs)
- mhdr = (struct virtio_net_hdr_mrg_rxbuf
*)elem.in_sg[0].iov_base;
-
- offset += receive_header(n, &elem.in_sg[0], elem.in_num,
- buf + offset, size - offset, hdr_len);
- total += hdr_len;
- }
-
- /* copy in packet. ugh */
- len = iov_fill(&elem.in_sg[0], elem.in_num,
- buf + offset, size - offset);
- total += len;
-
- /* signal other side */
- virtqueue_fill(n->rx_vq, &elem, total, i++);
-
- offset += len;
+ VirtQueueElement elem;
+ int len, total;
+
+ len = total = 0;
+
+ if ((i != 0 && !n->mergeable_rx_bufs) ||
+ virtqueue_pop(n->rx_vq, &elem) == 0) {
+ if (i == 0)
+ return;
+ fprintf(stderr, "virtio-net truncating packet\n");
+ exit(1);
+ }
+
+ if (elem.in_num < 1) {
+ fprintf(stderr, "virtio-net receive queue contains no in
buffers\n");
+ exit(1);
+ }
+
+ if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
+ fprintf(stderr, "virtio-net header not in first element\n");
+ exit(1);
+ }
+
+ if (i == 0) {
+ if (n->mergeable_rx_bufs)
+ mhdr = (struct virtio_net_hdr_mrg_rxbuf
*)elem.in_sg[0].iov_base;
+
+ offset += receive_header(n, &elem.in_sg[0], elem.in_num,
+ buf + offset, size - offset, hdr_len);
+ total += hdr_len;
+ }
+
+ /* copy in packet. ugh */
+ len = iov_fill(&elem.in_sg[0], elem.in_num,
+ buf + offset, size - offset);
+ total += len;
+
+ /* signal other side */
+ virtqueue_fill(n->rx_vq, &elem, total, i++);
+
+ offset += len;
}
if (mhdr)
- mhdr->num_buffers = i;
+ mhdr->num_buffers = i;
virtqueue_flush(n->rx_vq, i);
virtio_notify(&n->vdev, n->rx_vq);
@@ -312,37 +312,37 @@ static void virtio_net_flush_tx(VirtIONet *n, VirtQueue
*vq)
return;
while (virtqueue_pop(vq, &elem)) {
- ssize_t len = 0;
- unsigned int out_num = elem.out_num;
- struct iovec *out_sg = &elem.out_sg[0];
- unsigned hdr_len;
+ ssize_t len = 0;
+ unsigned int out_num = elem.out_num;
+ struct iovec *out_sg = &elem.out_sg[0];
+ unsigned hdr_len;
/* hdr_len refers to the header received from the guest */
- hdr_len = n->mergeable_rx_bufs ?
- sizeof(struct virtio_net_hdr_mrg_rxbuf) :
- sizeof(struct virtio_net_hdr);
-
- if (out_num < 1 || out_sg->iov_len != hdr_len) {
- fprintf(stderr, "virtio-net header not in first element\n");
- exit(1);
- }
-
- /* ignore the header if GSO is not supported */
- if (!has_vnet_hdr) {
- out_num--;
- out_sg++;
- len += hdr_len;
- } else if (n->mergeable_rx_bufs) {
- /* tapfd expects a struct virtio_net_hdr */
- hdr_len -= sizeof(struct virtio_net_hdr);
- out_sg->iov_len -= hdr_len;
- len += hdr_len;
- }
-
- len += qemu_sendv_packet(n->vc, out_sg, out_num);
-
- virtqueue_push(vq, &elem, len);
- virtio_notify(&n->vdev, vq);
+ hdr_len = n->mergeable_rx_bufs ?
+ sizeof(struct virtio_net_hdr_mrg_rxbuf) :
+ sizeof(struct virtio_net_hdr);
+
+ if (out_num < 1 || out_sg->iov_len != hdr_len) {
+ fprintf(stderr, "virtio-net header not in first element\n");
+ exit(1);
+ }
+
+ /* ignore the header if GSO is not supported */
+ if (!has_vnet_hdr) {
+ out_num--;
+ out_sg++;
+ len += hdr_len;
+ } else if (n->mergeable_rx_bufs) {
+ /* tapfd expects a struct virtio_net_hdr */
+ hdr_len -= sizeof(struct virtio_net_hdr);
+ out_sg->iov_len -= hdr_len;
+ len += hdr_len;
+ }
+
+ len += qemu_sendv_packet(n->vc, out_sg, out_num);
+
+ virtqueue_push(vq, &elem, len);
+ virtio_notify(&n->vdev, vq);
}
}
@@ -351,15 +351,15 @@ static void virtio_net_handle_tx(VirtIODevice *vdev,
VirtQueue *vq)
VirtIONet *n = to_virtio_net(vdev);
if (n->tx_timer_active) {
- vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
- qemu_del_timer(n->tx_timer);
- n->tx_timer_active = 0;
- virtio_net_flush_tx(n, vq);
+ vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;
+ qemu_del_timer(n->tx_timer);
+ n->tx_timer_active = 0;
+ virtio_net_flush_tx(n, vq);
} else {
- qemu_mod_timer(n->tx_timer,
- qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
- n->tx_timer_active = 1;
- vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
+ qemu_mod_timer(n->tx_timer,
+ qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
+ n->tx_timer_active = 1;
+ vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
}
}
@@ -392,7 +392,7 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int
version_id)
VirtIONet *n = opaque;
if (version_id != 1)
- return -EINVAL;
+ return -EINVAL;
virtio_load(&n->vdev, f);
@@ -400,8 +400,8 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int
version_id)
n->tx_timer_active = qemu_get_be32(f);
if (n->tx_timer_active) {
- qemu_mod_timer(n->tx_timer,
- qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
+ qemu_mod_timer(n->tx_timer,
+ qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
}
return 0;
@@ -413,11 +413,11 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int
devfn)
static int virtio_net_id;
n = (VirtIONet *)virtio_init_pci(bus, "virtio-net", 6900, 0x1000,
- 0, VIRTIO_ID_NET,
- 0x02, 0x00, 0x00,
- 6, sizeof(VirtIONet));
+ 0, VIRTIO_ID_NET,
+ 0x02, 0x00, 0x00,
+ 6, sizeof(VirtIONet));
if (!n)
- return NULL;
+ return NULL;
n->vdev.get_config = virtio_net_update_config;
n->vdev.get_features = virtio_net_get_features;
@@ -433,7 +433,7 @@ PCIDevice *virtio_net_init(PCIBus *bus, NICInfo *nd, int
devfn)
n->mergeable_rx_bufs = 0;
register_savevm("virtio-net", virtio_net_id++, 1,
- virtio_net_save, virtio_net_load, n);
+ virtio_net_save, virtio_net_load, n);
return (PCIDevice *)n;
}
diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index 303f5e7..6d7425c 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -20,35 +20,35 @@
/* from Linux's linux/virtio_pci.h */
/* A 32-bit r/o bitmask of the features supported by the host */
-#define VIRTIO_PCI_HOST_FEATURES 0
+#define VIRTIO_PCI_HOST_FEATURES 0
/* A 32-bit r/w bitmask of features activated by the guest */
-#define VIRTIO_PCI_GUEST_FEATURES 4
+#define VIRTIO_PCI_GUEST_FEATURES 4
/* A 32-bit r/w PFN for the currently selected queue */
-#define VIRTIO_PCI_QUEUE_PFN 8
+#define VIRTIO_PCI_QUEUE_PFN 8
/* A 16-bit r/o queue size for the currently selected queue */
-#define VIRTIO_PCI_QUEUE_NUM 12
+#define VIRTIO_PCI_QUEUE_NUM 12
/* A 16-bit r/w queue selector */
-#define VIRTIO_PCI_QUEUE_SEL 14
+#define VIRTIO_PCI_QUEUE_SEL 14
/* A 16-bit r/w queue notifier */
-#define VIRTIO_PCI_QUEUE_NOTIFY 16
+#define VIRTIO_PCI_QUEUE_NOTIFY 16
/* An 8-bit device status register. */
-#define VIRTIO_PCI_STATUS 18
+#define VIRTIO_PCI_STATUS 18
/* An 8-bit r/o interrupt status register. Reading the value will return the
* current contents of the ISR and will also clear it. This is effectively
* a read-and-acknowledge. */
-#define VIRTIO_PCI_ISR 19
+#define VIRTIO_PCI_ISR 19
-#define VIRTIO_PCI_CONFIG 20
+#define VIRTIO_PCI_CONFIG 20
/* Virtio ABI version, if we increment this, we break the guest driver. */
-#define VIRTIO_PCI_ABI_VERSION 0
+#define VIRTIO_PCI_ABI_VERSION 0
/* QEMU doesn't strictly need write barriers since everything runs in
* lock-step. We'll leave the calls to wmb() in though to make it obvious for
@@ -65,29 +65,29 @@ static void *virtio_map_gpa(target_phys_addr_t addr, size_t
size)
off = cpu_get_physical_page_desc(addr);
if ((off & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- fprintf(stderr, "virtio DMA to IO ram\n");
- exit(1);
+ fprintf(stderr, "virtio DMA to IO ram\n");
+ exit(1);
}
off = (off & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK);
for (addr1 = addr + TARGET_PAGE_SIZE;
- addr1 < TARGET_PAGE_ALIGN(addr + size);
- addr1 += TARGET_PAGE_SIZE) {
- ram_addr_t off1;
-
- off1 = cpu_get_physical_page_desc(addr1);
- if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- fprintf(stderr, "virtio DMA to IO ram\n");
- exit(1);
- }
-
- off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);
-
- if (off1 != (off + (addr1 - addr))) {
- fprintf(stderr, "discontigous virtio memory\n");
- exit(1);
- }
+ addr1 < TARGET_PAGE_ALIGN(addr + size);
+ addr1 += TARGET_PAGE_SIZE) {
+ ram_addr_t off1;
+
+ off1 = cpu_get_physical_page_desc(addr1);
+ if ((off1 & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+ fprintf(stderr, "virtio DMA to IO ram\n");
+ exit(1);
+ }
+
+ off1 = (off1 & TARGET_PAGE_MASK) | (addr1 & ~TARGET_PAGE_MASK);
+
+ if (off1 != (off + (addr1 - addr))) {
+ fprintf(stderr, "discontigous virtio memory\n");
+ exit(1);
+ }
}
return phys_ram_base + off;
@@ -96,8 +96,8 @@ static void *virtio_map_gpa(target_phys_addr_t addr, size_t
size)
static size_t virtqueue_size(int num)
{
return TARGET_PAGE_ALIGN((sizeof(VRingDesc) * num) +
- (sizeof(VRingAvail) + sizeof(uint16_t) * num)) +
- (sizeof(VRingUsed) + sizeof(VRingUsedElem) * num);
+ (sizeof(VRingAvail) + sizeof(uint16_t) * num)) +
+ (sizeof(VRingUsed) + sizeof(VRingUsedElem) * num);
}
static void virtqueue_init(VirtQueue *vq, void *p)
@@ -108,7 +108,7 @@ static void virtqueue_init(VirtQueue *vq, void *p)
}
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len, unsigned int idx)
+ unsigned int len, unsigned int idx)
{
VRingUsedElem *used;
@@ -129,7 +129,7 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len)
+ unsigned int len)
{
virtqueue_fill(vq, elem, len, 0);
virtqueue_flush(vq, 1);
@@ -141,8 +141,8 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int
idx)
/* Check it isn't doing very strange things with descriptor numbers. */
if (num_heads > vq->vring.num)
- errx(1, "Guest moved used index from %u to %u",
- idx, vq->vring.avail->idx);
+ errx(1, "Guest moved used index from %u to %u",
+ idx, vq->vring.avail->idx);
return num_heads;
}
@@ -157,7 +157,7 @@ static unsigned int virtqueue_get_head(VirtQueue *vq,
unsigned int idx)
/* If their number is silly, that's a fatal mistake. */
if (head >= vq->vring.num)
- errx(1, "Guest says index %u is available", head);
+ errx(1, "Guest says index %u is available", head);
return head;
}
@@ -168,7 +168,7 @@ static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned
int i)
/* If this descriptor says it doesn't chain, we're done. */
if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT))
- return vq->vring.num;
+ return vq->vring.num;
/* Check they're not leading us off end of descriptors. */
next = vq->vring.desc[i].next;
@@ -176,7 +176,7 @@ static unsigned virtqueue_next_desc(VirtQueue *vq, unsigned
int i)
wmb();
if (next >= vq->vring.num)
- errx(1, "Desc next is %u", next);
+ errx(1, "Desc next is %u", next);
return next;
}
@@ -190,24 +190,24 @@ int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes,
int out_bytes)
num_bufs = in_total = out_total = 0;
while (virtqueue_num_heads(vq, idx)) {
- int i;
-
- i = virtqueue_get_head(vq, idx++);
- do {
- /* If we've got too many, that implies a descriptor loop. */
- if (++num_bufs > vq->vring.num)
- errx(1, "Looped descriptor");
-
- if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) {
- if (in_bytes > 0 &&
- (in_total += vq->vring.desc[i].len) >= in_bytes)
- return 1;
- } else {
- if (out_bytes > 0 &&
- (out_total += vq->vring.desc[i].len) >= out_bytes)
- return 1;
- }
- } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
+ int i;
+
+ i = virtqueue_get_head(vq, idx++);
+ do {
+ /* If we've got too many, that implies a descriptor loop. */
+ if (++num_bufs > vq->vring.num)
+ errx(1, "Looped descriptor");
+
+ if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) {
+ if (in_bytes > 0 &&
+ (in_total += vq->vring.desc[i].len) >= in_bytes)
+ return 1;
+ } else {
+ if (out_bytes > 0 &&
+ (out_total += vq->vring.desc[i].len) >= out_bytes)
+ return 1;
+ }
+ } while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
}
return 0;
@@ -218,29 +218,29 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
unsigned int i, head;
if (!virtqueue_num_heads(vq, vq->last_avail_idx))
- return 0;
+ return 0;
/* When we start there are none of either input nor output. */
elem->out_num = elem->in_num = 0;
i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
do {
- struct iovec *sg;
-
- if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE)
- sg = &elem->in_sg[elem->in_num++];
- else
- sg = &elem->out_sg[elem->out_num++];
-
- /* Grab the first descriptor, and check it's OK. */
- sg->iov_len = vq->vring.desc[i].len;
- sg->iov_base = virtio_map_gpa(vq->vring.desc[i].addr, sg->iov_len);
- if (sg->iov_base == NULL)
- errx(1, "Invalid mapping\n");
-
- /* If we've got too many, that implies a descriptor loop. */
- if ((elem->in_num + elem->out_num) > vq->vring.num)
- errx(1, "Looped descriptor");
+ struct iovec *sg;
+
+ if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE)
+ sg = &elem->in_sg[elem->in_num++];
+ else
+ sg = &elem->out_sg[elem->out_num++];
+
+ /* Grab the first descriptor, and check it's OK. */
+ sg->iov_len = vq->vring.desc[i].len;
+ sg->iov_base = virtio_map_gpa(vq->vring.desc[i].addr, sg->iov_len);
+ if (sg->iov_base == NULL)
+ errx(1, "Invalid mapping\n");
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if ((elem->in_num + elem->out_num) > vq->vring.num)
+ errx(1, "Looped descriptor");
} while ((i = virtqueue_next_desc(vq, i)) != vq->vring.num);
elem->index = head;
@@ -294,34 +294,34 @@ static void virtio_ioport_write(void *opaque, uint32_t
addr, uint32_t val)
switch (addr) {
case VIRTIO_PCI_GUEST_FEATURES:
- if (vdev->set_features)
- vdev->set_features(vdev, val);
- vdev->features = val;
- break;
+ if (vdev->set_features)
+ vdev->set_features(vdev, val);
+ vdev->features = val;
+ break;
case VIRTIO_PCI_QUEUE_PFN:
- pa = (ram_addr_t)val << TARGET_PAGE_BITS;
- vdev->vq[vdev->queue_sel].pfn = val;
- if (pa == 0) {
+ pa = (ram_addr_t)val << TARGET_PAGE_BITS;
+ vdev->vq[vdev->queue_sel].pfn = val;
+ if (pa == 0) {
virtio_reset(vdev);
- } else {
- size_t size = virtqueue_size(vdev->vq[vdev->queue_sel].vring.num);
- virtqueue_init(&vdev->vq[vdev->queue_sel],
- virtio_map_gpa(pa, size));
- }
- break;
+ } else {
+ size_t size = virtqueue_size(vdev->vq[vdev->queue_sel].vring.num);
+ virtqueue_init(&vdev->vq[vdev->queue_sel],
+ virtio_map_gpa(pa, size));
+ }
+ break;
case VIRTIO_PCI_QUEUE_SEL:
- if (val < VIRTIO_PCI_QUEUE_MAX)
- vdev->queue_sel = val;
- break;
+ if (val < VIRTIO_PCI_QUEUE_MAX)
+ vdev->queue_sel = val;
+ break;
case VIRTIO_PCI_QUEUE_NOTIFY:
- if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
- vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
- break;
+ if (val < VIRTIO_PCI_QUEUE_MAX && vdev->vq[val].vring.desc)
+ vdev->vq[val].handle_output(vdev, &vdev->vq[val]);
+ break;
case VIRTIO_PCI_STATUS:
- vdev->status = val & 0xFF;
- if (vdev->status == 0)
- virtio_reset(vdev);
- break;
+ vdev->status = val & 0xFF;
+ if (vdev->status == 0)
+ virtio_reset(vdev);
+ break;
}
}
@@ -334,32 +334,32 @@ static uint32_t virtio_ioport_read(void *opaque, uint32_t
addr)
switch (addr) {
case VIRTIO_PCI_HOST_FEATURES:
- ret = vdev->get_features(vdev);
- ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
- break;
+ ret = vdev->get_features(vdev);
+ ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
+ break;
case VIRTIO_PCI_GUEST_FEATURES:
- ret = vdev->features;
- break;
+ ret = vdev->features;
+ break;
case VIRTIO_PCI_QUEUE_PFN:
- ret = vdev->vq[vdev->queue_sel].pfn;
- break;
+ ret = vdev->vq[vdev->queue_sel].pfn;
+ break;
case VIRTIO_PCI_QUEUE_NUM:
- ret = vdev->vq[vdev->queue_sel].vring.num;
- break;
+ ret = vdev->vq[vdev->queue_sel].vring.num;
+ break;
case VIRTIO_PCI_QUEUE_SEL:
- ret = vdev->queue_sel;
- break;
+ ret = vdev->queue_sel;
+ break;
case VIRTIO_PCI_STATUS:
- ret = vdev->status;
- break;
+ ret = vdev->status;
+ break;
case VIRTIO_PCI_ISR:
- /* reading from the ISR also clears it. */
- ret = vdev->isr;
- vdev->isr = 0;
- virtio_update_irq(vdev);
- break;
+ /* reading from the ISR also clears it. */
+ ret = vdev->isr;
+ vdev->isr = 0;
+ virtio_update_irq(vdev);
+ break;
default:
- break;
+ break;
}
return ret;
@@ -374,7 +374,7 @@ static uint32_t virtio_config_readb(void *opaque, uint32_t
addr)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return (uint32_t)-1;
+ return (uint32_t)-1;
memcpy(&val, vdev->config + addr, sizeof(val));
return val;
@@ -389,7 +389,7 @@ static uint32_t virtio_config_readw(void *opaque, uint32_t
addr)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return (uint32_t)-1;
+ return (uint32_t)-1;
memcpy(&val, vdev->config + addr, sizeof(val));
return val;
@@ -404,7 +404,7 @@ static uint32_t virtio_config_readl(void *opaque, uint32_t
addr)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return (uint32_t)-1;
+ return (uint32_t)-1;
memcpy(&val, vdev->config + addr, sizeof(val));
return val;
@@ -417,7 +417,7 @@ static void virtio_config_writeb(void *opaque, uint32_t
addr, uint32_t data)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return;
+ return;
memcpy(vdev->config + addr, &val, sizeof(val));
@@ -432,7 +432,7 @@ static void virtio_config_writew(void *opaque, uint32_t
addr, uint32_t data)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return;
+ return;
memcpy(vdev->config + addr, &val, sizeof(val));
@@ -447,7 +447,7 @@ static void virtio_config_writel(void *opaque, uint32_t
addr, uint32_t data)
addr -= vdev->addr + VIRTIO_PCI_CONFIG;
if (addr > (vdev->config_len - sizeof(val)))
- return;
+ return;
memcpy(vdev->config + addr, &val, sizeof(val));
@@ -456,47 +456,47 @@ static void virtio_config_writel(void *opaque, uint32_t
addr, uint32_t data)
}
static void virtio_map(PCIDevice *pci_dev, int region_num,
- uint32_t addr, uint32_t size, int type)
+ uint32_t addr, uint32_t size, int type)
{
VirtIODevice *vdev = to_virtio_device(pci_dev);
int i;
vdev->addr = addr;
for (i = 0; i < 3; i++) {
- register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
- register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
+ register_ioport_write(addr, 20, 1 << i, virtio_ioport_write, vdev);
+ register_ioport_read(addr, 20, 1 << i, virtio_ioport_read, vdev);
}
if (vdev->config_len) {
- register_ioport_write(addr + 20, vdev->config_len, 1,
- virtio_config_writeb, vdev);
- register_ioport_write(addr + 20, vdev->config_len, 2,
- virtio_config_writew, vdev);
- register_ioport_write(addr + 20, vdev->config_len, 4,
- virtio_config_writel, vdev);
- register_ioport_read(addr + 20, vdev->config_len, 1,
- virtio_config_readb, vdev);
- register_ioport_read(addr + 20, vdev->config_len, 2,
- virtio_config_readw, vdev);
- register_ioport_read(addr + 20, vdev->config_len, 4,
- virtio_config_readl, vdev);
-
- vdev->get_config(vdev, vdev->config);
+ register_ioport_write(addr + 20, vdev->config_len, 1,
+ virtio_config_writeb, vdev);
+ register_ioport_write(addr + 20, vdev->config_len, 2,
+ virtio_config_writew, vdev);
+ register_ioport_write(addr + 20, vdev->config_len, 4,
+ virtio_config_writel, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 1,
+ virtio_config_readb, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 2,
+ virtio_config_readw, vdev);
+ register_ioport_read(addr + 20, vdev->config_len, 4,
+ virtio_config_readl, vdev);
+
+ vdev->get_config(vdev, vdev->config);
}
}
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
- void (*handle_output)(VirtIODevice *, VirtQueue *))
+ void (*handle_output)(VirtIODevice *, VirtQueue *))
{
int i;
for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
- if (vdev->vq[i].vring.num == 0)
- break;
+ if (vdev->vq[i].vring.num == 0)
+ break;
}
if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
- abort();
+ abort();
vdev->vq[i].vring.num = queue_size;
vdev->vq[i].handle_output = handle_output;
@@ -508,8 +508,8 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
/* Always notify when queue is empty */
if ((vq->inuse || vq->vring.avail->idx != vq->last_avail_idx) &&
- (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- return;
+ (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ return;
vdev->isr |= 0x01;
virtio_update_irq(vdev);
@@ -536,19 +536,19 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
qemu_put_buffer(f, vdev->config, vdev->config_len);
for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
- if (vdev->vq[i].vring.num == 0)
- break;
+ if (vdev->vq[i].vring.num == 0)
+ break;
}
qemu_put_be32(f, i);
for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
- if (vdev->vq[i].vring.num == 0)
- break;
+ if (vdev->vq[i].vring.num == 0)
+ break;
- qemu_put_be32(f, vdev->vq[i].vring.num);
- qemu_put_be32s(f, &vdev->vq[i].pfn);
- qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
+ qemu_put_be32(f, vdev->vq[i].vring.num);
+ qemu_put_be32s(f, &vdev->vq[i].pfn);
+ qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
}
}
@@ -569,29 +569,29 @@ void virtio_load(VirtIODevice *vdev, QEMUFile *f)
num = qemu_get_be32(f);
for (i = 0; i < num; i++) {
- vdev->vq[i].vring.num = qemu_get_be32(f);
- qemu_get_be32s(f, &vdev->vq[i].pfn);
- qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
-
- if (vdev->vq[i].pfn) {
- size_t size;
- target_phys_addr_t pa;
-
- pa = (ram_addr_t)vdev->vq[i].pfn << TARGET_PAGE_BITS;
- size = virtqueue_size(vdev->vq[i].vring.num);
- virtqueue_init(&vdev->vq[i], virtio_map_gpa(pa, size));
- }
+ vdev->vq[i].vring.num = qemu_get_be32(f);
+ qemu_get_be32s(f, &vdev->vq[i].pfn);
+ qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
+
+ if (vdev->vq[i].pfn) {
+ size_t size;
+ target_phys_addr_t pa;
+
+ pa = (ram_addr_t)vdev->vq[i].pfn << TARGET_PAGE_BITS;
+ size = virtqueue_size(vdev->vq[i].vring.num);
+ virtqueue_init(&vdev->vq[i], virtio_map_gpa(pa, size));
+ }
}
virtio_update_irq(vdev);
}
VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
- uint16_t vendor, uint16_t device,
- uint16_t subvendor, uint16_t subdevice,
- uint8_t class_code, uint8_t subclass_code,
- uint8_t pif, size_t config_size,
- size_t struct_size)
+ uint16_t vendor, uint16_t device,
+ uint16_t subvendor, uint16_t subdevice,
+ uint8_t class_code, uint8_t subclass_code,
+ uint8_t pif, size_t config_size,
+ size_t struct_size)
{
VirtIODevice *vdev;
PCIDevice *pci_dev;
@@ -599,9 +599,9 @@ VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
uint32_t size;
pci_dev = pci_register_device(bus, name, struct_size,
- -1, NULL, NULL);
+ -1, NULL, NULL);
if (!pci_dev)
- return NULL;
+ return NULL;
vdev = to_virtio_device(pci_dev);
@@ -633,16 +633,16 @@ VirtIODevice *virtio_init_pci(PCIBus *bus, const char
*name,
vdev->name = name;
vdev->config_len = config_size;
if (vdev->config_len)
- vdev->config = qemu_mallocz(config_size);
+ vdev->config = qemu_mallocz(config_size);
else
- vdev->config = NULL;
+ vdev->config = NULL;
size = 20 + config_size;
if (size & (size-1))
size = 1 << fls(size);
pci_register_io_region(pci_dev, 0, size, PCI_ADDRESS_SPACE_IO,
- virtio_map);
+ virtio_map);
qemu_register_reset(virtio_reset, vdev);
return vdev;
diff --git a/qemu/hw/virtio.h b/qemu/hw/virtio.h
index 15f020b..40c9e14 100644
--- a/qemu/hw/virtio.h
+++ b/qemu/hw/virtio.h
@@ -22,13 +22,13 @@
/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
-#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
+#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
/* We have found a driver for the device. */
-#define VIRTIO_CONFIG_S_DRIVER 2
+#define VIRTIO_CONFIG_S_DRIVER 2
/* Driver has used its parts of the config, and is happy */
-#define VIRTIO_CONFIG_S_DRIVER_OK 4
+#define VIRTIO_CONFIG_S_DRIVER_OK 4
/* We've given up on this device. */
-#define VIRTIO_CONFIG_S_FAILED 0x80
+#define VIRTIO_CONFIG_S_FAILED 0x80
/* We notify when the ring is completely used, even if the guest is supressing
* callbacks */
@@ -37,14 +37,14 @@
/* from Linux's linux/virtio_ring.h */
/* This marks a buffer as continuing via the next field. */
-#define VRING_DESC_F_NEXT 1
+#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
-#define VRING_DESC_F_WRITE 2
+#define VRING_DESC_F_WRITE 2
/* This means don't notify other side when buffer added. */
-#define VRING_USED_F_NO_NOTIFY 1
+#define VRING_USED_F_NO_NOTIFY 1
/* This means don't interrupt guest when buffer consumed. */
-#define VRING_AVAIL_F_NO_INTERRUPT 1
+#define VRING_AVAIL_F_NO_INTERRUPT 1
typedef struct VirtQueue VirtQueue;
typedef struct VirtIODevice VirtIODevice;
@@ -105,7 +105,7 @@ typedef struct VirtQueueElement
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElement;
-#define VIRTIO_PCI_QUEUE_MAX 16
+#define VIRTIO_PCI_QUEUE_MAX 16
struct VirtIODevice
{
@@ -127,21 +127,21 @@ struct VirtIODevice
};
VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
- uint16_t vendor, uint16_t device,
- uint16_t subvendor, uint16_t subdevice,
- uint8_t class_code, uint8_t subclass_code,
- uint8_t pif, size_t config_size,
- size_t struct_size);
+ uint16_t vendor, uint16_t device,
+ uint16_t subvendor, uint16_t subdevice,
+ uint8_t class_code, uint8_t subclass_code,
+ uint8_t pif, size_t config_size,
+ size_t struct_size);
VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
- void (*handle_output)(VirtIODevice *,
- VirtQueue *));
+ void (*handle_output)(VirtIODevice *,
+ VirtQueue *));
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len);
+ unsigned int len);
void virtqueue_flush(VirtQueue *vq, unsigned int count);
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len, unsigned int idx);
+ unsigned int len, unsigned int idx);
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, int in_bytes, int out_bytes);
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html