Rusty Russell <ru...@rustcorp.com.au> writes: > Virtio is currently defined to work as "guest endian", but this is a > problem if the guest can change endian. As most targets can't change > endian, we make it a per-target option to avoid pessimising. > > This is based on a simpler patch by Anthony Liguori, which only handled > the vring accesses. We also need some drivers to access these helpers, > eg. for data which contains headers. > > Signed-off-by: Rusty Russell <ru...@rustcorp.com.au> > --- > hw/virtio/virtio.c | 46 +++++++++---- > include/hw/virtio/virtio-access.h | 138 > ++++++++++++++++++++++++++++++++++++++ > 2 files changed, 170 insertions(+), 14 deletions(-) > create mode 100644 include/hw/virtio/virtio-access.h > > diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c > index 8176c14..2887f17 100644 > --- a/hw/virtio/virtio.c > +++ b/hw/virtio/virtio.c > @@ -18,6 +18,7 @@ > #include "hw/virtio/virtio.h" > #include "qemu/atomic.h" > #include "hw/virtio/virtio-bus.h" > +#include "hw/virtio/virtio-access.h" > > /* The alignment to use between consumer and producer parts of vring. > * x86 pagesize again. */ > @@ -84,6 +85,20 @@ struct VirtQueue > EventNotifier host_notifier; > }; > > +#ifdef TARGET_VIRTIO_SWAPENDIAN > +bool virtio_byteswap; > + > +/* Ask target code if we should swap endian for all vring and config access. > */ > +static void mark_endian(void) > +{ > + virtio_byteswap = virtio_swap_endian(); > +} > +#else > +static void mark_endian(void) > +{ > +} > +#endif > +
It would be very good to avoid a target specific define here. We would like to move to only building a single copy of the virtio code. We have a mechanism to do weak functions via stubs/. I think it would be better to do cpu_get_byteswap() as a stub function and then overload it in the ppc64 code. > /* virt queue functions */ > static void virtqueue_init(VirtQueue *vq) > { > @@ -100,49 +115,49 @@ static inline uint64_t vring_desc_addr(hwaddr desc_pa, > int i) > { > hwaddr pa; > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr); > - return ldq_phys(pa); > + return virtio_ldq_phys(pa); > } > > static inline uint32_t vring_desc_len(hwaddr desc_pa, int i) > { > hwaddr pa; > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len); > - return ldl_phys(pa); > + return virtio_ldl_phys(pa); > } > > static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i) > { > hwaddr pa; > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline uint16_t vring_desc_next(hwaddr desc_pa, int i) > { > hwaddr pa; > pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline uint16_t vring_avail_flags(VirtQueue *vq) > { > hwaddr pa; > pa = vq->vring.avail + offsetof(VRingAvail, flags); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline uint16_t vring_avail_idx(VirtQueue *vq) > { > hwaddr pa; > pa = vq->vring.avail + offsetof(VRingAvail, idx); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline uint16_t vring_avail_ring(VirtQueue *vq, int i) > { > hwaddr pa; > pa = vq->vring.avail + offsetof(VRingAvail, ring[i]); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline uint16_t vring_used_event(VirtQueue *vq) > @@ -154,42 +169,42 @@ static inline void vring_used_ring_id(VirtQueue *vq, > int i, uint32_t val) > { 
> hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, ring[i].id); > - stl_phys(pa, val); > + virtio_stl_phys(pa, val); > } > > static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val) > { > hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, ring[i].len); > - stl_phys(pa, val); > + virtio_stl_phys(pa, val); > } > > static uint16_t vring_used_idx(VirtQueue *vq) > { > hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, idx); > - return lduw_phys(pa); > + return virtio_lduw_phys(pa); > } > > static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val) > { > hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, idx); > - stw_phys(pa, val); > + virtio_stw_phys(pa, val); > } > > static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask) > { > hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, flags); > - stw_phys(pa, lduw_phys(pa) | mask); > + virtio_stw_phys(pa, virtio_lduw_phys(pa) | mask); > } > > static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask) > { > hwaddr pa; > pa = vq->vring.used + offsetof(VRingUsed, flags); > - stw_phys(pa, lduw_phys(pa) & ~mask); > + virtio_stw_phys(pa, virtio_lduw_phys(pa) & ~mask); > } > > static inline void vring_avail_event(VirtQueue *vq, uint16_t val) > @@ -199,7 +214,7 @@ static inline void vring_avail_event(VirtQueue *vq, > uint16_t val) > return; > } > pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]); > - stw_phys(pa, val); > + virtio_stw_phys(pa, val); > } > > void virtio_queue_set_notification(VirtQueue *vq, int enable) > @@ -525,6 +540,9 @@ void virtio_set_status(VirtIODevice *vdev, uint8_t val) > VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); > trace_virtio_set_status(vdev, val); > > + /* If guest virtio endian is uncertain, set it now. 
*/ > + mark_endian(); > + > if (k->set_status) { > k->set_status(vdev, val); > } > diff --git a/include/hw/virtio/virtio-access.h > b/include/hw/virtio/virtio-access.h > new file mode 100644 > index 0000000..b1d531e > --- /dev/null > +++ b/include/hw/virtio/virtio-access.h > @@ -0,0 +1,138 @@ > +/* > + * Virtio Accessor Support: In case your target can change endian. > + * > + * Copyright IBM, Corp. 2013 > + * > + * Authors: > + * Rusty Russell <ru...@au.ibm.com> > + * > + * This work is licensed under the terms of the GNU GPL, version 2. See > + * the COPYING file in the top-level directory. > + * > + */ > +#ifndef _QEMU_VIRTIO_ACCESS_H > +#define _QEMU_VIRTIO_ACCESS_H > + > +#ifdef TARGET_VIRTIO_SWAPENDIAN > +/* Architectures which need biendian define this function. */ > +extern bool virtio_swap_endian(void); > + > +extern bool virtio_byteswap; > +#else > +#define virtio_byteswap false > +#endif I suspect this is a premature optimization. With a weak function called directly in the accessors below, I think you would see no measurable performance overhead compared to this approach. It's all very predictable, so the CPU should do a decent job of optimizing the if () away. Regards, Anthony Liguori