Re: [Qemu-devel] [PATCH v1 1/2] virtio_gpu: Handle endian conversion

2017-09-14 Thread Gerd Hoffmann
On Wed, 2017-09-13 at 11:53 -0400, Farhan Ali wrote:
> 
> On 09/13/2017 04:13 AM, Gerd Hoffmann wrote:
> > Please move this to a helper function, maybe by updating the
> > VIRTIO_GPU_FILL_CMD macro.
> > 
> > The header fields should be byteswapped too.  As most structs have
> > only 32-bit fields (with the exception of hdr.fence_id), you should
> > be able to create a generic byteswap function which only needs the
> > struct size as an argument and handles all structs without
> > addresses/offsets (which are 64-bit fields).
> 
> I am not sure I understand what you mean here. Since struct
> virtio_gpu_ctrl_hdr is part of every struct, any such function
> would also need to handle the case of hdr.fence_id, right?

Yes, but that is common to all structs.  You can have one function to
handle the header, one generic function (which calls the header
function for the header and just does 32-bit byteswaps for everything
else), and one specific function for each operation that has a 64-bit
address somewhere in the struct (which again can use the header
function for the header).
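
For illustration, a minimal sketch of that scheme (the helper names are
hypothetical, assuming QEMU's le32_to_cpu/le64_to_cpu from
"qemu/bswap.h" and the struct layouts from virtio_gpu.h):

/* One function to handle the header; fence_id is the lone 64-bit field. */
static void virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    hdr->type = le32_to_cpu(hdr->type);
    hdr->flags = le32_to_cpu(hdr->flags);
    hdr->fence_id = le64_to_cpu(hdr->fence_id);
    hdr->ctx_id = le32_to_cpu(hdr->ctx_id);
}

/* One generic function: swap the header, then treat the rest of the
 * struct as an array of 32-bit little-endian words.  Only valid for
 * commands whose non-header fields are all 32 bits wide (no
 * addresses/offsets). */
static void virtio_gpu_bswap_32(void *ptr, size_t size)
{
    struct virtio_gpu_ctrl_hdr *hdr = ptr;
    uint32_t *words = ptr;
    size_t i;

    virtio_gpu_ctrl_hdr_bswap(hdr);
    for (i = sizeof(*hdr) / sizeof(uint32_t);
         i < size / sizeof(uint32_t); i++) {
        words[i] = le32_to_cpu(words[i]);
    }
}

/* One specific function per command with a 64-bit address/offset, e.g.
 * transfer_to_host_2d, which reuses the header helper. */
static void virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    t2d->r.x = le32_to_cpu(t2d->r.x);
    t2d->r.y = le32_to_cpu(t2d->r.y);
    t2d->r.width = le32_to_cpu(t2d->r.width);
    t2d->r.height = le32_to_cpu(t2d->r.height);
    t2d->resource_id = le32_to_cpu(t2d->resource_id);
    t2d->offset = le64_to_cpu(t2d->offset);
}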

cheers,
  Gerd




Re: [Qemu-devel] [PATCH v1 1/2] virtio_gpu: Handle endian conversion

2017-09-13 Thread Farhan Ali



On 09/13/2017 04:13 AM, Gerd Hoffmann wrote:

> Please move this to a helper function, maybe by updating the
> VIRTIO_GPU_FILL_CMD macro.
> 
> The header fields should be byteswapped too.  As most structs have
> only 32-bit fields (with the exception of hdr.fence_id), you should be
> able to create a generic byteswap function which only needs the struct
> size as an argument and handles all structs without addresses/offsets
> (which are 64-bit fields).


I am not sure I understand what you mean here. Since struct
virtio_gpu_ctrl_hdr is part of every struct, any such function
would also need to handle the case of hdr.fence_id, right?



> The conversion looks incomplete; at least virtio_gpu_ctrl_response will
> need adaptations too.  It probably works by luck, because the guest
> driver uses fences only in virgl (3d) mode.



Oh right, I need to handle the conversion there as well. Thanks for 
catching that.


Also, I believe this conversion patch isn't comprehensive; it mostly
covers the changes I made to get a display working on S390. So I
appreciate you reviewing the changes.


> cheers,
>   Gerd


Thanks
Farhan




Re: [Qemu-devel] [PATCH v1 1/2] virtio_gpu: Handle endian conversion

2017-09-13 Thread Gerd Hoffmann
  Hi,

> @@ -287,6 +287,12 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
>  struct virtio_gpu_resource_create_2d c2d;
>  
>  VIRTIO_GPU_FILL_CMD(c2d);
> +
> +c2d.resource_id = le32_to_cpu(c2d.resource_id);
> +c2d.format = le32_to_cpu(c2d.format);
> +c2d.width = le32_to_cpu(c2d.width);
> +c2d.height = le32_to_cpu(c2d.height);
> +

Please move this to a helper function, maybe by updating the
VIRTIO_GPU_FILL_CMD macro.

The header fields should be byteswapped too.  As most structs have
only 32-bit fields (with the exception of hdr.fence_id), you should be
able to create a generic byteswap function which only needs the struct
size as an argument and handles all structs without addresses/offsets
(which are 64-bit fields).

The conversion looks incomplete; at least virtio_gpu_ctrl_response will
need adaptations too.  It probably works by luck, because the guest
driver uses fences only in virgl (3d) mode.

cheers,
  Gerd




[Qemu-devel] [PATCH v1 1/2] virtio_gpu: Handle endian conversion

2017-09-12 Thread Farhan Ali
Virtio GPU code currently only supports little endian format,
and so using the Virtio GPU device on a big endian machine
does not work.

Let's fix it by supporting the correct host CPU byte order.

Signed-off-by: Farhan Ali 
---
 hw/display/virtio-gpu.c | 53 -
 1 file changed, 44 insertions(+), 9 deletions(-)

diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 6aae147..36e2414 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -236,8 +236,8 @@ virtio_gpu_fill_display_info(VirtIOGPU *g,
 for (i = 0; i < g->conf.max_outputs; i++) {
 if (g->enabled_output_bitmask & (1 << i)) {
 dpy_info->pmodes[i].enabled = 1;
-dpy_info->pmodes[i].r.width = g->req_state[i].width;
-dpy_info->pmodes[i].r.height = g->req_state[i].height;
+dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
+dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
 }
 }
 }
@@ -287,6 +287,12 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
 struct virtio_gpu_resource_create_2d c2d;
 
 VIRTIO_GPU_FILL_CMD(c2d);
+
+c2d.resource_id = le32_to_cpu(c2d.resource_id);
+c2d.format = le32_to_cpu(c2d.format);
+c2d.width = le32_to_cpu(c2d.width);
+c2d.height = le32_to_cpu(c2d.height);
+
 trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
c2d.width, c2d.height);
 
@@ -383,6 +389,14 @@ static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
 struct virtio_gpu_transfer_to_host_2d t2d;
 
 VIRTIO_GPU_FILL_CMD(t2d);
+
+t2d.r.x = le32_to_cpu(t2d.r.x);
+t2d.r.y = le32_to_cpu(t2d.r.y);
+t2d.r.width = le32_to_cpu(t2d.r.width);
+t2d.r.height = le32_to_cpu(t2d.r.height);
+t2d.resource_id = le32_to_cpu(t2d.resource_id);
+t2d.offset = le64_to_cpu(t2d.offset);
+
 trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
 
 res = virtio_gpu_find_resource(g, t2d.resource_id);
@@ -439,6 +453,13 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
 int i;
 
 VIRTIO_GPU_FILL_CMD(rf);
+
+rf.resource_id = le32_to_cpu(rf.resource_id);
+rf.r.width = le32_to_cpu(rf.r.width);
+rf.r.height = le32_to_cpu(rf.r.height);
+rf.r.x = le32_to_cpu(rf.r.x);
+rf.r.y = le32_to_cpu(rf.r.y);
+
 trace_virtio_gpu_cmd_res_flush(rf.resource_id,
rf.r.width, rf.r.height, rf.r.x, rf.r.y);
 
@@ -511,6 +532,14 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
 struct virtio_gpu_set_scanout ss;
 
 VIRTIO_GPU_FILL_CMD(ss);
+
+ss.scanout_id = le32_to_cpu(ss.scanout_id);
+ss.resource_id = le32_to_cpu(ss.resource_id);
+ss.r.width = le32_to_cpu(ss.r.width);
+ss.r.height = le32_to_cpu(ss.r.height);
+ss.r.x = le32_to_cpu(ss.r.x);
+ss.r.y = le32_to_cpu(ss.r.y);
+
 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
  ss.r.width, ss.r.height, ss.r.x, ss.r.y);
 
@@ -633,13 +662,15 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
 *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
 }
 for (i = 0; i < ab->nr_entries; i++) {
-hwaddr len = ents[i].length;
-(*iov)[i].iov_len = ents[i].length;
-(*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
+uint64_t a = le64_to_cpu(ents[i].addr);
+uint32_t l = le32_to_cpu(ents[i].length);
+hwaddr len = l;
+(*iov)[i].iov_len = l;
+(*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
 if (addr) {
-(*addr)[i] = ents[i].addr;
+(*addr)[i] = a;
 }
-if (!(*iov)[i].iov_base || len != ents[i].length) {
+if (!(*iov)[i].iov_base || len != l) {
 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
   " resource %d element %d\n",
   __func__, ab->resource_id, i);
@@ -686,6 +717,10 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
 int ret;
 
 VIRTIO_GPU_FILL_CMD(ab);
+
+ab.resource_id = le32_to_cpu(ab.resource_id);
+ab.nr_entries = le32_to_cpu(ab.nr_entries);
+
 trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
 
 res = virtio_gpu_find_resource(g, ab.resource_id);
@@ -735,7 +770,7 @@ static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
 {
 VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 
-switch (cmd->cmd_hdr.type) {
+switch (le32_to_cpu(cmd->cmd_hdr.type)) {
 case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
 virtio_gpu_get_display_info(g, cmd);
 break;
@@ -1135,7 +1170,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
 }
 
 g->config_size = sizeof(struct virtio_gpu_config);
-g->virtio_config.num_scanouts = g->conf.max_outputs;
+g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);