Re: [PATCH v2 1/2] system/memory.c: support unaligned access

2024-03-17 Thread Tomoyuki Hirose
ping.

On Mon, Feb 26, 2024 at 4:28 PM Tomoyuki Hirose
 wrote:
>
> Hello,
> I would be happy if you could give me some comments.
>
> ping.
>
> On Thu, Feb 1, 2024 at 5:14 PM Tomoyuki HIROSE
>  wrote:
> >
> > The previous code ignored 'impl.unaligned' and handled unaligned accesses
> > as-is. But this implementation cannot emulate specific registers of some
> > devices that allow unaligned access, such as the xHCI Host Controller
> > Capability Registers.
> > This commit checks 'impl.unaligned'; if it is false, QEMU emulates the
> > unaligned access with multiple aligned accesses.
> >
> > Signed-off-by: Tomoyuki HIROSE 
> > ---
> >  system/memory.c | 38 +-
> >  1 file changed, 25 insertions(+), 13 deletions(-)
> >
> > diff --git a/system/memory.c b/system/memory.c
> > index a229a79988..a7ca0c9f54 100644
> > --- a/system/memory.c
> > +++ b/system/memory.c
> > @@ -535,10 +535,17 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >MemTxAttrs attrs)
> >  {
> >  uint64_t access_mask;
> > +unsigned access_mask_shift;
> > +unsigned access_mask_start_offset;
> > +unsigned access_mask_end_offset;
> >  unsigned access_size;
> > -unsigned i;
> >  MemTxResult r = MEMTX_OK;
> >  bool reentrancy_guard_applied = false;
> > +bool is_big_endian = memory_region_big_endian(mr);
> > +signed start_diff;
> > +signed current_offset;
> > +signed access_shift;
> > +hwaddr current_addr;
> >
> >  if (!access_size_min) {
> >  access_size_min = 1;
> > @@ -560,19 +567,24 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >  reentrancy_guard_applied = true;
> >  }
> >
> > -/* FIXME: support unaligned access? */
> >  access_size = MAX(MIN(size, access_size_max), access_size_min);
> > -access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> > -if (memory_region_big_endian(mr)) {
> > -for (i = 0; i < size; i += access_size) {
> > -r |= access_fn(mr, addr + i, value, access_size,
> > -(size - access_size - i) * 8, access_mask, attrs);
> > -}
> > -} else {
> > -for (i = 0; i < size; i += access_size) {
> > -r |= access_fn(mr, addr + i, value, access_size, i * 8,
> > -access_mask, attrs);
> > -}
> > +start_diff = mr->ops->impl.unaligned ? 0 : addr & (access_size - 1);
> > +current_addr = addr - start_diff;
> > +for (current_offset = -start_diff; current_offset < (signed)size;
> > + current_offset += access_size, current_addr += access_size) {
> > +access_shift = is_big_endian
> > +  ? (signed)size - (signed)access_size - current_offset
> > +  : current_offset;
> > +access_mask_shift = current_offset > 0 ? 0 : -current_offset;
> > +access_mask_start_offset = current_offset > 0 ? current_offset : 0;
> > +access_mask_end_offset = current_offset + access_size > size
> > + ? size
> > + : current_offset + access_size;
> > +access_mask = MAKE_64BIT_MASK(access_mask_shift * 8,
> > +(access_mask_end_offset - access_mask_start_offset) * 8);
> > +
> > +r |= access_fn(mr, current_addr, value, access_size, access_shift * 8,
> > +   access_mask, attrs);
> >  }
> >  if (mr->dev && reentrancy_guard_applied) {
> >  mr->dev->mem_reentrancy_guard.engaged_in_io = false;
> > --
> > 2.39.2
> >



Re: [PATCH v2 1/2] system/memory.c: support unaligned access

2024-02-25 Thread Tomoyuki Hirose
Hello,
I would be happy if you could give me some comments.

ping.

On Thu, Feb 1, 2024 at 5:14 PM Tomoyuki HIROSE
 wrote:
>
> The previous code ignored 'impl.unaligned' and handled unaligned accesses
> as-is. But this implementation cannot emulate specific registers of some
> devices that allow unaligned access, such as the xHCI Host Controller
> Capability Registers.
> This commit checks 'impl.unaligned'; if it is false, QEMU emulates the
> unaligned access with multiple aligned accesses.
>
> Signed-off-by: Tomoyuki HIROSE 
> ---
>  system/memory.c | 38 +-
>  1 file changed, 25 insertions(+), 13 deletions(-)
>
> diff --git a/system/memory.c b/system/memory.c
> index a229a79988..a7ca0c9f54 100644
> --- a/system/memory.c
> +++ b/system/memory.c
> @@ -535,10 +535,17 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
>MemTxAttrs attrs)
>  {
>  uint64_t access_mask;
> +unsigned access_mask_shift;
> +unsigned access_mask_start_offset;
> +unsigned access_mask_end_offset;
>  unsigned access_size;
> -unsigned i;
>  MemTxResult r = MEMTX_OK;
>  bool reentrancy_guard_applied = false;
> +bool is_big_endian = memory_region_big_endian(mr);
> +signed start_diff;
> +signed current_offset;
> +signed access_shift;
> +hwaddr current_addr;
>
>  if (!access_size_min) {
>  access_size_min = 1;
> @@ -560,19 +567,24 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
>  reentrancy_guard_applied = true;
>  }
>
> -/* FIXME: support unaligned access? */
>  access_size = MAX(MIN(size, access_size_max), access_size_min);
> -access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> -if (memory_region_big_endian(mr)) {
> -for (i = 0; i < size; i += access_size) {
> -r |= access_fn(mr, addr + i, value, access_size,
> -(size - access_size - i) * 8, access_mask, attrs);
> -}
> -} else {
> -for (i = 0; i < size; i += access_size) {
> -r |= access_fn(mr, addr + i, value, access_size, i * 8,
> -access_mask, attrs);
> -}
> +start_diff = mr->ops->impl.unaligned ? 0 : addr & (access_size - 1);
> +current_addr = addr - start_diff;
> +for (current_offset = -start_diff; current_offset < (signed)size;
> + current_offset += access_size, current_addr += access_size) {
> +access_shift = is_big_endian
> +  ? (signed)size - (signed)access_size - current_offset
> +  : current_offset;
> +access_mask_shift = current_offset > 0 ? 0 : -current_offset;
> +access_mask_start_offset = current_offset > 0 ? current_offset : 0;
> +access_mask_end_offset = current_offset + access_size > size
> + ? size
> + : current_offset + access_size;
> +access_mask = MAKE_64BIT_MASK(access_mask_shift * 8,
> +(access_mask_end_offset - access_mask_start_offset) * 8);
> +
> +r |= access_fn(mr, current_addr, value, access_size, access_shift * 8,
> +   access_mask, attrs);
>  }
>  if (mr->dev && reentrancy_guard_applied) {
>  mr->dev->mem_reentrancy_guard.engaged_in_io = false;
> --
> 2.39.2
>



[PATCH v2 2/2] hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

2024-02-01 Thread Tomoyuki HIROSE
According to the xHCI spec rev 1.2, unaligned access to the xHCI Host
Controller Capability Registers is not prohibited. In addition, no limit
on the access size is specified. In practice, some real devices allow
unaligned access and 8-byte access to these registers.
This commit makes unaligned access and 8-byte access to the Host
Controller Capability Registers possible.
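For illustration (assuming patch 1/2 is applied; the numbers here are
examples, not taken from the patch): with these ops, an 8-byte guest read
at offset 0 is split by the memory core into two aligned 4-byte calls
into xhci_cap_read(), and a 2-byte read at offset 2 is emulated with a
single 4-byte read at offset 0 whose result is masked and shifted.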

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/143
Signed-off-by: Tomoyuki HIROSE 
---
 hw/usb/hcd-xhci.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index ad40232eb6..9e0b24c93e 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -3181,9 +3181,11 @@ static const MemoryRegionOps xhci_cap_ops = {
 .read = xhci_cap_read,
 .write = xhci_cap_write,
 .valid.min_access_size = 1,
-.valid.max_access_size = 4,
+.valid.max_access_size = 8,
+.valid.unaligned = true,
 .impl.min_access_size = 4,
 .impl.max_access_size = 4,
+.impl.unaligned = false,
 .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
-- 
2.39.2




[PATCH v2 1/2] system/memory.c: support unaligned access

2024-02-01 Thread Tomoyuki HIROSE
The previous code ignored 'impl.unaligned' and handled unaligned accesses
as-is. But this implementation cannot emulate specific registers of some
devices that allow unaligned access, such as the xHCI Host Controller
Capability Registers.
This commit checks 'impl.unaligned'; if it is false, QEMU emulates the
unaligned access with multiple aligned accesses.

Signed-off-by: Tomoyuki HIROSE 
---
 system/memory.c | 38 +-
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/system/memory.c b/system/memory.c
index a229a79988..a7ca0c9f54 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -535,10 +535,17 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
   MemTxAttrs attrs)
 {
 uint64_t access_mask;
+unsigned access_mask_shift;
+unsigned access_mask_start_offset;
+unsigned access_mask_end_offset;
 unsigned access_size;
-unsigned i;
 MemTxResult r = MEMTX_OK;
 bool reentrancy_guard_applied = false;
+bool is_big_endian = memory_region_big_endian(mr);
+signed start_diff;
+signed current_offset;
+signed access_shift;
+hwaddr current_addr;
 
 if (!access_size_min) {
 access_size_min = 1;
@@ -560,19 +567,24 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
 reentrancy_guard_applied = true;
 }
 
-/* FIXME: support unaligned access? */
 access_size = MAX(MIN(size, access_size_max), access_size_min);
-access_mask = MAKE_64BIT_MASK(0, access_size * 8);
-if (memory_region_big_endian(mr)) {
-for (i = 0; i < size; i += access_size) {
-r |= access_fn(mr, addr + i, value, access_size,
-(size - access_size - i) * 8, access_mask, attrs);
-}
-} else {
-for (i = 0; i < size; i += access_size) {
-r |= access_fn(mr, addr + i, value, access_size, i * 8,
-access_mask, attrs);
-}
+start_diff = mr->ops->impl.unaligned ? 0 : addr & (access_size - 1);
+current_addr = addr - start_diff;
+for (current_offset = -start_diff; current_offset < (signed)size;
+ current_offset += access_size, current_addr += access_size) {
+access_shift = is_big_endian
+  ? (signed)size - (signed)access_size - current_offset
+  : current_offset;
+access_mask_shift = current_offset > 0 ? 0 : -current_offset;
+access_mask_start_offset = current_offset > 0 ? current_offset : 0;
+access_mask_end_offset = current_offset + access_size > size
+ ? size
+ : current_offset + access_size;
+access_mask = MAKE_64BIT_MASK(access_mask_shift * 8,
+(access_mask_end_offset - access_mask_start_offset) * 8);
+
+r |= access_fn(mr, current_addr, value, access_size, access_shift * 8,
+   access_mask, attrs);
 }
 if (mr->dev && reentrancy_guard_applied) {
 mr->dev->mem_reentrancy_guard.engaged_in_io = false;
-- 
2.39.2
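
For readers working through the new loop in access_with_adjusted_size(),
here is a small self-contained sketch (illustrative only, little-endian
read case, NOT the QEMU code; in QEMU the final shift is applied inside
the access_fn helpers such as memory_region_shift_read_access()) that
re-derives the same masks and shifts so the values can be checked by hand:

/*
 * Illustrative sketch only -- not the QEMU code.  It mirrors the
 * little-endian read case of the loop above for a device that only
 * implements aligned 4-byte accesses.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

/* Pretend register space: byte at offset i has value 0x11 * i. */
static const uint8_t regs[16] = {
    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
    0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
};

/* The only access the "device" implements: an aligned 4-byte read. */
static uint64_t read4(int addr)
{
    uint32_t v;
    memcpy(&v, &regs[addr], 4);      /* assumes a little-endian host */
    return v;
}

/* Emulate a 'size'-byte read at unaligned 'addr' with aligned 4-byte reads. */
static uint64_t unaligned_read(int addr, int size)
{
    const int access_size = 4;
    int start_diff = addr & (access_size - 1);
    int current_addr = addr - start_diff;
    uint64_t value = 0;

    for (int current_offset = -start_diff; current_offset < size;
         current_offset += access_size, current_addr += access_size) {
        int access_shift = current_offset;            /* little-endian case */
        int mask_shift = current_offset > 0 ? 0 : -current_offset;
        int mask_start = current_offset > 0 ? current_offset : 0;
        int mask_end = current_offset + access_size > size
                       ? size : current_offset + access_size;
        uint64_t mask = MAKE_64BIT_MASK(mask_shift * 8,
                                        (mask_end - mask_start) * 8);
        uint64_t chunk = read4(current_addr) & mask;

        /* A negative shift moves the selected bytes down, a positive one up. */
        value |= access_shift >= 0 ? chunk << (access_shift * 8)
                                   : chunk >> (-access_shift * 8);
    }
    return value;
}

int main(void)
{
    /* A 2-byte read at offset 3 -> aligned 4-byte reads at 0 and 4. */
    printf("0x%04" PRIx64 "\n", unaligned_read(3, 2));   /* prints 0x4433 */
    return 0;
}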




[PATCH v2 0/2] support unaligned access for some xHCI registers

2024-02-01 Thread Tomoyuki HIROSE
v1 -> v2:
* Improved the calculation of addresses and masks in memory.c.

According to the xHCI spec rev 1.2, unaligned access to the xHCI Host
Controller Capability Registers is not prohibited. But the current
implementation does not support unaligned access to a 'MemoryRegion'.
These patches contain 2 changes:
1. support unaligned access to 'MemoryRegion'.
2. allow unaligned access to the Host Controller Capability Registers.

Tomoyuki HIROSE (2):
  system/memory.c: support unaligned access
  hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

 hw/usb/hcd-xhci.c |  4 +++-
 system/memory.c   | 38 +-
 2 files changed, 28 insertions(+), 14 deletions(-)

-- 
2.39.2




Re: [PATCH 1/2] system/memory.c: support unaligned access

2024-01-17 Thread Tomoyuki Hirose
Hello,

Thank you for reviewing my patches.
The examples of corner cases you explained are very helpful.
I'm currently working on the patches. I will submit v2 as soon
as it's completed, so please wait a little longer.

thanks,
Tomoyuki HIROSE



Re: [PATCH 0/2] support unaligned access for some xHCI registers

2023-12-19 Thread Tomoyuki Hirose
On Tue, Dec 19, 2023 at 8:26 PM Peter Maydell  wrote:
>
> On Tue, 19 Dec 2023 at 04:49, Tomoyuki Hirose
>  wrote:
> >
> > I would be grateful if you could give me any comments on my patch.
>
> It's on my todo list, but at this point I'm afraid I'm
> not going to be able to get to it before I break for
> the holidays, so it will be January before I can look at
> it. (It's a bit complicated because I'll need to look
> at this patch, at the other suggested patch from the
> past that also tried to address this, at various
> mailing list discussions we've had about the problem,
> and at how the code in general is doing things, so it's
> not a trivial change to review.)

OK, I understand.
Enjoy your holidays!

Regards,
Tomoyuki HIROSE



Re: [PATCH 0/2] support unaligned access for some xHCI registers

2023-12-18 Thread Tomoyuki Hirose
I would be grateful if you could give me any comments on my patch.

ping,

Tomoyuki HIROSE

On Mon, Dec 11, 2023 at 4:12 PM Tomoyuki HIROSE
 wrote:
>
> According to the xHCI spec rev 1.2, unaligned access to the xHCI Host
> Controller Capability Registers is not prohibited. But the current
> implementation does not support unaligned access to a 'MemoryRegion'.
> These patches contain 2 changes:
> 1. support unaligned access to 'MemoryRegion'
> 2. allow unaligned access to the Host Controller Capability Registers.
>
> Tomoyuki HIROSE (2):
>   system/memory.c: support unaligned access
>   hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers
>
>  hw/usb/hcd-xhci.c |  4 +++-
>  system/memory.c   | 22 --
>  2 files changed, 19 insertions(+), 7 deletions(-)
>
> --
> 2.39.2
>



Re: [PATCH 2/2] hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

2023-12-18 Thread Tomoyuki Hirose
On Tue, Dec 12, 2023 at 7:26 PM Peter Maydell  wrote:
>
> On Tue, 12 Dec 2023 at 01:43, Tomoyuki Hirose
>  wrote:
> >
> > Thanks for the comment.
> >
> > On Mon, Dec 11, 2023 at 10:57 PM Peter Maydell  
> > wrote:
> > > We should definitely look at fixing the unaligned access
> > > stuff, but the linked bug report is not trying to do an
> > > unaligned access -- it wants to do a 2-byte read from offset 2,
> > > which is aligned. The capability registers in the xHCI spec
> > > are also all at offsets and sizes that mean that a natural
> > > read of them is not unaligned.
> >
> > Shouldn't I link this bug report?
> > Or is it not appropriate to allow unaligned access?
>
> The bug report is definitely relevant. But depending
> on how tricky the unaligned access handling turns out to
> be to get right, we might be able to fix the bug by
> permitting aligned-but-not-4-bytes accesses. (I'm
> a bit surprised that doesn't work already, in fact:
> we use it in other devices.)
>
> thanks
> -- PMM

Thank you for answering my question.
The unaligned access handling in my patch is not so tricky.
If the access is unaligned, it just corrects the access size and
address, then reads the value as before.
Also, unaligned access is allowed by the specification, and byte access
was possible even on real devices.
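(As an illustrative example, not taken from the patch itself: for a
little-endian region with impl.min/max_access_size = 4 and
impl.unaligned = false, a 2-byte read at offset 3 becomes two aligned
4-byte reads at offsets 0 and 4; byte 3 of the first read and byte 0 of
the second are masked out and shifted into bits 0-7 and 8-15 of the
result.)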

Regards,
Tomoyuki HIROSE



Re: [PATCH 1/2] system/memory.c: support unaligned access

2023-12-14 Thread Tomoyuki Hirose
On Mon, Dec 11, 2023 at 10:31 PM Cédric Le Goater  wrote:
> FWIW, there has been a previous proposal for unaligned accesses support [1].
>
> Thanks,
>
> C.
>
> [1] https://lore.kernel.org/qemu-devel/20170630030058.28943-1-and...@aj.id.au/
>

Thanks for the information.
That patch was an RFC, and unfortunately it doesn't seem to have been merged.
This feature is useful for devices that allow unaligned access.

Regards,

Tomoyuki HIROSE

On Mon, Dec 11, 2023 at 10:31 PM Cédric Le Goater  wrote:
>
> Hello,
>
> On 12/11/23 08:12, Tomoyuki HIROSE wrote:
> > The previous code ignored 'impl.unaligned' and handled unaligned accesses
> > as-is. But this implementation cannot emulate specific registers of some
> > devices that allow unaligned access, such as the xHCI Host Controller
> > Capability Registers.
> > This commit checks 'impl.unaligned'; if it is false, QEMU emulates the
> > unaligned access with multiple aligned accesses.
> >
> > Signed-off-by: Tomoyuki HIROSE 
>
> FWIW, there has been a previous proposal for unaligned accesses support [1].
>
> Thanks,
>
> C.
>
> [1] https://lore.kernel.org/qemu-devel/20170630030058.28943-1-and...@aj.id.au/
>
>
> > ---
> >   system/memory.c | 22 --
> >   1 file changed, 16 insertions(+), 6 deletions(-)
> >
> > diff --git a/system/memory.c b/system/memory.c
> > index 798b6c0a17..b0caa90fef 100644
> > --- a/system/memory.c
> > +++ b/system/memory.c
> > @@ -539,6 +539,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >   unsigned i;
> >   MemTxResult r = MEMTX_OK;
> >   bool reentrancy_guard_applied = false;
> > +hwaddr aligned_addr;
> > +unsigned corrected_size = size;
> > +signed align_diff = 0;
> >
> >   if (!access_size_min) {
> >   access_size_min = 1;
> > @@ -560,18 +563,25 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
> >   reentrancy_guard_applied = true;
> >   }
> >
> > -/* FIXME: support unaligned access? */
> >   access_size = MAX(MIN(size, access_size_max), access_size_min);
> >   access_mask = MAKE_64BIT_MASK(0, access_size * 8);
> > +if (!mr->ops->impl.unaligned) {
> > +aligned_addr = addr & ~(access_size - 1);
> > +align_diff = addr - aligned_addr;
> > +corrected_size = size < access_size ? access_size :
> > +size + (align_diff > 0 ? access_size : 0);
> > +addr = aligned_addr;
> > +}
> >   if (memory_region_big_endian(mr)) {
> > -for (i = 0; i < size; i += access_size) {
> > +for (i = 0; i < corrected_size; i += access_size) {
> >   r |= access_fn(mr, addr + i, value, access_size,
> > -(size - access_size - i) * 8, access_mask, attrs);
> > +(size - access_size - i + align_diff) * 8,
> > +access_mask, attrs);
> >   }
> >   } else {
> > -for (i = 0; i < size; i += access_size) {
> > -r |= access_fn(mr, addr + i, value, access_size, i * 8,
> > -access_mask, attrs);
> > +for (i = 0; i < corrected_size; i += access_size) {
> > +r |= access_fn(mr, addr + i, value, access_size,
> > +((signed)i - align_diff) * 8, access_mask, attrs);
> >   }
> >   }
> >   if (mr->dev && reentrancy_guard_applied) {
>



Re: [PATCH 2/2] hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

2023-12-11 Thread Tomoyuki Hirose
Thanks for the comment.

On Mon, Dec 11, 2023 at 10:57 PM Peter Maydell  wrote:
> We should definitely look at fixing the unaligned access
> stuff, but the linked bug report is not trying to do an
> unaligned access -- it wants to do a 2-byte read from offset 2,
> which is aligned. The capability registers in the xHCI spec
> are also all at offsets and sizes that mean that a natural
> read of them is not unaligned.

Shouldn't I link this bug report?
Or is it not appropriate to allow unaligned access?

thanks,
Tomoyuki HIROSE



[PATCH 2/2] hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

2023-12-10 Thread Tomoyuki HIROSE
According to the xHCI spec rev 1.2, unaligned access to the xHCI Host
Controller Capability Registers is not prohibited. In addition, no limit
on the access size is specified. In practice, some real devices allow
unaligned access and 8-byte access to these registers.
This commit makes unaligned access and 8-byte access to the Host
Controller Capability Registers possible.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/143
Signed-off-by: Tomoyuki HIROSE 
---
 hw/usb/hcd-xhci.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index 4b60114207..41abeb9ac5 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -3181,9 +3181,11 @@ static const MemoryRegionOps xhci_cap_ops = {
 .read = xhci_cap_read,
 .write = xhci_cap_write,
 .valid.min_access_size = 1,
-.valid.max_access_size = 4,
+.valid.max_access_size = 8,
+.valid.unaligned = true,
 .impl.min_access_size = 4,
 .impl.max_access_size = 4,
+.impl.unaligned = false,
 .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
-- 
2.39.2




[PATCH 0/2] support unaligned access for some xHCI registers

2023-12-10 Thread Tomoyuki HIROSE
According to the xHCI spec rev 1.2, unaligned access to the xHCI Host
Controller Capability Registers is not prohibited. But the current
implementation does not support unaligned access to a 'MemoryRegion'.
These patches contain 2 changes:
1. support unaligned access to 'MemoryRegion'
2. allow unaligned access to the Host Controller Capability Registers.

Tomoyuki HIROSE (2):
  system/memory.c: support unaligned access
  hw/usb/hcd-xhci.c: allow unaligned access to Capability Registers

 hw/usb/hcd-xhci.c |  4 +++-
 system/memory.c   | 22 --
 2 files changed, 19 insertions(+), 7 deletions(-)

-- 
2.39.2




[PATCH 1/2] system/memory.c: support unaligned access

2023-12-10 Thread Tomoyuki HIROSE
The previous code ignored 'impl.unaligned' and handled unaligned accesses
as-is. But this implementation cannot emulate specific registers of some
devices that allow unaligned access, such as the xHCI Host Controller
Capability Registers.
This commit checks 'impl.unaligned'; if it is false, QEMU emulates the
unaligned access with multiple aligned accesses.

Signed-off-by: Tomoyuki HIROSE 
---
 system/memory.c | 22 --
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/system/memory.c b/system/memory.c
index 798b6c0a17..b0caa90fef 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -539,6 +539,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
 unsigned i;
 MemTxResult r = MEMTX_OK;
 bool reentrancy_guard_applied = false;
+hwaddr aligned_addr;
+unsigned corrected_size = size;
+signed align_diff = 0;
 
 if (!access_size_min) {
 access_size_min = 1;
@@ -560,18 +563,25 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
 reentrancy_guard_applied = true;
 }
 
-/* FIXME: support unaligned access? */
 access_size = MAX(MIN(size, access_size_max), access_size_min);
 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
+if (!mr->ops->impl.unaligned) {
+aligned_addr = addr & ~(access_size - 1);
+align_diff = addr - aligned_addr;
+corrected_size = size < access_size ? access_size :
+size + (align_diff > 0 ? access_size : 0);
+addr = aligned_addr;
+}
 if (memory_region_big_endian(mr)) {
-for (i = 0; i < size; i += access_size) {
+for (i = 0; i < corrected_size; i += access_size) {
 r |= access_fn(mr, addr + i, value, access_size,
-(size - access_size - i) * 8, access_mask, attrs);
+(size - access_size - i + align_diff) * 8,
+access_mask, attrs);
 }
 } else {
-for (i = 0; i < size; i += access_size) {
-r |= access_fn(mr, addr + i, value, access_size, i * 8,
-access_mask, attrs);
+for (i = 0; i < corrected_size; i += access_size) {
+r |= access_fn(mr, addr + i, value, access_size,
+((signed)i - align_diff) * 8, access_mask, attrs);
 }
 }
 if (mr->dev && reentrancy_guard_applied) {
-- 
2.39.2