Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-23 Thread Oleksandr


On 23.09.19 16:31, Jan Beulich wrote:

Hi, Jan




+
+    if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
+    return _xmalloc(size, align);
+
+    ASSERT((align & (align - 1)) == 0);
+    if ( align < MEM_ALIGN )
+    align = MEM_ALIGN;
+
+    tmp_size = size + align - MEM_ALIGN;
+
+    if ( tmp_size < PAGE_SIZE )
+    tmp_size = (tmp_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE :
+    ROUNDUP_SIZE(tmp_size);
+
+    if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
+    {
+    curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;
+
+    if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 0 )

You mean "size" here I think, not "tmp_size". See how xmalloc_whole_pages()
gets called from _xmalloc() with an "adjusted back" value.


Yes, thank you for pointing this out.


And as said, please clean up the code you move or add anew: Use casts
only where really needed, transform types to appropriate "modern" ones,
etc.


ok, will double check.


--
Regards,

Oleksandr Tyshchenko


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-23 Thread Jan Beulich
On 23.09.2019 14:50, Oleksandr wrote:
> Is the diff below close to what you meant?

Almost.

> @@ -598,10 +621,70 @@ void *_xzalloc(unsigned long size, unsigned long align)
>   return p ? memset(p, 0, size) : p;
>   }
> 
> -void xfree(void *p)
> +void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
>   {
> -    struct bhdr *b;
> +    unsigned long curr_size, tmp_size;
> +    void *p;
> +
> +    if ( !size )
> +    {
> +    xfree(ptr);
> +    return ZERO_BLOCK_PTR;
> +    }
> +
> +    if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
> +    return _xmalloc(size, align);
> +
> +    ASSERT((align & (align - 1)) == 0);
> +    if ( align < MEM_ALIGN )
> +    align = MEM_ALIGN;
> +
> +    tmp_size = size + align - MEM_ALIGN;
> +
> +    if ( tmp_size < PAGE_SIZE )
> +    tmp_size = (tmp_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE :
> +    ROUNDUP_SIZE(tmp_size);
> +
> +    if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
> +    {
> +    curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << 
> PAGE_SHIFT;
> +
> +    if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 
> 0 )

You mean "size" here I think, not "tmp_size". See how xmalloc_whole_pages()
gets called from _xmalloc() with an "adjusted back" value.

And as said, please clean up the code you move or add anew: Use casts
only where really needed, transform types to appropriate "modern" ones,
etc.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-23 Thread Oleksandr


On 16.09.19 18:24, Jan Beulich wrote:

Hi, Jan.


+ROUNDUP_SIZE(tmp_size);
+
+if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 0 )
+return ptr; /* the size and alignment fit in already allocated space */

You also don't seem to ever update ptr in case you want to use the
(head) padding, i.e. you'd hand back a pointer to a block which the
caller would assume extends past its actual end. I think you want
to calculate the new tentative pointer (taking the requested
alignment into account), and only from that calculate curr_size
(which perhaps would better be named "usable" or "space" or some
such). Obviously the (head) padding block may need updating, too.

I am afraid I don't completely understand your point here. And sorry for
the possibly naive question, but what is the "(head) padding" here?

The very padding talked about earlier. I did add "(head)" to clarify
it's that specific case - after all tail padding is far more common.



I am still not sure I completely understand your point regarding calculating the
tentative pointer first and then curr_size from it.



--

Is the diff below close to what you meant?


---
 xen/common/xmalloc_tlsf.c | 113 
++

 xen/include/xen/xmalloc.h |   1 +
 2 files changed, 96 insertions(+), 18 deletions(-)

diff --git a/xen/common/xmalloc_tlsf.c b/xen/common/xmalloc_tlsf.c
index e98ad65..f24c97c 100644
--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -554,10 +554,40 @@ static void tlsf_init(void)
 #define ZERO_BLOCK_PTR ((void *)-1L)
 #endif

+static void *strip_padding(void *p)
+{
+    struct bhdr *b = (struct bhdr *)(p - BHDR_OVERHEAD);
+
+    if ( b->size & FREE_BLOCK )
+    {
+    p -= b->size & ~FREE_BLOCK;
+    b = (struct bhdr *)(p - BHDR_OVERHEAD);
+    ASSERT(!(b->size & FREE_BLOCK));
+    }
+
+    return p;
+}
+
+static void *add_padding(void *p, unsigned long align)
+{
+    u32 pad;
+
+    if ( (pad = -(long)p & (align - 1)) != 0 )
+    {
+    char *q = (char *)p + pad;
+    struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
+
+    ASSERT(q > (char *)p);
+    b->size = pad | FREE_BLOCK;
+    p = q;
+    }
+
+    return p;
+}
+
 void *_xmalloc(unsigned long size, unsigned long align)
 {
 void *p = NULL;
-    u32 pad;

 ASSERT(!in_irq());

@@ -578,14 +608,7 @@ void *_xmalloc(unsigned long size, unsigned long align)
 return xmalloc_whole_pages(size - align + MEM_ALIGN, align);

 /* Add alignment padding. */
-    if ( (pad = -(long)p & (align - 1)) != 0 )
-    {
-    char *q = (char *)p + pad;
-    struct bhdr *b = (struct bhdr *)(q - BHDR_OVERHEAD);
-    ASSERT(q > (char *)p);
-    b->size = pad | FREE_BLOCK;
-    p = q;
-    }
+    p = add_padding(p, align);

 ASSERT(((unsigned long)p & (align - 1)) == 0);
 return p;
@@ -598,10 +621,70 @@ void *_xzalloc(unsigned long size, unsigned long 
align)

 return p ? memset(p, 0, size) : p;
 }

-void xfree(void *p)
+void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
 {
-    struct bhdr *b;
+    unsigned long curr_size, tmp_size;
+    void *p;
+
+    if ( !size )
+    {
+    xfree(ptr);
+    return ZERO_BLOCK_PTR;
+    }
+
+    if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
+    return _xmalloc(size, align);
+
+    ASSERT((align & (align - 1)) == 0);
+    if ( align < MEM_ALIGN )
+    align = MEM_ALIGN;
+
+    tmp_size = size + align - MEM_ALIGN;
+
+    if ( tmp_size < PAGE_SIZE )
+    tmp_size = (tmp_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE :
+    ROUNDUP_SIZE(tmp_size);
+
+    if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
+    {
+    curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << 
PAGE_SHIFT;

+
+    if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 
1)) == 0 )

+    return ptr;
+    }
+    else
+    {
+    struct bhdr *b;
+
+    /* Strip alignment padding. */
+    p = strip_padding(ptr);
+
+    b = (struct bhdr *)(p - BHDR_OVERHEAD);
+    curr_size = b->size & BLOCK_SIZE_MASK;
+
+    if ( tmp_size <= curr_size )
+    {
+    /* Add alignment padding. */
+    p = add_padding(p, align);
+
+    ASSERT(((unsigned long)p & (align - 1)) == 0);
+
+    return p;
+    }
+    }
+
+    p = _xzalloc(size, align);
+    if ( p )
+    {
+    memcpy(p, ptr, min(curr_size, size));
+    xfree(ptr);
+    }
+
+    return p;
+}
+
+void xfree(void *p)
+{
 if ( p == NULL || p == ZERO_BLOCK_PTR )
 return;

@@ -626,13 +709,7 @@ void xfree(void *p)
 }

 /* Strip alignment padding. */
-    b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
-    if ( b->size & FREE_BLOCK )
-    {
-    p = (char *)p - (b->size & ~FREE_BLOCK);
-    b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
-    ASSERT(!(b->size & FREE_BLOCK));
-    }
+    p = strip_padding(p);

 xmem_pool_free(p, xenpool);
 }
diff --git 

Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-16 Thread Jan Beulich
On 16.09.2019 17:03, Oleksandr wrote:
> On 16.09.19 13:13, Jan Beulich wrote:
>> On 13.09.2019 17:35, Oleksandr Tyshchenko wrote:
>>> --- a/xen/common/xmalloc_tlsf.c
>>> +++ b/xen/common/xmalloc_tlsf.c
>>> @@ -598,6 +598,58 @@ void *_xzalloc(unsigned long size, unsigned long align)
>>>   return p ? memset(p, 0, size) : p;
>>>   }
>>>   
>>> +void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
>>> +{
>>> +unsigned long curr_size, tmp_size;
>>> +void *p;
>>> +
>>> +if ( !size )
>>> +{
>>> +xfree(ptr);
>>> +return ZERO_BLOCK_PTR;
>>> +}
>>> +
>>> +if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
>>> +return _xmalloc(size, align);
>>> +
>>> +if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
>>> +curr_size = PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;
>> While the present MAX_ORDER setting will prevent allocations of
>> 4GiB or above from succeeding, may I ask that you don't introduce
>> latent issues in case MAX_ORDER would ever need bumping?
> Sure (I assume, you are talking about possible truncation):
> 
> if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
>      curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;

Yes.

>>> +ROUNDUP_SIZE(tmp_size);
>>> +
>>> +if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 0 )
>>> +return ptr; /* the size and alignment fit in already allocated 
>>> space */
>> You also don't seem to ever update ptr in case you want to use the
>> (head) padding, i.e. you'd hand back a pointer to a block which the
>> caller would assume extends past its actual end. I think you want
>> to calculate the new tentative pointer (taking the requested
>> alignment into account), and only from that calculate curr_size
>> (which perhaps would better be named "usable" or "space" or some
>> such). Obviously the (head) padding block may need updating, too.
> 
> I am afraid I don't completely understand your point here. And sorry for 
> the maybe naive question, but what is the "(head) padding" here?

The very padding talked about earlier. I did add "(head)" to clarify
it's that specific case - after all tail padding is far more common.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-16 Thread Oleksandr


On 16.09.19 13:13, Jan Beulich wrote:

Hi, Jan


On 13.09.2019 17:35, Oleksandr Tyshchenko wrote:

--- a/xen/common/xmalloc_tlsf.c
+++ b/xen/common/xmalloc_tlsf.c
@@ -598,6 +598,58 @@ void *_xzalloc(unsigned long size, unsigned long align)
  return p ? memset(p, 0, size) : p;
  }
  
+void *_xrealloc(void *ptr, unsigned long size, unsigned long align)

+{
+unsigned long curr_size, tmp_size;
+void *p;
+
+if ( !size )
+{
+xfree(ptr);
+return ZERO_BLOCK_PTR;
+}
+
+if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
+return _xmalloc(size, align);
+
+if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
+curr_size = PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;

While the present MAX_ORDER setting will prevent allocations of
4GiB or above from succeeding, may I ask that you don't introduce
latent issues in case MAX_ORDER would ever need bumping?

Sure (I assume, you are talking about possible truncation):

if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
    curr_size = (unsigned long)PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;





+else
+{
+struct bhdr *b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD);
+
+if ( b->size & FREE_BLOCK )
+{
+p = (char *)ptr - (b->size & ~FREE_BLOCK);
+b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
+ASSERT(!(b->size & FREE_BLOCK));
+}

This matches the respective xfree() code fragment, and needs to
remain in sync. Which suggests introducing a helper function
instead of duplicating the code. And please omit the unnecessary
casts to char *.


Sounds reasonable, will do.





+curr_size = b->size & BLOCK_SIZE_MASK;

_xmalloc() has

 b->size = pad | FREE_BLOCK;

i.e. aiui what you calculate above is the padding size, not the
overall block size.


I have to admit that I am not familiar enough with the allocator internals, but

I meant to calculate the overall block size (with the alignment padding
stripped, if present)...




+}
+
+ASSERT((align & (align - 1)) == 0);
+if ( align < MEM_ALIGN )
+align = MEM_ALIGN;
+tmp_size = size + align - MEM_ALIGN;
+
+if ( tmp_size < PAGE_SIZE )
+tmp_size = ( tmp_size < MIN_BLOCK_SIZE ) ? MIN_BLOCK_SIZE :

Stray blanks inside parentheses.


ok





+ROUNDUP_SIZE(tmp_size);
+
+if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 0 )
+return ptr; /* the size and alignment fit in already allocated space */

You also don't seem to ever update ptr in case you want to use the
(head) padding, i.e. you'd hand back a pointer to a block which the
caller would assume extends past its actual end. I think you want
to calculate the new tentative pointer (taking the requested
alignment into account), and only from that calculate curr_size
(which perhaps would better be named "usable" or "space" or some
such). Obviously the (head) padding block may need updating, too.


I am afraid I don't completely understand your point here. And sorry for 
the possibly naive question, but what is the "(head) padding" here?




+p = _xmalloc(size, align);
+if ( p )
+{
+memcpy(p, ptr, min(curr_size, size));
+xfree(ptr);
+}
+
+return p;
+}

As a final remark - did you consider zero(?)-filling the tail
portion? While C's realloc() isn't specified to do so, since there's
no (not going to be a) zeroing counterpart, doing so may be safer
overall.


Probably, worth doing. Will zero it.


--
Regards,

Oleksandr Tyshchenko


___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH V4 4/8] xen/common: Introduce _xrealloc function

2019-09-16 Thread Jan Beulich
On 13.09.2019 17:35, Oleksandr Tyshchenko wrote:
> --- a/xen/common/xmalloc_tlsf.c
> +++ b/xen/common/xmalloc_tlsf.c
> @@ -598,6 +598,58 @@ void *_xzalloc(unsigned long size, unsigned long align)
>  return p ? memset(p, 0, size) : p;
>  }
>  
> +void *_xrealloc(void *ptr, unsigned long size, unsigned long align)
> +{
> +unsigned long curr_size, tmp_size;
> +void *p;
> +
> +if ( !size )
> +{
> +xfree(ptr);
> +return ZERO_BLOCK_PTR;
> +}
> +
> +if ( ptr == NULL || ptr == ZERO_BLOCK_PTR )
> +return _xmalloc(size, align);
> +
> +if ( !((unsigned long)ptr & (PAGE_SIZE - 1)) )
> +curr_size = PFN_ORDER(virt_to_page(ptr)) << PAGE_SHIFT;

While the present MAX_ORDER setting will prevent allocations of
4GiB or above from succeeding, may I ask that you don't introduce
latent issues in case MAX_ORDER would ever need bumping?

> +else
> +{
> +struct bhdr *b = (struct bhdr *)((char *)ptr - BHDR_OVERHEAD);
> +
> +if ( b->size & FREE_BLOCK )
> +{
> +p = (char *)ptr - (b->size & ~FREE_BLOCK);
> +b = (struct bhdr *)((char *)p - BHDR_OVERHEAD);
> +ASSERT(!(b->size & FREE_BLOCK));
> +}

This matches the respective xfree() code fragment, and needs to
remain in sync. Which suggests introducing a helper function
instead of duplicating the code. And please omit the unnecessary
casts to char *.

> +curr_size = b->size & BLOCK_SIZE_MASK;

_xmalloc() has

b->size = pad | FREE_BLOCK;

i.e. aiui what you calculate above is the padding size, not the
overall block size.

> +}
> +
> +ASSERT((align & (align - 1)) == 0);
> +if ( align < MEM_ALIGN )
> +align = MEM_ALIGN;
> +tmp_size = size + align - MEM_ALIGN;
> +
> +if ( tmp_size < PAGE_SIZE )
> +tmp_size = ( tmp_size < MIN_BLOCK_SIZE ) ? MIN_BLOCK_SIZE :

Stray blanks inside parentheses.

> +ROUNDUP_SIZE(tmp_size);
> +
> +if ( tmp_size <= curr_size && ((unsigned long)ptr & (align - 1)) == 0 )
> +return ptr; /* the size and alignment fit in already allocated space 
> */

You also don't seem to ever update ptr in case you want to use the
(head) padding, i.e. you'd hand back a pointer to a block which the
caller would assume extends past its actual end. I think you want
to calculate the new tentative pointer (taking the requested
alignment into account), and only from that calculate curr_size
(which perhaps would better be named "usable" or "space" or some
such). Obviously the (head) padding block may need updating, too.

> +p = _xmalloc(size, align);
> +if ( p )
> +{
> +memcpy(p, ptr, min(curr_size, size));
> +xfree(ptr);
> +}
> +
> +return p;
> +}

As a final remark - did you consider zero(?)-filling the tail
portion? While C's realloc() isn't specified to do so, since there's
no (not going to be a) zeroing counterpart, doing so may be safer
overall.

Jan

___
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel