On Mon, Mar 29, 2021 at 12:55:15PM +1100, Alistair Popple wrote:
> On Friday, 26 March 2021 4:15:36 PM AEDT Balbir Singh wrote:
> > On Fri, Mar 26, 2021 at 12:20:35PM +1100, Alistair Popple wrote:
> > > +static int __region_intersects(resource_size_t start, size_t size,
> > > +                            unsigned long flags, unsigned long desc)
> > > +{
> > > +     struct resource res;
> > > +     int type = 0; int other = 0;
> > > +     struct resource *p;
> > > +
> > > +     res.start = start;
> > > +     res.end = start + size - 1;
> > > +
> > > +     for (p = iomem_resource.child; p ; p = p->sibling) {
> > > +             bool is_type = (((p->flags & flags) == flags) &&
> > > +                             ((desc == IORES_DESC_NONE) ||
> > > +                              (desc == p->desc)));
> > 
> > is_type is a bad name. Are we saying "is" as in a boolean question?
> > Or is it short for something like intersection_type? I know you've
> > just moved the code over :)
> 
> Yeah, I'm not a fan of that name either but I was just moving code over and 
> couldn't come up with anything better :)
> 
> It is a boolean question though - it is checking to see if resource *p is the 
> same type (flags+desc) of region as what is being checked for intersection.
>
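
Fair enough. For what it's worth, one way to make it read unambiguously as a
boolean question would be to pull the check out into a small named helper,
something like the sketch below (resource_type_matches() is just a made-up
name, not something the patch needs to add):

	static bool resource_type_matches(const struct resource *p,
					  unsigned long flags, unsigned long desc)
	{
		/* Same check as above: the flags match and either no
		 * particular descriptor was asked for or it matches too. */
		return (p->flags & flags) == flags &&
		       (desc == IORES_DESC_NONE || desc == p->desc);
	}

The loop body would then just ask resource_type_matches(p, flags, desc) to
decide which counter to bump.
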
> > > +
> > > +             if (resource_overlaps(p, &res))
> > > +                     is_type ? type++ : other++;
> > > +     }
> > > +
> > > +     if (type == 0)
> > > +             return REGION_DISJOINT;
> > > +
> > > +     if (other == 0)
> > > +             return REGION_INTERSECTS;
> > > +
> > > +     return REGION_MIXED;
> > > +}
> > > +
> > >  /**
> > >   * region_intersects() - determine intersection of region with known resources
> > >   * @start: region start address
> > > @@ -546,31 +574,12 @@ EXPORT_SYMBOL_GPL(page_is_ram);
> > >  int region_intersects(resource_size_t start, size_t size, unsigned long flags,
> > >                     unsigned long desc)
> > >  {
> > > -     struct resource res;
> > > -     int type = 0; int other = 0;
> > > -     struct resource *p;
> > > -
> > > -     res.start = start;
> > > -     res.end = start + size - 1;
> > > +     int rc;
> > >
> > >       read_lock(&resource_lock);
> > > -     for (p = iomem_resource.child; p ; p = p->sibling) {
> > > -             bool is_type = (((p->flags & flags) == flags) &&
> > > -                             ((desc == IORES_DESC_NONE) ||
> > > -                              (desc == p->desc)));
> > > -
> > > -             if (resource_overlaps(p, &res))
> > > -                     is_type ? type++ : other++;
> > > -     }
> > > +     rc = __region_intersects(start, size, flags, desc);
> > >       read_unlock(&resource_lock);
> > > -
> > > -     if (type == 0)
> > > -             return REGION_DISJOINT;
> > > -
> > > -     if (other == 0)
> > > -             return REGION_INTERSECTS;
> > > -
> > > -     return REGION_MIXED;
> > > +     return rc;
> > >  }
> > >  EXPORT_SYMBOL_GPL(region_intersects);
> > >
> > > @@ -1171,31 +1180,17 @@ struct address_space *iomem_get_mapping(void)
> > >       return smp_load_acquire(&iomem_inode)->i_mapping;
> > >  }
> > >
> > > -/**
> > > - * __request_region - create a new busy resource region
> > > - * @parent: parent resource descriptor
> > > - * @start: resource start address
> > > - * @n: resource region size
> > > - * @name: reserving caller's ID string
> > > - * @flags: IO resource flags
> > > - */
> > > -struct resource * __request_region(struct resource *parent,
> > > -                                resource_size_t start, resource_size_t n,
> > > -                                const char *name, int flags)
> > > +static bool request_region_locked(struct resource *parent,
> > > +                                 struct resource *res, resource_size_t start,
> > > +                                 resource_size_t n, const char *name, int flags)
> > >  {
> > > -     DECLARE_WAITQUEUE(wait, current);
> > > -     struct resource *res = alloc_resource(GFP_KERNEL);
> > >       struct resource *orig_parent = parent;
> > > -
> > > -     if (!res)
> > > -             return NULL;
> > > +     DECLARE_WAITQUEUE(wait, current);
> > 
> > This part of the diff looks confusing: do we have a waitqueue, and do we
> > call schedule(), within a function that is called with the lock held?
> 
> Good point. schedule() does get called but the lock is dropped first:
> 
>               if (conflict->flags & flags & IORESOURCE_MUXED) {
>                       add_wait_queue(&muxed_resource_wait, &wait);
>                       write_unlock(&resource_lock);
>                       set_current_state(TASK_UNINTERRUPTIBLE);
>                       schedule();
>                       remove_wait_queue(&muxed_resource_wait, &wait);
>                       write_lock(&resource_lock);
>                       continue;
>               }
> 
> This isn't an issue though, as that path is only reached via request_muxed_region(),
> which isn't used for the ZONE_DEVICE allocation and by design doesn't search for
> free space. I.e. IORESOURCE_MUXED will never be set for
> request_free_mem_region().
> 
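
Right, and IORESOURCE_MUXED is only something drivers ask for when they
deliberately share a port range, along the lines of the hypothetical snippet
below (BASE, LEN and "mydrv" are placeholders):

	/* may sleep until another muxed user releases the range */
	if (!request_muxed_region(BASE, LEN, "mydrv"))
		return -EBUSY;
	/* ... access the shared ports ... */
	release_region(BASE, LEN);

Since request_free_mem_region() never passes IORESOURCE_MUXED, it can't end
up sleeping in that branch, so the split looks fine to me.
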
> > >
> > >       res->name = name;
> > >       res->start = start;
> > >       res->end = start + n - 1;
> > >
> > > -     write_lock(&resource_lock);
> > > -
> > >       for (;;) {
> > >               struct resource *conflict;
> > >
> > > @@ -1230,16 +1225,39 @@ struct resource * __request_region(struct resource *parent,
> > >                       write_lock(&resource_lock);
> > >                       continue;
> > >               }
> > > -             /* Uhhuh, that didn't work out.. */
> > > -             free_resource(res);
> > > -             res = NULL;
> > > -             break;
> > > +             return false;
> > >       }
> > > -     write_unlock(&resource_lock);
> > >
> > >       if (res && orig_parent == &iomem_resource)
> > >               revoke_iomem(res);
> > >
> > > +     return true;
> > > +}
> > > +
> > > +/**
> > > + * __request_region - create a new busy resource region
> > > + * @parent: parent resource descriptor
> > > + * @start: resource start address
> > > + * @n: resource region size
> > > + * @name: reserving caller's ID string
> > > + * @flags: IO resource flags
> > > + */
> > > +struct resource *__request_region(struct resource *parent,
> > > +                               resource_size_t start, resource_size_t n,
> > > +                               const char *name, int flags)
> > > +{
> > > +     struct resource *res = alloc_resource(GFP_KERNEL);
> > > +
> > > +     if (!res)
> > > +             return NULL;
> > > +
> > > +     write_lock(&resource_lock);
> > > +     if (!request_region_locked(parent, res, start, n, name, flags)) {
> > > +             /* Uhhuh, that didn't work out.. */
> > > +             free_resource(res);
> > > +             res = NULL;
> > > +     }
> > > +     write_unlock(&resource_lock);
> > 
> > Should the function be called __request_region_locked?
> 
> This is the name of the original function, so this just maintains the
> original behaviour by taking the lock and calling the inner function
> (request_region_locked) rather than having that code open-coded there.
> 
> __request_region() is rarely called directly and is mostly called via macros:
> 
> include/linux/ioport.h:#define request_region(start,n,name)             __request_region(&ioport_resource, (start), (n), (name), 0)
> include/linux/ioport.h:#define request_muxed_region(start,n,name)       __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
> include/linux/ioport.h:#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
> include/linux/ioport.h:#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
>
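
For reference, callers going through those wrappers typically look something
like the hypothetical snippet below (addr, size and "mydrv" are placeholders),
so they never see __request_region() or its locking directly:

	void __iomem *base;

	if (!request_mem_region(addr, size, "mydrv"))
		return -EBUSY;

	base = ioremap(addr, size);
	if (!base) {
		release_mem_region(addr, size);
		return -ENOMEM;
	}
	/* ... MMIO accesses via readl()/writel() on base ... */
	iounmap(base);
	release_mem_region(addr, size);
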

Makes sense. I guess this saves the caller from having to call the API
again in the case of a conflict (a caller might never succeed
deterministically in the worst case)?

Acked-by: Balbir Singh <bsinghar...@gmail.com>
 
