On (09/10/08 19:19), Andy Whitcroft didst pronounce:
> On Mon, Oct 06, 2008 at 03:57:08PM +0100, Mel Gorman wrote:
> > On (03/10/08 18:37), Andy Whitcroft didst pronounce:
> > > Expose pool allocations for each of the available page sizes via the
> > > --pool-list command.  Exposes the minimum and maximum values indicating
> > > the dedicated pages and the size of the overcommit for all supported page
> > > sizes.
> > > 
> > > Signed-off-by: Andy Whitcroft <[EMAIL PROTECTED]>
> > > ---
> > >  hpoolcfg.c              |   35 +++++++++++++++++++
> > >  hugeutils.c             |   86 +++++++++++++++++++++++++++++++++++++++++++++++
> > >  libhugetlbfs_internal.h |    9 +++++
> > >  3 files changed, 130 insertions(+), 0 deletions(-)
> > > 
> > > diff --git a/hpoolcfg.c b/hpoolcfg.c
> > > index 887b8ed..46e8de4 100644
> > > --- a/hpoolcfg.c
> > > +++ b/hpoolcfg.c
> > > @@ -48,11 +48,40 @@ void print_usage()
> > >   fprintf(stderr, "hugectl [options] target\n");
> > >   fprintf(stderr, "options:\n");
> > >  
> > > + OPTION("--pool-list", "List all pools");
> > > +
> > >   OPTION("--help, -h", "Prints this message");
> > >  }
> > >  
> > >  int opt_dry_run = 0;
> > >  
> > > +/*
> > > + * getopts return values for options which are long only.
> > > + */
> > > +#define LONG_POOL        ('p' << 8)
> > > +#define LONG_POOL_LIST   (LONG_POOL|'l')
> > > +
> > > +#define MAX_POOLS        32
> > > +void pool_list(void)
> > > +{
> > > + struct hpage_pool pools[MAX_POOLS];
> > > + int pos;
> > > + int cnt;
> > > +
> > > + cnt = __lh_hpool_sizes(pools, MAX_POOLS);
> > > + if (cnt < 0) {
> > > +         ERROR("unable to obtain pools list");
> > > +         exit(EXIT_FAILURE);
> > > + }
> > > +
> > > + printf("%10s   %8s %8s %8s\n", "Size", "Minimum", "Current", "Maximum");
> > > + for (pos = 0; cnt--; pos++) {
> > > +         printf("%10ldkB %8ld %8ld %8ld\n", pools[pos].pagesize_kb,
> > > +                 pools[pos].minimum, pools[pos].size,
> > > +                 pools[pos].maximum);
> > > + }
> > > +}
> > > +
> > >  int main(int argc, char** argv)
> > >  {
> > >   char opts[] = "+h";
> > > @@ -60,6 +89,8 @@ int main(int argc, char** argv)
> > >   struct option long_opts[] = {
> > >           {"help",       no_argument, NULL, 'h'},
> > >  
> > > +         {"pool-list", no_argument, NULL, LONG_POOL_LIST},
> > > +
> > >           {0},
> > >   };
> > >  
> > > @@ -79,6 +110,10 @@ int main(int argc, char** argv)
> > >                   print_usage();
> > >                   exit(EXIT_SUCCESS);
> > >  
> > > +         case LONG_POOL_LIST:
> > > +                 pool_list();
> > > +                 break;
> > > +
> > >           default:
> > >                   WARNING("unparsed option %08x\n", ret);
> > >                   ret = -1;
> > > diff --git a/hugeutils.c b/hugeutils.c
> > > index 1ce6494..3eb9af6 100644
> > > --- a/hugeutils.c
> > > +++ b/hugeutils.c
> > > @@ -43,6 +43,7 @@
> > >  #include <sys/syscall.h>
> > >  #include <linux/types.h>
> > >  #include <linux/unistd.h>
> > > +#include <dirent.h>
> > >  
> > >  #include "libhugetlbfs_internal.h"
> > >  #include "hugetlbfs.h"
> > > @@ -446,6 +447,91 @@ void __lh_setup_mounts(void)
> > >           debug_show_page_sizes();
> > >  }
> > >  
> > > +#define MAX_POOLS 32
> > 
> > Where is the constant used in this file?
> 
> It's used just down there, but it also seems to be duplicated there.
> Hmmm.
> 

It's passed in as a parameter there though.
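
Just to illustrate what I mean (this mirrors what pool_list() in hpoolcfg.c
above already does, nothing new): the caller owns both the array and the
limit, and the library side only has to honour pcnt:

	struct hpage_pool pools[MAX_POOLS];
	int cnt;

	cnt = __lh_hpool_sizes(pools, MAX_POOLS);
	if (cnt < 0)
		ERROR("unable to obtain pools list");

so the second MAX_POOLS definition in hugeutils.c does look redundant.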

> > > +int __lh_hpool_sizes(struct hpage_pool *pools, int pcnt)
> > 
> > Should this function really be exported in the main library? i.e. do we
> > expect it to be used outside of hpoolcfg?
> > 
> > > +{
> > > + long default_size;
> > > + int which = 0;
> > > + DIR *dir;
> > > + struct dirent *entry;
> > > +
> > > + long nr_static;
> > > + long nr_over;
> > > + long nr_used;
> > > + long nr_resv;
> > > + long nr_surp;
> > > +
> > > + default_size = size_to_smaller_unit(file_read_ulong(MEMINFO,
> > > +                                                 "Hugepagesize:"));
> > > + if (default_size >= 0) {
> > > +         nr_used = get_huge_page_counter(default_size, HUGEPAGES_TOTAL);
> > > +         nr_surp = get_huge_page_counter(default_size, HUGEPAGES_SURP);
> > > +         nr_over = get_huge_page_counter(default_size, HUGEPAGES_OC);
> > > +         nr_resv = get_huge_page_counter(default_size, HUGEPAGES_RSVD);
> > > +
> > > +         /*
> > > +          * XXX: this is racy with respect to ongoing
> > > +          * allocations.  We need a nr_static_hugepages counter.
> > > +          */
> > > +         nr_static = nr_used - nr_surp;
> > > +
> > > +         if (which < pcnt && nr_static >= 0) {
> > 
> > Why would we not record the size of the pools in the event the static
> > pool is 0? In the event only dynamic pool resizing is used, would we not
> > skip over pools here?
> 
> We are keeping it if it's == 0, only dropping it when it's < 0.
> 

Ah.
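
And since that recording block is repeated for the default size and again for
each sysfs size, it could perhaps be folded into one small helper, which would
also keep the which < pcnt check in a single place.  Untested sketch, the
helper name is made up:

	/* Record one pool entry.  nr_static == 0 (a purely dynamically
	 * resized pool) is still recorded; only a negative value, i.e. a
	 * counter we failed to read, drops the entry. */
	static int pool_record(struct hpage_pool *pools, int which, int pcnt,
			long size, long nr_used, long nr_surp,
			long nr_over, long nr_resv)
	{
		long nr_static = nr_used - nr_surp;

		if (which >= pcnt || nr_static < 0)
			return which;

		pools[which].pagesize_kb = size;
		pools[which].minimum = nr_static;
		pools[which].maximum = nr_static + nr_over;
		pools[which].size = nr_used + nr_resv;
		return which + 1;
	}

Entirely optional though.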

> > As for the check if which < pcnt, should we not check earlier that pcnt
> > is something resembling a sensible value?
> 
> Well, it's being consistent in behaviour: for each update we only add the
> entry if there is space to do so.
> 

Ok.

> > 
> > > +                 DEBUG("default_size<%ld> min<%ld> max<%ld> "
> > > +                         "size<%ld>\n",
> > > +                         default_size, nr_static, nr_static + nr_over,
> > > +                         nr_used + nr_resv);
> > > +                 pools[which].pagesize_kb = default_size;
> > > +                 pools[which].minimum = nr_static;
> > > +                 pools[which].maximum = nr_static + nr_over;
> > > +                 pools[which].size = nr_used + nr_resv;
> > > +                 which++;
> > > +         }
> > > + }
> > > + 
> > > + dir = opendir(SYSFS_HUGEPAGES_DIR);
> > > + if (dir) {
> > > +         while ((entry = readdir(dir))) {
> > 
> > Would it be possible to have some sort of "for_each_pagesize" iterator?
> > This particular code pattern of "check proc and then sysfs" seems to
> > crop up more than once.
> 
> That is certainly possible.  Though in some senses this is trying to make
> a standardised 'list' and let people iterate over that, as we do for some
> of the other arguments to this command.
> 

If a specific code pattern is being established that will be useful in
the future, then we don't need the iterator. It may just turn out that a
backwards-compatible iterator is harder to read than this. Let's just
drop the iterator idea.
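
For the record, what I had vaguely pictured was something along these lines.
The helper name and signature are made up, it is not an interface I am
proposing:

	/*
	 * Hypothetical: fill 'sizes' with every supported huge page size in
	 * kB (the default size from /proc/meminfo plus one entry per sysfs
	 * hugepages-* directory), returning how many were found.
	 */
	int __lh_hpage_sizes(long *sizes, int max);

	long sizes[MAX_POOLS];
	int i, nr;

	nr = __lh_hpage_sizes(sizes, MAX_POOLS);
	for (i = 0; i < nr; i++) {
		long nr_used = get_huge_page_counter(sizes[i], HUGEPAGES_TOTAL);
		/* ... read the other counters and fill pools[i] ... */
	}

But as above, probably not worth pursuing here.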

> > > +                 char *name = entry->d_name;
> > > +                 long size;
> > > +
> > > +                 DEBUG("parsing<%s>\n", name);
> > > +                 if (strncmp(name, "hugepages-", 10) != 0)
> > > +                         continue;
> > > +                 name += 10;
> > > +
> > > +                 size = size_to_smaller_unit(atol(name));
> > > +                 if (size < 0 || size == default_size)
> > > +                         continue;
> > > +
> > > +                 nr_used = get_huge_page_counter(size, HUGEPAGES_TOTAL);
> > > +                 nr_surp = get_huge_page_counter(size, HUGEPAGES_SURP);
> > > +                 nr_over = get_huge_page_counter(size, HUGEPAGES_OC);
> > > +                 nr_resv = get_huge_page_counter(size, HUGEPAGES_RSVD);
> > > +
> > > +                 /*
> > > +                  * XXX: this is racy with respect to ongoing
> > > +                  * allocations.  We need a nr_static_hugepages counter.
> > > +                  */
> > > +                 nr_static = nr_used - nr_surp;
> > > +
> > > +                 if (which < pcnt && nr_used >= 0) {
> > > +                         DEBUG("size<%ld> min<%ld> max<%ld> "
> > > +                                 "size<%ld>\n",
> > > +                                 size, nr_static, nr_static + nr_over,
> > > +                                 nr_used + nr_resv);
> > > +                         pools[which].pagesize_kb = size;
> > > +                         pools[which].minimum = nr_static;
> > > +                         pools[which].maximum = nr_static + nr_over;
> > > +                         pools[which].size = nr_used + nr_resv;
> > > +                         which++;
> > > +                 }
> > > +         }
> > > +         closedir(dir);
> > > + }
> > > +
> > > + return (which < pcnt) ? which : -1;
> > > +}
> > > +
> > >  /********************************************************************/
> > >  /* Library user visible functions                                   */
> > >  /********************************************************************/
> > > diff --git a/libhugetlbfs_internal.h b/libhugetlbfs_internal.h
> > > index 197fa05..2912c5e 100644
> > > --- a/libhugetlbfs_internal.h
> > > +++ b/libhugetlbfs_internal.h
> > > @@ -89,6 +89,15 @@ struct hpage_size {
> > >   char mount[PATH_MAX+1];
> > >  };
> > >  
> > > +struct hpage_pool {
> > > + unsigned long pagesize_kb;
> > > + unsigned long minimum;
> > > + unsigned long maximum;
> > > + unsigned long size;
> > > +};
> > > +
> > > +extern int __lh_hpool_sizes(struct hpage_pool *, int);
> > > +
> > >  /* Arch-specific callbacks */
> > >  extern int direct_syscall(int sysnum, ...);
> > >  extern ElfW(Word) plt_extrasz(ElfW(Dyn) *dyntab);
> 
> -apw
> 

-- 
Mel Gorman
Part-time PhD Student                          Linux Technology Center
University of Limerick                         IBM Dublin Software Lab
