OK, the bounds check has been committed.
Based on Michael's suggestions, I'm now playing with the diff below.
It really makes the hashtable noticeably faster.
Still have to check if and why more entries than expected are used in
some cases.
-Otto
Index: table.c
===================================================================
RCS file: /cvs/src/bin/ksh/table.c,v
retrieving revision 1.14
diff -u -p -r1.14 table.c
--- table.c 2 Feb 2012 08:42:46 -0000 1.14
+++ table.c 2 Feb 2012 09:21:58 -0000
@@ -18,8 +18,8 @@ hash(const char *n)
unsigned int h = 0;
while (*n != '\0')
- h = 2*h + *n++;
- return h * 32821; /* scatter bits */
+ h = 33*h + *n++;
+ return h;
}
void
@@ -44,7 +44,7 @@ texpand(struct table *tp, int nsize)
for (i = 0; i < nsize; i++)
ntblp[i] = NULL;
tp->size = nsize;
- tp->nfree = 8*nsize/10; /* table can get 80% full */
+ tp->nfree = 7*nsize/10; /* table can get 70% full */
tp->tbls = ntblp;
if (otblp == NULL)
return;
@@ -108,7 +108,7 @@ ktenter(struct table *tp, const char *n,
}
if (tp->nfree <= 0) { /* too full */
- if (tp->size <= SHRT_MAX/2)
+ if (tp->size <= INT_MAX/2)
texpand(tp, 2*tp->size);
else
internal_errorf(1, "too many vars");
Index: table.h
===================================================================
RCS file: /cvs/src/bin/ksh/table.h,v
retrieving revision 1.7
diff -u -p -r1.7 table.h
--- table.h 11 Dec 2005 20:31:21 -0000 1.7
+++ table.h 2 Feb 2012 09:21:58 -0000
@@ -8,7 +8,7 @@
struct table {
Area *areap; /* area to allocate entries */
- short size, nfree; /* hash size (always 2^^n), free entries */
+ int size, nfree; /* hash size (always 2^^n), free entries */
struct tbl **tbls; /* hashed table items */
};