in the function /sys/src/cmd/cc/sub.c:/^arith we emit code
that casts the 64-bit subtraction result to 32-bit LONG *before*
doing the division by the sizeof of the pointed-to type.
so when the pointers are more than 4GB apart, we calculate
the wrong result, even though the result would have fit into a
32-bit LONG *after* the division.
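
to make the failure concrete, here is a small standalone demo in
plain C (not cc source; the addresses and element width are made
up). the narrowing conversion is implementation-defined in
standard C, but on the usual two's-complement targets it keeps
the low 32 bits, which is what the OCAST does:

#include <stdio.h>

/*
 * two 64-bit byte addresses 8GB apart, pointing at 8-byte
 * elements. the correct element difference is 1<<30, which
 * fits fine in a signed 32-bit long.
 */
int
main(void)
{
	long long p = 0x200000000LL;	/* hypothetical address */
	long long q = 0;
	long long w = 8;		/* element width */

	/* what cc does: narrow to 32 bits first, then divide */
	printf("cast before div: %d\n", (int)(p - q) / (int)w);

	/* narrowing after the division preserves the answer */
	printf("cast after div:  %d\n", (int)((p - q) / w));
	return 0;
}

the first printf gives 0 (the low 32 bits of 0x200000000 are all
zero), the second gives the correct 1073741824.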
questions:
1) why do we cast to long?
2) if a pointer subtraction has to yield a long, why don't we cast *after* the
division? (a sketch of that reordering follows the excerpt below)
if(n->op == OSUB)
if(i == TIND && j == TIND) {
	w = n->right->type->link->width;
	if(w < 1 || n->left->type->link == T ||
	   n->left->type->link->width < 1)
		goto bad;
	/* result type: TLONG if a pointer fits in a long, else TVLONG */
	n->type = types[ewidth[TIND] <= ewidth[TLONG]? TLONG: TVLONG];
	if(1 && ewidth[TIND] > ewidth[TLONG]){	/* <------- here */
		/* wrap the OSUB in an OCAST down to TLONG */
		n1 = new1(OXXX, Z, Z);
		*n1 = *n;
		n->op = OCAST;
		n->left = n1;
		n->right = Z;
		n->type = types[TLONG];
	}
	if(w > 1) {
		/* divide by the element width; the difference
		 * has already been narrowed to TLONG above */
		n1 = new1(OXXX, Z, Z);
		*n1 = *n;
		n->op = ODIV;
		n->left = n1;
		n1 = new1(OCONST, Z, Z);
		n1->vconst = w;
		n1->type = n->type;
		n->right = n1;
		/* strength-reduce a power-of-two division to a shift */
		w = vlog(n1);
		if(w >= 0) {
			n->op = OASHR;
			n1->vconst = w;
		}
	}
	return;
}
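
regarding question 2, this is roughly what casting *after* the
division would look like: swap the two blocks, so the ODIV/OASHR
is built on the full-width difference and the OCAST down to TLONG
only wraps the final quotient. untested sketch, not a patch:

if(n->op == OSUB)
if(i == TIND && j == TIND) {
	w = n->right->type->link->width;
	if(w < 1 || n->left->type->link == T ||
	   n->left->type->link->width < 1)
		goto bad;
	n->type = types[ewidth[TIND] <= ewidth[TLONG]? TLONG: TVLONG];
	if(w > 1) {
		/* divide the full 64-bit difference first */
		n1 = new1(OXXX, Z, Z);
		*n1 = *n;
		n->op = ODIV;
		n->left = n1;
		n1 = new1(OCONST, Z, Z);
		n1->vconst = w;
		n1->type = n->type;
		n->right = n1;
		w = vlog(n1);
		if(w >= 0) {
			n->op = OASHR;
			n1->vconst = w;
		}
	}
	if(ewidth[TIND] > ewidth[TLONG]){
		/* only now narrow the quotient to TLONG */
		n1 = new1(OXXX, Z, Z);
		*n1 = *n;
		n->op = OCAST;
		n->left = n1;
		n->right = Z;
		n->type = types[TLONG];
	}
	return;
}

that way the division/shift happens at TVLONG width and only the
result, which now fits, is narrowed.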
--
cinap