I sent a similar patch to the list in March:
https://lists.gnu.org/archive/html/tinycc-devel/2015-03/msg00067.html
The patch is a bit shorter this time because I've called the merged
member "i" instead of "ii".
No one objected strongly then, so this time I intend to commit the patch
unless there are strong objections.
diff --git a/arm-gen.c b/arm-gen.c
index a705073..218ace1 100644
--- a/arm-gen.c
+++ b/arm-gen.c
@@ -528,7 +528,7 @@ void load(int r, SValue *sv)
fr = sv->r;
ft = sv->type.t;
- fc = sv->c.ul;
+ fc = sv->c.i;
if(fc>=0)
sign=0;
@@ -543,14 +543,14 @@ void load(int r, SValue *sv)
if(v == VT_LLOCAL) {
v1.type.t = VT_PTR;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = sv->c.ul;
+ v1.c.i = sv->c.i;
load(base=14 /* lr */, &v1);
fc=sign=0;
v=VT_LOCAL;
} else if(v == VT_CONST) {
v1.type.t = VT_PTR;
v1.r = fr&~VT_LVAL;
- v1.c.ul = sv->c.ul;
+ v1.c.i = sv->c.i;
v1.sym=sv->sym;
load(base=14, &v1);
fc=sign=0;
@@ -609,38 +609,38 @@ void load(int r, SValue *sv)
}
} else {
if (v == VT_CONST) {
- op=stuff_const(0xE3A00000|(intr(r)<<12),sv->c.ul);
+ op=stuff_const(0xE3A00000|(intr(r)<<12),sv->c.i);
if (fr & VT_SYM || !op) {
o(0xE59F0000|(intr(r)<<12));
o(0xEA000000);
if(fr & VT_SYM)
greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
- o(sv->c.ul);
+ o(sv->c.i);
} else
o(op);
return;
} else if (v == VT_LOCAL) {
- op=stuff_const(0xE28B0000|(intr(r)<<12),sv->c.ul);
+ op=stuff_const(0xE28B0000|(intr(r)<<12),sv->c.i);
if (fr & VT_SYM || !op) {
o(0xE59F0000|(intr(r)<<12));
o(0xEA000000);
if(fr & VT_SYM) // needed ?
greloc(cur_text_section, sv->sym, ind, R_ARM_ABS32);
- o(sv->c.ul);
+ o(sv->c.i);
o(0xE08B0000|(intr(r)<<12)|intr(r));
} else
o(op);
return;
} else if(v == VT_CMP) {
- o(mapcc(sv->c.ul)|0x3A00001|(intr(r)<<12));
- o(mapcc(negcc(sv->c.ul))|0x3A00000|(intr(r)<<12));
+ o(mapcc(sv->c.i)|0x3A00001|(intr(r)<<12));
+ o(mapcc(negcc(sv->c.i))|0x3A00000|(intr(r)<<12));
return;
} else if (v == VT_JMP || v == VT_JMPI) {
int t;
t = v & 1;
o(0xE3A00000|(intr(r)<<12)|t);
o(0xEA000000);
- gsym(sv->c.ul);
+ gsym(sv->c.i);
o(0xE3A00000|(intr(r)<<12)|(t^1));
return;
} else if (v < VT_CONST) {
@@ -667,7 +667,7 @@ void store(int r, SValue *sv)
fr = sv->r;
ft = sv->type.t;
- fc = sv->c.ul;
+ fc = sv->c.i;
if(fc>=0)
sign=0;
@@ -686,7 +686,7 @@ void store(int r, SValue *sv)
} else if(v == VT_CONST) {
v1.type.t = ft;
v1.r = fr&~VT_LVAL;
- v1.c.ul = sv->c.ul;
+ v1.c.i = sv->c.i;
v1.sym=sv->sym;
load(base=14, &v1);
fc=sign=0;
@@ -751,7 +751,7 @@ static void gcall_or_jmp(int is_jmp)
if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
uint32_t x;
/* constant case */
- x=encbranch(ind,ind+vtop->c.ul,0);
+ x=encbranch(ind,ind+vtop->c.i,0);
if(x) {
if (vtop->r & VT_SYM) {
/* relocation case */
@@ -765,7 +765,7 @@ static void gcall_or_jmp(int is_jmp)
o(0xE51FF004); // ldr pc,[pc,#-4]
if (vtop->r & VT_SYM)
greloc(cur_text_section, vtop->sym, ind, R_ARM_ABS32);
- o(vtop->c.ul);
+ o(vtop->c.i);
}
} else {
/* otherwise, indirect call */
diff --git a/arm64-gen.c b/arm64-gen.c
index ffdf7e0..d43b50d 100644
--- a/arm64-gen.c
+++ b/arm64-gen.c
@@ -463,7 +463,7 @@ ST_FUNC void load(int r, SValue *sv)
int svtt = sv->type.t;
int svr = sv->r & ~VT_LVAL_TYPE;
int svrv = svr & VT_VALMASK;
- uint64_t svcul = (uint32_t)sv->c.ul;
+ uint64_t svcul = (uint32_t)sv->c.i;
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
if (svr == (VT_LOCAL | VT_LVAL)) {
@@ -502,7 +502,7 @@ ST_FUNC void load(int r, SValue *sv)
if (svr == VT_CONST) {
if ((svtt & VT_BTYPE) != VT_VOID)
arm64_movimm(intr(r), arm64_type_size(svtt) == 3 ?
- sv->c.ull : (uint32_t)svcul);
+ sv->c.i : (uint32_t)svcul);
return;
}
@@ -558,7 +558,7 @@ ST_FUNC void store(int r, SValue *sv)
int svtt = sv->type.t;
int svr = sv->r & ~VT_LVAL_TYPE;
int svrv = svr & VT_VALMASK;
- uint64_t svcul = (uint32_t)sv->c.ul;
+ uint64_t svcul = (uint32_t)sv->c.i;
svcul = svcul >> 31 & 1 ? svcul - ((uint64_t)1 << 32) : svcul;
if (svr == (VT_LOCAL | VT_LVAL)) {
@@ -1344,16 +1344,10 @@ static int arm64_iconst(uint64_t *val, SValue *sv)
if ((sv->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
return 0;
if (val) {
- int t = sv->type.t & (VT_BTYPE | VT_UNSIGNED);
- // It's crazy how TCC has all these alternatives for storing a value:
- if (t == (VT_LLONG | VT_UNSIGNED))
- *val = sv->c.ull;
- else if (t == VT_LLONG)
- *val = sv->c.ll;
- else if (t & VT_UNSIGNED)
- *val = sv->c.ui;
- else
- *val = sv->c.i;
+ int t = sv->type.t;
+ *val = ((t & VT_BTYPE) == VT_LLONG ? sv->c.i :
+ (uint32_t)sv->c.i |
+ (t & VT_UNSIGNED ? 0 : -(sv->c.i & 0x80000000)));
}
return 1;
}
diff --git a/c67-gen.c b/c67-gen.c
index 2182518..5ea9666 100644
--- a/c67-gen.c
+++ b/c67-gen.c
@@ -1567,14 +1567,14 @@ void load(int r, SValue * sv)
fr = sv->r;
ft = sv->type.t;
- fc = sv->c.ul;
+ fc = sv->c.i;
v = fr & VT_VALMASK;
if (fr & VT_LVAL) {
if (v == VT_LLOCAL) {
v1.type.t = VT_INT;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
load(r, &v1);
fr = r;
} else if ((ft & VT_BTYPE) == VT_LDOUBLE) {
@@ -1726,7 +1726,7 @@ void store(int r, SValue * v)
int fr, bt, ft, fc, size, t, element;
ft = v->type.t;
- fc = v->c.ul;
+ fc = v->c.i;
fr = v->r & VT_VALMASK;
bt = ft & VT_BTYPE;
/* XXX: incorrect if float reg to reg */
@@ -2106,13 +2106,12 @@ int gtst(int inv, int t)
/* && or || optimization */
if ((v & 1) == inv) {
/* insert vtop->c jump list in t */
- p = &vtop->c.i;
// I guess the idea is to traverse to the
// null at the end of the list and store t
// there
- n = *p;
+ n = vtop->c.i;
while (n != 0) {
p = (int *) (cur_text_section->data + n);
@@ -2304,7 +2303,7 @@ void gen_opf(int op)
gv2(RC_FLOAT, RC_FLOAT); // make sure src2 is on b side
ft = vtop->type.t;
- fc = vtop->c.ul;
+ fc = vtop->c.i;
r = vtop->r;
fr = vtop[-1].r;
diff --git a/i386-asm.c b/i386-asm.c
index 840a971..bd0825b 100644
--- a/i386-asm.c
+++ b/i386-asm.c
@@ -1301,7 +1301,7 @@ ST_FUNC void subst_asm_operand(CString *add_str,
cstr_ccat(add_str, '$');
if (r & VT_SYM) {
cstr_cat(add_str, get_tok_str(sv->sym->v, NULL));
- if (sv->c.i != 0) {
+ if ((uint32_t)sv->c.i != 0) {
cstr_ccat(add_str, '+');
} else {
return;
@@ -1310,10 +1310,10 @@ ST_FUNC void subst_asm_operand(CString *add_str,
val = sv->c.i;
if (modifier == 'n')
val = -val;
- snprintf(buf, sizeof(buf), "%d", sv->c.i);
+ snprintf(buf, sizeof(buf), "%d", (int)sv->c.i);
cstr_cat(add_str, buf);
} else if ((r & VT_VALMASK) == VT_LOCAL) {
- snprintf(buf, sizeof(buf), "%d(%%ebp)", sv->c.i);
+ snprintf(buf, sizeof(buf), "%d(%%ebp)", (int)sv->c.i);
cstr_cat(add_str, buf);
} else if (r & VT_LVAL) {
reg = r & VT_VALMASK;
@@ -1431,7 +1431,7 @@ ST_FUNC void asm_gen_code(ASMOperand *operands, int nb_operands,
if (op->is_llong) {
SValue sv;
sv = *op->vt;
- sv.c.ul += 4;
+ sv.c.i += 4;
load(TREG_XDX, &sv);
}
}
@@ -1457,7 +1457,7 @@ ST_FUNC void asm_gen_code(ASMOperand *operands, int nb_operands,
if (op->is_llong) {
SValue sv;
sv = *op->vt;
- sv.c.ul += 4;
+ sv.c.i += 4;
store(TREG_XDX, &sv);
}
}
diff --git a/i386-gen.c b/i386-gen.c
index 8284cf0..993293e 100644
--- a/i386-gen.c
+++ b/i386-gen.c
@@ -222,14 +222,14 @@ ST_FUNC void load(int r, SValue *sv)
fr = sv->r;
ft = sv->type.t;
- fc = sv->c.ul;
+ fc = sv->c.i;
v = fr & VT_VALMASK;
if (fr & VT_LVAL) {
if (v == VT_LLOCAL) {
v1.type.t = VT_INT;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
fr = r;
if (!(reg_classes[fr] & RC_INT))
fr = get_reg(RC_INT);
@@ -297,7 +297,7 @@ ST_FUNC void store(int r, SValue *v)
#endif
ft = v->type.t;
- fc = v->c.ul;
+ fc = v->c.i;
fr = v->r & VT_VALMASK;
bt = ft & VT_BTYPE;
/* XXX: incorrect if float reg to reg */
@@ -362,7 +362,7 @@ static void gcall_or_jmp(int is_jmp)
put_elf_reloc(symtab_section, cur_text_section,
ind + 1, R_386_PC32, 0);
}
- oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
+ oad(0xe8 + is_jmp, vtop->c.i - 4); /* call/jmp im */
} else {
/* otherwise, indirect call */
r = gv(RC_INT);
@@ -671,7 +671,7 @@ ST_FUNC void gjmp_addr(int a)
/* generate a test. set 'inv' to invert test. Stack entry is popped */
ST_FUNC int gtst(int inv, int t)
{
- int v, *p;
+ int v, t1, *p;
v = vtop->r & VT_VALMASK;
if (v == VT_CMP) {
@@ -682,11 +682,13 @@ ST_FUNC int gtst(int inv, int t)
/* && or || optimization */
if ((v & 1) == inv) {
/* insert vtop->c jump list in t */
- p = &vtop->c.i;
+ t1 = vtop->c.i;
+ p = &t1;
while (*p != 0)
p = (int *)(cur_text_section->data + *p);
*p = t;
- t = vtop->c.i;
+ vtop->c.i = t1;
+ t = t1;
} else {
t = gjmp(t);
gsym(vtop->c.i);
@@ -923,7 +925,7 @@ ST_FUNC void gen_opf(int op)
break;
}
ft = vtop->type.t;
- fc = vtop->c.ul;
+ fc = vtop->c.i;
if ((ft & VT_BTYPE) == VT_LDOUBLE) {
o(0xde); /* fxxxp %st, %st(1) */
o(0xc1 + (a << 3));
@@ -935,7 +937,7 @@ ST_FUNC void gen_opf(int op)
r = get_reg(RC_INT);
v1.type.t = VT_INT;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
load(r, &v1);
fc = 0;
}
@@ -1051,7 +1053,7 @@ ST_FUNC void gen_bounded_ptr_add(void)
vtop++;
vtop->r = TREG_EAX | VT_BOUNDED;
/* address of bounding function call point */
- vtop->c.ul = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
+ vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(Elf32_Rel));
}
/* patch pointer addition in vtop so that pointer dereferencing is
@@ -1088,7 +1090,7 @@ ST_FUNC void gen_bounded_ptr_deref(void)
/* patch relocation */
/* XXX: find a better solution ? */
- rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.ul);
+ rel = (Elf32_Rel *)(cur_text_section->reloc->data + vtop->c.i);
sym = external_global_sym(func, &func_old_type, 0);
if (!sym->c)
put_extern_sym(sym, NULL, 0, 0);
diff --git a/tcc.h b/tcc.h
index 5e86df6..ff846fd 100644
--- a/tcc.h
+++ b/tcc.h
@@ -354,13 +354,8 @@ typedef union CValue {
long double ld;
double d;
float f;
- int i;
- unsigned int ui;
- unsigned int ul; /* address (should be unsigned long on 64 bit cpu) */
- long long ll;
- unsigned long long ull;
+ uint64_t i;
struct CString *cstr;
- addr_t ptr_offset;
int tab[LDOUBLE_SIZE/4];
} CValue;
diff --git a/tccgen.c b/tccgen.c
index 9405761..fe2809f 100644
--- a/tccgen.c
+++ b/tccgen.c
@@ -378,7 +378,7 @@ ST_FUNC void vpushi(int v)
static void vpushs(addr_t v)
{
CValue cval;
- cval.ptr_offset = v;
+ cval.i = v;
vsetc(&size_type, VT_CONST, &cval);
}
@@ -389,7 +389,7 @@ ST_FUNC void vpush64(int ty, unsigned long long v)
CType ctype;
ctype.t = ty;
ctype.ref = NULL;
- cval.ull = v;
+ cval.i = v;
vsetc(&ctype, VT_CONST, &cval);
}
@@ -403,7 +403,7 @@ static inline void vpushll(long long v)
static inline void vpushsym(CType *type, Sym *sym)
{
CValue cval;
- cval.ptr_offset = 0;
+ cval.i = 0;
vsetc(type, VT_CONST | VT_SYM, &cval);
vtop->sym = sym;
}
@@ -560,7 +560,7 @@ ST_FUNC void save_reg(int r)
loc = (loc - size) & -align;
sv.type.t = type->t;
sv.r = VT_LOCAL | VT_LVAL;
- sv.c.ul = loc;
+ sv.c.i = loc;
store(r, &sv);
#if defined(TCC_TARGET_I386) || defined(TCC_TARGET_X86_64)
/* x86 specific: need to pop fp register ST0 if saved */
@@ -571,7 +571,7 @@ ST_FUNC void save_reg(int r)
#if !defined(TCC_TARGET_ARM64) && !defined(TCC_TARGET_X86_64)
/* special long long case */
if ((type->t & VT_BTYPE) == VT_LLONG) {
- sv.c.ul += 4;
+ sv.c.i += 4;
store(p->r2, &sv);
}
#endif
@@ -582,13 +582,13 @@ ST_FUNC void save_reg(int r)
if (p->r & VT_LVAL) {
/* also clear the bounded flag because the
relocation address of the function was stored in
- p->c.ul */
+ p->c.i */
p->r = (p->r & ~(VT_VALMASK | VT_BOUNDED)) | VT_LLOCAL;
} else {
p->r = lvalue_type(p->type.t) | VT_LOCAL;
}
p->r2 = VT_CONST;
- p->c.ul = l;
+ p->c.i = l;
}
}
}
@@ -681,7 +681,7 @@ static void move_reg(int r, int s, int t)
sv.type.t = t;
sv.type.ref = NULL;
sv.r = s;
- sv.c.ul = 0;
+ sv.c.i = 0;
load(r, &sv);
}
}
@@ -800,7 +800,7 @@ ST_FUNC int gv(int rc)
sym = get_sym_ref(&vtop->type, data_section, offset, size << 2);
vtop->r |= VT_LVAL | VT_SYM;
vtop->sym = sym;
- vtop->c.ptr_offset = 0;
+ vtop->c.i = 0;
}
#ifdef CONFIG_TCC_BCHECK
if (vtop->r & VT_MUSTBOUND)
@@ -849,8 +849,8 @@ ST_FUNC int gv(int rc)
#if !defined(TCC_TARGET_ARM64) && !defined(TCC_TARGET_X86_64)
if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST) {
/* load constant */
- ll = vtop->c.ull;
- vtop->c.ui = ll; /* first word */
+ ll = vtop->c.i;
+ vtop->c.i = ll; /* first word */
load(r, vtop);
vtop->r = r; /* save register value */
vpushi(ll >> 32); /* second word */
@@ -1011,11 +1011,11 @@ ST_FUNC void lexpand_nr(void)
vtop->type.t = VT_INT | u;
v=vtop[-1].r & (VT_VALMASK | VT_LVAL);
if (v == VT_CONST) {
- vtop[-1].c.ui = vtop->c.ull;
- vtop->c.ui = vtop->c.ull >> 32;
+ vtop[-1].c.i = vtop->c.i;
+ vtop->c.i = vtop->c.i >> 32;
vtop->r = VT_CONST;
} else if (v == (VT_LVAL|VT_CONST) || v == (VT_LVAL|VT_LOCAL)) {
- vtop->c.ui += 4;
+ vtop->c.i += 4;
vtop->r = vtop[-1].r;
} else if (v > VT_CONST) {
vtop--;
@@ -1085,7 +1085,7 @@ ST_FUNC void vpop(void)
#endif
if (v == VT_JMP || v == VT_JMPI) {
/* need to put correct jump if && or || without test */
- gsym(vtop->c.ul);
+ gsym(vtop->c.i);
}
vtop--;
}
@@ -1128,7 +1128,7 @@ static void gv_dup(void)
r = gv(rc);
r1 = get_reg(rc);
sv.r = r;
- sv.c.ul = 0;
+ sv.c.i = 0;
load(r1, &sv); /* move r to r1 */
vdup();
/* duplicates value */
@@ -1408,19 +1408,15 @@ static void gen_opic(int op)
t1 = v1->type.t & VT_BTYPE;
t2 = v2->type.t & VT_BTYPE;
- if (t1 == VT_LLONG)
- l1 = v1->c.ll;
- else if (v1->type.t & VT_UNSIGNED)
- l1 = v1->c.ui;
- else
- l1 = v1->c.i;
+ l1 = v1->c.i;
+ if (t1 != VT_LLONG)
+ l1 = ((uint32_t)l1 |
+ (v1->type.t & VT_UNSIGNED ? 0 : -(l1 & 0x80000000)));
- if (t2 == VT_LLONG)
- l2 = v2->c.ll;
- else if (v2->type.t & VT_UNSIGNED)
- l2 = v2->c.ui;
- else
- l2 = v2->c.i;
+ l2 = v2->c.i;
+ if (t2 != VT_LLONG)
+ l2 = ((uint32_t)l2 |
+ (v2->type.t & VT_UNSIGNED ? 0 : -(l2 & 0x80000000)));
/* currently, we cannot do computations with forward symbols */
c1 = (v1->r & (VT_VALMASK | VT_LVAL | VT_SYM)) == VT_CONST;
@@ -1472,7 +1468,7 @@ static void gen_opic(int op)
default:
goto general_case;
}
- v1->c.ll = l1;
+ v1->c.i = l1;
vtop--;
} else {
/* if commutative ops, put c2 as constant */
@@ -1495,7 +1491,7 @@ static void gen_opic(int op)
(l2 == 1 && (op == '%' || op == TOK_UMOD)))) {
/* treat (x & 0), (x * 0), (x | -1) and (x % 1) as constant */
if (l2 == 1)
- vtop->c.ll = 0;
+ vtop->c.i = 0;
vswap();
vtop--;
} else if (c2 && (((op == '*' || op == '/' || op == TOK_UDIV ||
@@ -1516,7 +1512,7 @@ static void gen_opic(int op)
l2 >>= 1;
n++;
}
- vtop->c.ll = n;
+ vtop->c.i = n;
if (op == '*')
op = TOK_SHL;
else if (op == TOK_PDIV)
@@ -1532,7 +1528,7 @@ static void gen_opic(int op)
if (op == '-')
l2 = -l2;
vtop--;
- vtop->c.ll += l2;
+ vtop->c.i += l2;
} else {
general_case:
if (!nocode_wanted) {
@@ -1629,9 +1625,10 @@ static inline int is_null_pointer(SValue *p)
{
if ((p->r & (VT_VALMASK | VT_LVAL | VT_SYM)) != VT_CONST)
return 0;
- return ((p->type.t & VT_BTYPE) == VT_INT && p->c.i == 0) ||
- ((p->type.t & VT_BTYPE) == VT_LLONG && p->c.ll == 0) ||
- ((p->type.t & VT_BTYPE) == VT_PTR && p->c.ptr_offset == 0);
+ return ((p->type.t & VT_BTYPE) == VT_INT && (uint32_t)p->c.i == 0) ||
+ ((p->type.t & VT_BTYPE) == VT_LLONG && p->c.i == 0) ||
+ ((p->type.t & VT_BTYPE) == VT_PTR &&
+ (PTR_SIZE == 4 ? (uint32_t)p->c.i == 0 : p->c.i == 0));
}
static inline int is_integer_btype(int bt)
@@ -1994,15 +1991,15 @@ static void gen_cast(CType *type)
if (df) {
if ((sbt & VT_BTYPE) == VT_LLONG) {
- if (sbt & VT_UNSIGNED)
- vtop->c.ld = vtop->c.ull;
+ if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 63))
+ vtop->c.ld = vtop->c.i;
else
- vtop->c.ld = vtop->c.ll;
+ vtop->c.ld = -(long double)-vtop->c.i;
} else if(!sf) {
- if (sbt & VT_UNSIGNED)
- vtop->c.ld = vtop->c.ui;
+ if ((sbt & VT_UNSIGNED) || !(vtop->c.i >> 31))
+ vtop->c.ld = (uint32_t)vtop->c.i;
else
- vtop->c.ld = vtop->c.i;
+ vtop->c.ld = -(long double)-(uint32_t)vtop->c.i;
}
if (dbt == VT_FLOAT)
@@ -2010,41 +2007,39 @@ static void gen_cast(CType *type)
else if (dbt == VT_DOUBLE)
vtop->c.d = (double)vtop->c.ld;
} else if (sf && dbt == (VT_LLONG|VT_UNSIGNED)) {
- vtop->c.ull = (unsigned long long)vtop->c.ld;
+ vtop->c.i = vtop->c.ld;
} else if (sf && dbt == VT_BOOL) {
vtop->c.i = (vtop->c.ld != 0);
} else {
if(sf)
- vtop->c.ll = (long long)vtop->c.ld;
+ vtop->c.i = vtop->c.ld;
else if (sbt == (VT_LLONG|VT_UNSIGNED))
- vtop->c.ll = vtop->c.ull;
+ ;
else if (sbt & VT_UNSIGNED)
- vtop->c.ll = vtop->c.ui;
+ vtop->c.i = (uint32_t)vtop->c.i;
#if defined(TCC_TARGET_ARM64) || defined(TCC_TARGET_X86_64)
else if (sbt == VT_PTR)
;
#endif
else if (sbt != VT_LLONG)
- vtop->c.ll = vtop->c.i;
+ vtop->c.i = ((uint32_t)vtop->c.i |
+ -(vtop->c.i & 0x80000000));
if (dbt == (VT_LLONG|VT_UNSIGNED))
- vtop->c.ull = vtop->c.ll;
+ ;
else if (dbt == VT_BOOL)
- vtop->c.i = (vtop->c.ll != 0);
+ vtop->c.i = (vtop->c.i != 0);
#if defined(TCC_TARGET_ARM64) || defined(TCC_TARGET_X86_64)
else if (dbt == VT_PTR)
;
#endif
else if (dbt != VT_LLONG) {
- int s = 0;
- if ((dbt & VT_BTYPE) == VT_BYTE)
- s = 24;
- else if ((dbt & VT_BTYPE) == VT_SHORT)
- s = 16;
- if(dbt & VT_UNSIGNED)
- vtop->c.ui = ((unsigned int)vtop->c.ll << s) >> s;
- else
- vtop->c.i = ((int)vtop->c.ll << s) >> s;
+ uint32_t m = ((dbt & VT_BTYPE) == VT_BYTE ? 0xff :
+ (dbt & VT_BTYPE) == VT_SHORT ? 0xffff :
+ 0xffffffff);
+ vtop->c.i &= m;
+ if (!(dbt & VT_UNSIGNED))
+ vtop->c.i |= -(vtop->c.i & ((m >> 1) + 1));
}
}
} else if (p && dbt == VT_BOOL) {
@@ -2669,7 +2664,7 @@ ST_FUNC void vstore(void)
sv.type.t = VT_INT;
#endif
sv.r = VT_LOCAL | VT_LVAL;
- sv.c.ul = vtop[-1].c.ul;
+ sv.c.i = vtop[-1].c.i;
load(t, &sv);
vtop[-1].r = t | VT_LVAL;
}
@@ -3854,7 +3849,7 @@ ST_FUNC void unary(void)
gen_cast(&boolean);
vtop->c.i = !vtop->c.i;
} else if ((vtop->r & VT_VALMASK) == VT_CMP)
- vtop->c.i = vtop->c.i ^ 1;
+ vtop->c.i ^= 1;
else if (!nocode_wanted) {
save_regs(1);
vseti(VT_JMP, gvtst(1, 0));
@@ -3940,13 +3935,13 @@ ST_FUNC void unary(void)
CType type;
next();
skip('(');
- if (tok != TOK_CINT || tokc.i < 0) {
+ if (tok != TOK_CINT) {
tcc_error("%s only takes positive integers",
tok1 == TOK_builtin_return_address ?
"__builtin_return_address" :
"__builtin_frame_address");
}
- level = tokc.i;
+ level = (uint32_t)tokc.i;
next();
skip(')');
type.t = VT_VOID;
@@ -4144,7 +4139,7 @@ ST_FUNC void unary(void)
/* if forward reference, we must point to s */
if (vtop->r & VT_SYM) {
vtop->sym = s;
- vtop->c.ptr_offset = 0;
+ vtop->c.i = 0;
}
break;
}
@@ -4913,7 +4908,8 @@ static void block(int *bsym, int *csym, int *case_sym, int *def_sym,
/* returning structure packed into registers */
int r, size, addr, align;
size = type_size(&func_vt,&align);
- if ((vtop->r != (VT_LOCAL | VT_LVAL) || (vtop->c.i & (ret_align-1)))
+ if ((vtop->r != (VT_LOCAL | VT_LVAL) ||
+ (vtop->c.i & (ret_align-1)))
&& (align & (ret_align-1))) {
loc = (loc - size) & -ret_align;
addr = loc;
@@ -5368,10 +5364,10 @@ static void init_putv(CType *type, Section *sec, unsigned long c,
*(long double *)ptr = vtop->c.ld;
break;
case VT_LLONG:
- *(long long *)ptr |= (vtop->c.ll & bit_mask) << bit_pos;
+ *(long long *)ptr |= (vtop->c.i & bit_mask) << bit_pos;
break;
case VT_PTR: {
- addr_t val = (vtop->c.ptr_offset & bit_mask) << bit_pos;
+ addr_t val = (vtop->c.i & bit_mask) << bit_pos;
#if defined(TCC_TARGET_ARM64) || defined(TCC_TARGET_X86_64)
if (vtop->r & VT_SYM)
greloca(sec, vtop->sym, c, R_DATA_PTR, val);
diff --git a/tccpe.c b/tccpe.c
index 0fd8c36..eb13d2d 100644
--- a/tccpe.c
+++ b/tccpe.c
@@ -1473,9 +1473,9 @@ ST_FUNC SValue *pe_getimport(SValue *sv, SValue *v2)
load(r2, v2);
v2->r = r2;
- if (sv->c.ui) {
+ if ((uint32_t)sv->c.i) {
vpushv(v2);
- vpushi(sv->c.ui);
+ vpushi(sv->c.i);
gen_opi('+');
*v2 = *vtop--;
}
diff --git a/tccpp.c b/tccpp.c
index 77fedb4..8b78deb 100644
--- a/tccpp.c
+++ b/tccpp.c
@@ -293,15 +293,15 @@ ST_FUNC const char *get_tok_str(int v, CValue *cv)
case TOK_CINT:
case TOK_CUINT:
/* XXX: not quite exact, but only useful for testing */
- sprintf(p, "%u", cv->ui);
+ sprintf(p, "%llu", (unsigned long long)cv->i);
break;
case TOK_CLLONG:
case TOK_CULLONG:
/* XXX: not quite exact, but only useful for testing */
#ifdef _WIN32
- sprintf(p, "%u", (unsigned)cv->ull);
+ sprintf(p, "%u", (unsigned)cv->i);
#else
- sprintf(p, "%llu", cv->ull);
+ sprintf(p, "%llu", (unsigned long long)cv->i);
#endif
break;
case TOK_LCHAR:
@@ -2287,9 +2287,9 @@ static void parse_number(const char *p)
}
if (tok == TOK_CINT || tok == TOK_CUINT)
- tokc.ui = n;
+ tokc.i = n;
else
- tokc.ull = n;
+ tokc.i = n;
}
if (ch)
tcc_error("invalid number\n");
diff --git a/x86_64-gen.c b/x86_64-gen.c
index 2d69b75..5394bf5 100644
--- a/x86_64-gen.c
+++ b/x86_64-gen.c
@@ -368,7 +368,7 @@ void load(int r, SValue *sv)
fr = sv->r;
ft = sv->type.t & ~VT_DEFSIGN;
- fc = sv->c.ul;
+ fc = sv->c.i;
#ifndef TCC_TARGET_PE
/* we use indirect access via got */
@@ -393,7 +393,7 @@ void load(int r, SValue *sv)
if (v == VT_LLOCAL) {
v1.type.t = VT_PTR;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
fr = r;
if (!(reg_classes[fr] & (RC_INT|RC_R11)))
fr = get_reg(RC_INT);
@@ -449,7 +449,7 @@ void load(int r, SValue *sv)
#endif
} else if (is64_type(ft)) {
orex(1,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
- gen_le64(sv->c.ull);
+ gen_le64(sv->c.i);
} else {
orex(0,r,0, 0xb8 + REG_VALUE(r)); /* mov $xx, r */
gen_le32(fc);
@@ -531,7 +531,7 @@ void store(int r, SValue *v)
#endif
ft = v->type.t;
- fc = v->c.ul;
+ fc = v->c.i;
fr = v->r & VT_VALMASK;
bt = ft & VT_BTYPE;
@@ -540,7 +540,7 @@ void store(int r, SValue *v)
if (fr == VT_CONST && (v->r & VT_SYM)) {
/* mov xx(%rip), %r11 */
o(0x1d8b4c);
- gen_gotpcrel(TREG_R11, v->sym, v->c.ul);
+ gen_gotpcrel(TREG_R11, v->sym, v->c.i);
pic = is64_type(bt) ? 0x49 : 0x41;
}
#endif
@@ -601,7 +601,7 @@ static void gcall_or_jmp(int is_jmp)
{
int r;
if ((vtop->r & (VT_VALMASK | VT_LVAL)) == VT_CONST &&
- ((vtop->r & VT_SYM) || (vtop->c.ll-4) == (int)(vtop->c.ll-4))) {
+ ((vtop->r & VT_SYM) || (vtop->c.i-4) == (int)(vtop->c.i-4))) {
/* constant case */
if (vtop->r & VT_SYM) {
/* relocation case */
@@ -615,7 +615,7 @@ static void gcall_or_jmp(int is_jmp)
put_elf_reloc(symtab_section, cur_text_section,
ind + 1, R_X86_64_PC32, 0);
}
- oad(0xe8 + is_jmp, vtop->c.ul - 4); /* call/jmp im */
+ oad(0xe8 + is_jmp, vtop->c.i - 4); /* call/jmp im */
} else {
/* otherwise, indirect call */
r = TREG_R11;
@@ -663,7 +663,7 @@ ST_FUNC void gen_bounded_ptr_add(void)
/* relocation offset of the bounding function call point */
- vtop->c.ull = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
+ vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
}
/* patch pointer addition in vtop so that pointer dereferencing is
@@ -705,7 +705,7 @@ ST_FUNC void gen_bounded_ptr_deref(void)
/* patch relocation */
/* XXX: find a better solution ? */
- rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.ull);
+ rel = (ElfW(Rela) *)(cur_text_section->reloc->data + vtop->c.i);
rel->r_info = ELF64_R_INFO(sym->c, ELF64_R_TYPE(rel->r_info));
}
#endif
@@ -1691,7 +1691,7 @@ void gjmp_addr(int a)
/* generate a test. set 'inv' to invert test. Stack entry is popped */
int gtst(int inv, int t)
{
- int v, *p;
+ int v, t1, *p;
v = vtop->r & VT_VALMASK;
if (v == VT_CMP) {
@@ -1720,11 +1720,13 @@ int gtst(int inv, int t)
/* && or || optimization */
if ((v & 1) == inv) {
/* insert vtop->c jump list in t */
- p = &vtop->c.i;
+ t1 = vtop->c.i;
+ p = &t1;
while (*p != 0)
p = (int *)(cur_text_section->data + *p);
*p = t;
- t = vtop->c.i;
+ vtop->c.i = t1;
+ t = t1;
} else {
t = gjmp(t);
gsym(vtop->c.i);
@@ -1749,7 +1751,7 @@ void gen_opi(int op)
case TOK_ADDC1: /* add with carry generation */
opc = 0;
gen_op8:
- if (cc && (!ll || (int)vtop->c.ll == vtop->c.ll)) {
+ if (cc && (!ll || (int)vtop->c.i == vtop->c.i)) {
/* constant case */
vswap();
r = gv(RC_INT);
@@ -1957,7 +1959,7 @@ void gen_opf(int op)
break;
}
ft = vtop->type.t;
- fc = vtop->c.ul;
+ fc = vtop->c.i;
o(0xde); /* fxxxp %st, %st(1) */
o(0xc1 + (a << 3));
vtop--;
@@ -1966,13 +1968,13 @@ void gen_opf(int op)
if (op >= TOK_ULT && op <= TOK_GT) {
/* if saved lvalue, then we must reload it */
r = vtop->r;
- fc = vtop->c.ul;
+ fc = vtop->c.i;
if ((r & VT_VALMASK) == VT_LLOCAL) {
SValue v1;
r = get_reg(RC_INT);
v1.type.t = VT_PTR;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
load(r, &v1);
fc = 0;
}
@@ -2029,7 +2031,7 @@ void gen_opf(int op)
break;
}
ft = vtop->type.t;
- fc = vtop->c.ul;
+ fc = vtop->c.i;
assert((ft & VT_BTYPE) != VT_LDOUBLE);
r = vtop->r;
@@ -2039,7 +2041,7 @@ void gen_opf(int op)
r = get_reg(RC_INT);
v1.type.t = VT_PTR;
v1.r = VT_LOCAL | VT_LVAL;
- v1.c.ul = fc;
+ v1.c.i = fc;
load(r, &v1);
fc = 0;
}
_______________________________________________
Tinycc-devel mailing list
[email protected]
https://lists.nongnu.org/mailman/listinfo/tinycc-devel