Consider the following structs. The first struct uses 1-bit-wide unsigned
integers for two flags set by the user at runtime. The second struct performs
the same function, but uses regular signed integers instead of unsigned
integers with a defined bit width.
typedef struct {
    char *dev;                      /* the device we will monitor */
    pcap_t *dev_handle;             /* the handle to this device after opening */
    struct pcap_stat ps;            /* packet statistics struct */
    pcap_dumper_t *pd;              /* dump file pointer */
    int link_int;                   /* datalink int of device */
    int timeout;                    /* datalink timeout per packet */
    int pcount;                     /* packets captured */
    const char *link_desc;          /* datalink description of device */
    const char *link_name;          /* datalink name of device */
    char errbuf[PCAP_ERRBUF_SIZE];  /* pcap err buffer */
    bpf_u_int32 net;                /* network of sniffing interface */
    bpf_u_int32 mask;               /* netmask of sniffing interface */
    char netstr[INET_ADDRSTRLEN];   /* network string */
    char maskstr[INET_ADDRSTRLEN];  /* netmask string */
    unsigned int use_dumper : 1;    /* flag to use dump file */
    unsigned int use_filter : 1;    /* flag to compile filter */
    char dumpfile[FILESIZE];        /* file to dump packets */
    struct bpf_program fp;          /* compiled filter expression */
    char filter_exp[FILTER_SIZE];   /* filter expression */
} ether_dev_t;
typedef struct {
    char *dev;                      /* the device we will monitor */
    pcap_t *dev_handle;             /* the handle to this device after opening */
    struct pcap_stat ps;            /* packet statistics struct */
    pcap_dumper_t *pd;              /* dump file pointer */
    int link_int;                   /* datalink int of device */
    int timeout;                    /* datalink timeout per packet */
    int pcount;                     /* packets captured */
    const char *link_desc;          /* datalink description of device */
    const char *link_name;          /* datalink name of device */
    char errbuf[PCAP_ERRBUF_SIZE];  /* pcap err buffer */
    bpf_u_int32 net;                /* network of sniffing interface */
    bpf_u_int32 mask;               /* netmask of sniffing interface */
    char netstr[INET_ADDRSTRLEN];   /* network string */
    char maskstr[INET_ADDRSTRLEN];  /* netmask string */
    int use_dumper;                 /* flag to use dump file */
    int use_filter;                 /* flag to compile filter */
    char dumpfile[FILESIZE];        /* file to dump packets */
    struct bpf_program fp;          /* compiled filter expression */
    char filter_exp[FILTER_SIZE];   /* filter expression */
} ether_dev_t;
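For reference, here is a minimal, self-contained sketch I used to reason about the in-memory difference between the two layouts (the reduced struct names bitfield_flags_t and plain_flags_t are made up for illustration; the other members are identical in both variants, so only the flag members are shown):
#include <stdio.h>

/* Reduced structs containing only the flag members. */
typedef struct {
    unsigned int use_dumper : 1;   /* flag to use dump file */
    unsigned int use_filter : 1;   /* flag to compile filter */
} bitfield_flags_t;

typedef struct {
    int use_dumper;                /* flag to use dump file */
    int use_filter;                /* flag to compile filter */
} plain_flags_t;

int main(void)
{
    /* On amd64, both one-bit fields typically share a single unsigned int,
     * while the plain variant needs two full ints. */
    printf("bit-field flags: %zu bytes\n", sizeof(bitfield_flags_t));
    printf("plain int flags: %zu bytes\n", sizeof(plain_flags_t));
    return 0;
}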
When I run size on the compiled binaries and their stripped versions, there
is a difference of 32 bytes (9023 - 8991) between the two variants in the text
segment and in the overall size.
$ size regular_int regular_int.stripped
   text    data     bss     dec     hex
   9023     744     352   10119    2787 regular_int
   9023     744     352   10119    2787 regular_int.stripped
$ size one_bit_u_int one_bit_u_int.stripped
   text    data     bss     dec     hex
   8991     744     352   10087    2767 one_bit_u_int
   8991     744     352   10087    2767 one_bit_u_int.stripped
However, when I run ls -l on these files, the non-stripped bit-field binary
is larger, which I did not expect.
$ ls -l one_bit_u_int{,.stripped}
-rwxr-xr-x 1 user user 29014 Jan 10 16:58 one_bit_u_int
-rwxr-xr-x 1 user user 12440 Jan 10 16:59 one_bit_u_int.stripped
$ ls -l regular_int{,.stripped}
-rwxr-xr-x 1 user user 28942 Jan 10 16:57 regular_int
-rwxr-xr-x 1 user user 12472 Jan 10 16:57 regular_int.stripped
Why is the non-stripped binary that uses the one-bit-wide bit fields larger
than the non-stripped binary that uses regular integers?
These were compiled on amd64 -current.