Given that more and more systems are moving towards multi-CPU/multi-core
architectures, it would be nice if busybox top could report SMP-centric
information.

I have a patch against the top of the tree (SVN r23420) to do just this.

===========================
The summary of changes is:
===========================

1. The process list output has an additional column, "CPU", which reports the
last CPU the process was found running on (at the time the /proc sample was
taken). This can come in handy to observe the effects of calls such as
sched_setaffinity(), which can pin a task to a set of CPUs (see the first
sketch below). New config option: ENABLE_FEATURE_TOP_SMP_PROCESS

2. CPU utilization reporting per CPU. The "1" key toggles between the regular
output (a single line aggregating all CPUs) and one line per CPU (a sketch of
the /proc/stat lines this mode reads appears a little further below). New
config option: ENABLE_FEATURE_TOP_SMP_CPU
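
To illustrate feature 1, here is a minimal, hypothetical test program (not
part of the patch) that pins itself to CPU 2 and then spins, so the new "CPU"
column should settle on 2. It assumes a GNU/Linux system (glibc's
sched_setaffinity() wrapper); the choice of CPU 2 and the busy loop are just
placeholders for the example:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(2, &set);	/* allow this process to run only on CPU 2 */
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pid %d pinned to CPU 2; watch top's CPU column\n", (int)getpid());
	while (1)
		continue;	/* burn CPU so the process stays near the top of the list */
}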

Please consider applying the attached patch.

I've tested it on a 4 CPU machine and also a single CPU machine.
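
To illustrate what feature 2 consumes, here is a small standalone sketch
(again, not part of the patch) that reads the aggregate and per-CPU lines of
/proc/stat with the same scanf formats as the patch's read_cpu_jiffy(). The
percentages top shows are computed from the deltas of two such samples,
xxx% = (cur.xxx - prev.xxx) / (cur.total - prev.total) * 100%; the printed
summary here is purely illustrative:

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/stat", "r");
	long long usr, nic, sys, idle, iowait, irq, softirq, steal;
	int n = 0;

	if (!fp)
		return 1;
	/* the first "cpu " line aggregates all CPUs */
	if (fscanf(fp, "cpu %lld %lld %lld %lld %lld %lld %lld %lld\n",
			&usr, &nic, &sys, &idle, &iowait, &irq, &softirq, &steal) < 4) {
		fclose(fp);
		return 1;
	}
	/* each "cpuN" line is one processor; "cpu%*s" eats the CPU number */
	while (fscanf(fp, "cpu%*s %lld %lld %lld %lld %lld %lld %lld %lld\n",
			&usr, &nic, &sys, &idle, &iowait, &irq, &softirq, &steal) > 4) {
		long long total = usr + nic + sys + idle + iowait + irq + softirq + steal;
		printf("cpu%d: total=%lld jiffies, idle=%lld\n", n++, total, idle);
	}
	fclose(fp);
	return 0;
}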

===========================
Code Size Changes
===========================
See the attached bloat-o-meter output (results.txt), reproduced at the end of
this mail.
In short, with both features enabled, the cost is about 0.5k (515 bytes, see
result (4) below).

===========================
TODO
===========================
Support for massively parallel machines where the number of CPUs exceeds the
number of rows on the terminal.


Thanks,
Vineet
Index: libbb/procps.c
===================================================================
--- libbb/procps.c	(revision 23420)
+++ libbb/procps.c	(working copy)
@@ -217,9 +217,12 @@
 			char *cp, *comm1;
 			int tty;
 #if !ENABLE_FEATURE_FAST_TOP
+            int num_fields = 11;
 			unsigned long vsz, rss;
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+            num_fields += 1; /* "%*s" conversions are suppressed and not counted by sscanf; only the "%d" is */
 #endif
-
+#endif
 			/* see proc(5) for some details on this */
 			strcpy(filename_tail, "/stat");
 			n = read_to_buf(filename, buf);
@@ -247,9 +250,12 @@
 				"%lu "                 /* start_time */
 				"%lu "                 /* vsize */
 				"%lu "                 /* rss */
-			/*	"%lu %lu %lu %lu %lu %lu " rss_rlim, start_code, end_code, start_stack, kstk_esp, kstk_eip */
-			/*	"%u %u %u %u "         signal, blocked, sigignore, sigcatch */
-			/*	"%lu %lu %lu"          wchan, nswap, cnswap */
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+				"%*s %*s %*s %*s %*s %*s " /*rss_rlim, start_code, end_code, start_stack, kstk_esp, kstk_eip */
+				"%*s %*s %*s %*s "         /*signal, blocked, sigignore, sigcatch */
+				"%*s %*s %*s %*s "         /*wchan, nswap, cnswap, exit_signal */
+                "%d"                      /*cpu last seen on*/
+#endif
 				,
 				sp->state, &sp->ppid,
 				&sp->pgid, &sp->sid, &tty,
@@ -257,9 +263,15 @@
 				&tasknice,
 				&sp->start_time,
 				&vsz,
-				&rss);
-			if (n != 11)
+				&rss
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+                ,&sp->last_seen_on_cpu
+#endif
+                );
+
+			if (n != num_fields)
 				break;
+
 			/* vsz is in bytes and we want kb */
 			sp->vsz = vsz >> 10;
 			/* vsz is in bytes but rss is in *PAGES*! Can you believe that? */
@@ -288,8 +300,16 @@
 			sp->vsz = fast_strtoul_10(&cp) >> 10;
 			/* vsz is in bytes but rss is in *PAGES*! Can you believe that? */
 			sp->rss = fast_strtoul_10(&cp) << sp->shift_pages_to_kb;
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+				/* (6): rss_rlim, start_code, end_code, start_stack, kstk_esp, kstk_eip */
+				/* (4): signal, blocked, sigignore, sigcatch */
+				/* (4): wchan, nswap, cnswap, exit_signal */
+			cp = skip_fields(cp, 14);
+            sp->last_seen_on_cpu = fast_strtoul_10(&cp);
 #endif
 
+#endif
+
 			if (sp->vsz == 0 && sp->state[0] != 'Z')
 				sp->state[1] = 'W';
 			else
Index: include/libbb.h
===================================================================
--- include/libbb.h	(revision 23420)
+++ include/libbb.h	(working copy)
@@ -1225,6 +1225,9 @@
 	 * by link target or interpreter name) */
 	char comm[COMM_LEN];
 	/* user/group? - use passwd/group parsing functions */
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+    int last_seen_on_cpu;
+#endif
 } procps_status_t;
 enum {
 	PSSCAN_PID      = 1 << 0,
@@ -1246,12 +1249,17 @@
 	PSSCAN_ARGVN    = (1 << 16) * (ENABLE_PGREP || ENABLE_PKILL || ENABLE_PIDOF),
 	USE_SELINUX(PSSCAN_CONTEXT = 1 << 17,)
 	PSSCAN_START_TIME = 1 << 18,
+	PSSCAN_CPU      = 1 << 19,
 	/* These are all retrieved from proc/NN/stat in one go: */
 	PSSCAN_STAT     = PSSCAN_PPID | PSSCAN_PGID | PSSCAN_SID
 	                | PSSCAN_COMM | PSSCAN_STATE
 	                | PSSCAN_VSZ | PSSCAN_RSS
-			| PSSCAN_STIME | PSSCAN_UTIME | PSSCAN_START_TIME
-			| PSSCAN_TTY,
+					| PSSCAN_STIME | PSSCAN_UTIME | PSSCAN_START_TIME
+			        | PSSCAN_TTY
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+                    | PSSCAN_CPU
+#endif
+,
 };
 //procps_status_t* alloc_procps_scan(void) FAST_FUNC;
 void free_procps_scan(procps_status_t* sp) FAST_FUNC;
Index: procps/top.c
===================================================================
--- procps/top.c	(revision 23420)
+++ procps/top.c	(working copy)
@@ -16,6 +16,12 @@
  * (C) Eero Tamminen <oak at welho dot com>
  *
  * Rewritten by Vladimir Oleynik (C) 2002 <[EMAIL PROTECTED]>
+ *
+ * Sept 2008: Vineet Gupta <[EMAIL PROTECTED]>
+ * Added support for reporting SMP information:
+ *  - the CPU where a process was last seen running
+ *    (to see the effect of calls such as sched_setaffinity())
+ *  - CPU time split (idle/IO/wait etc.) per CPU
  */
 
 /* Original code Copyrights */
@@ -41,6 +47,9 @@
 	unsigned uid;
 	char state[4];
 	char comm[COMM_LEN];
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+    int last_seen_on_cpu;
+#endif
 } top_status_t;
 
 typedef struct jiffy_counts_t {
@@ -83,6 +92,14 @@
 	unsigned total_pcpu;
 	/* unsigned long total_vsz; */
 #endif
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    /* Per CPU Samples: Current and Last */
+    jiffy_counts_t *cpu_jif, *cpu_prev_jif;
+    /* User option to display per-cpu line or aggregate line */
+    int aggr_cpu_line;
+    /* Num CPUs in System */
+    int num_cpus;
+#endif
 	char line_buf[80];
 };
 
@@ -104,6 +121,12 @@
 #define prev_hist_count  (G.prev_hist_count   )
 #define jif              (G.jif               )
 #define prev_jif         (G.prev_jif          )
+#if ENABLE_FEATURE_TOP_SMP_CPU
+#define cpu_jif          (G.cpu_jif           )
+#define cpu_prev_jif     (G.cpu_prev_jif      )
+#define num_cpus         (G.num_cpus          )
+#define aggr_cpu_line    (G.aggr_cpu_line     )
+#endif
 #define total_pcpu       (G.total_pcpu        )
 #define line_buf         (G.line_buf          )
 
@@ -161,24 +184,91 @@
 	return 0;
 }
 
+/* NOINLINE so that the compiler doesn't inline the call,
+ *   which would create multiple copies of the summation code
+ */
+static NOINLINE int read_cpu_jiffy(FILE *fp, const char *fmt, 
+                                    jiffy_counts_t *p_jif)
+{
+    int ret;
 
+    ret = fscanf(fp, fmt, 
+            &(p_jif->usr),&(p_jif->nic),&(p_jif->sys),&(p_jif->idle),
+            &(p_jif->iowait),&(p_jif->irq),&(p_jif->softirq),&(p_jif->steal));
+
+    if (ret > 4) {
+        p_jif->total = p_jif->usr + p_jif->nic + p_jif->sys + p_jif->idle
+            + p_jif->iowait + p_jif->irq + p_jif->softirq + p_jif->steal;
+
+        /* procps 2.x does not count iowait as busy time */
+        p_jif->busy = p_jif->total - p_jif->idle - p_jif->iowait;
+    }
+
+    return ret;
+}
+
 static void get_jiffy_counts(void)
 {
 	FILE* fp = xfopen_for_read("stat");
+    const char *fmt_aggr ="cpu %lld %lld %lld %lld %lld %lld %lld %lld\n";
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    const char *fmt_pcpu = "cpu%*s %lld %lld %lld %lld %lld %lld %lld %lld\n";
+#endif
+
+    /* Read the aggregate CPU(s) line first */
 	prev_jif = jif;
-	if (fscanf(fp, "cpu  %lld %lld %lld %lld %lld %lld %lld %lld",
-			&jif.usr,&jif.nic,&jif.sys,&jif.idle,
-			&jif.iowait,&jif.irq,&jif.softirq,&jif.steal) < 4) {
+    if (read_cpu_jiffy(fp, fmt_aggr, &jif) < 4)
 		bb_error_msg_and_die("can't read /proc/stat");
-	}
-	fclose(fp);
-	jif.total = jif.usr + jif.nic + jif.sys + jif.idle
-			+ jif.iowait + jif.irq + jif.softirq + jif.steal;
-	/* procps 2.x does not count iowait as busy time */
-	jif.busy = jif.total - jif.idle - jif.iowait;
+
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    if (aggr_cpu_line) {    /* user wants to see the cumulative CPU info only */
+        fclose(fp);
+        return;
 }
 
+    /* Per-CPU display: on the first invocation, find out how many CPUs there are */
+    if (!num_cpus) {
 
+        jiffy_counts_t tmp;
+
+        /* allocate mem for CPU samples 
+         * There will be at least 1 /proc/stat line with cpu%d
+         */
+
+        while (read_cpu_jiffy(fp, fmt_pcpu, &tmp) > 4) {
+            /* we allocate space for 4 CPUs at a time */
+            cpu_jif = xrealloc_vector(cpu_jif, 2, num_cpus);
+            cpu_jif[num_cpus++] = tmp;
+        }
+
+        cpu_prev_jif = xzalloc(sizeof(jiffy_counts_t)*num_cpus);
+        
+        /* keep a reasonable delay between the 1st and 2nd samples,
+         * otherwise the first per-CPU display would show everything as 100% idle
+         */
+        usleep(500000);
+    }
+    else        /* not the first invocation */
+    {
+
+        jiffy_counts_t *tmp;
+        int i;
+        
+        /* First swap the sample pointers: no need to copy */
+        tmp = cpu_prev_jif;
+        cpu_prev_jif = cpu_jif;
+        cpu_jif = tmp;
+
+        /* Get the new sample */
+        for (i=0; i< num_cpus; i++) {
+            read_cpu_jiffy(fp, fmt_pcpu, &cpu_jif[i]);
+        }
+    }
+#endif
+
+    fclose(fp);
+}
+
 static void do_stats(void)
 {
 	top_status_t *cur;
@@ -257,15 +347,101 @@
 }
 #endif
 
-static unsigned long display_header(int scr_width)
+static int display_cpus(int scr_width, char *scrbuf)
 {
+    /*
+     * xxx% = (jif.xxx - prev_jif.xxx) / (jif.total - prev_jif.total) * 100%
+     */
+#if ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS
+    unsigned total_diff;
+    jiffy_counts_t *p_jif, *p_prev_jif;
+    int i=0;
+
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    int n_cpu_lines = 1;
+    char cpu_str[4]="(s)";
+#else
+    char cpu_str[4]="   ";
+#endif
+
+    /* using (unsigned) casts to make operations cheaper */
+#define  CALC_TOT_DIFF  ((unsigned)(p_jif->total - p_prev_jif->total) ? : 1)
+
+
+#if ENABLE_FEATURE_TOP_DECIMALS
+#define CALC_STAT(xxx) char xxx[8]
+#define SHOW_STAT(xxx) fmt_100percent_8(xxx, (unsigned)(p_jif->xxx - p_prev_jif->xxx), total_diff)
+#define FMT "%s"
+#else
+#define CALC_STAT(xxx) unsigned xxx = 100 * (unsigned)(p_jif->xxx - p_prev_jif->xxx) / total_diff
+#define SHOW_STAT(xxx) xxx
+#define FMT "%4u%% "
+#endif
+
+#if ENABLE_FEATURE_TOP_SMP_CPU
+
+    /* Loop through the CPU(s). We enter with the loop end (n_cpu_lines) = 1,
+     * which handles the case where the user wants only the aggregate line.
+     */
+    for (i = 0; i < n_cpu_lines ; i++)
+    {
+        /* set the real loop end */
+        if (!aggr_cpu_line) { 
+            n_cpu_lines = num_cpus;
+            p_jif=&cpu_jif[i];
+            p_prev_jif=&cpu_prev_jif[i];
+            snprintf(cpu_str,4,"%3d",i);
+        }
+        else 
+#endif
+        {
+            p_jif=&jif;
+            p_prev_jif=&prev_jif;
+        }
+
+        total_diff = CALC_TOT_DIFF;
+
+        {
+            /* need block: CALC_STAT are declarations */
+            CALC_STAT(usr);
+            CALC_STAT(sys);
+            CALC_STAT(nic);
+            CALC_STAT(idle);
+            CALC_STAT(iowait);
+            CALC_STAT(irq);
+            CALC_STAT(softirq);
+            //CALC_STAT(steal);
+
+            snprintf(scrbuf, scr_width,
+                /* Barely fits in 79 chars when in "decimals" mode. */
+                "CPU%s:"FMT"usr"FMT"sys"FMT"nic"FMT"idle"FMT"io"FMT"irq"FMT"sirq",
+                 cpu_str,
+                 SHOW_STAT(usr), SHOW_STAT(sys), SHOW_STAT(nic), SHOW_STAT(idle), 
+                 SHOW_STAT(iowait), SHOW_STAT(irq), SHOW_STAT(softirq)
+                 //, SHOW_STAT(steal) - what is this 'steal' thing?
+                 // I doubt anyone wants to know it
+            );
+            puts(scrbuf);
+        }
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    }
+#endif
+#undef SHOW_STAT
+#undef CALC_STAT
+#undef FMT
+    return i;
+#else       // !ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS
+    return 0;
+#endif
+
+}
+
+static unsigned long display_header(int scr_width, int *count)
+{
 	FILE *fp;
 	char buf[80];
 	char scrbuf[80];
 	unsigned long total, used, mfree, shared, buffers, cached;
-#if ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS
-	unsigned total_diff;
-#endif
 
 	/* read memory info */
 	fp = xfopen_for_read("meminfo");
@@ -324,47 +500,11 @@
 	/* clear screen & go to top */
 	printf(OPT_BATCH_MODE ? "%s\n" : "\e[H\e[J%s\n", scrbuf);
 
-#if ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS
-	/*
-	 * xxx% = (jif.xxx - prev_jif.xxx) / (jif.total - prev_jif.total) * 100%
-	 */
-	/* using (unsigned) casts to make operations cheaper */
-	total_diff = ((unsigned)(jif.total - prev_jif.total) ? : 1);
-#if ENABLE_FEATURE_TOP_DECIMALS
-/* Generated code is approx +0.3k */
-#define CALC_STAT(xxx) char xxx[8]
-#define SHOW_STAT(xxx) fmt_100percent_8(xxx, (unsigned)(jif.xxx - prev_jif.xxx), total_diff)
-#define FMT "%s"
-#else
-#define CALC_STAT(xxx) unsigned xxx = 100 * (unsigned)(jif.xxx - prev_jif.xxx) / total_diff
-#define SHOW_STAT(xxx) xxx
-#define FMT "%4u%% "
-#endif
-	{ /* need block: CALC_STAT are declarations */
-		CALC_STAT(usr);
-		CALC_STAT(sys);
-		CALC_STAT(nic);
-		CALC_STAT(idle);
-		CALC_STAT(iowait);
-		CALC_STAT(irq);
-		CALC_STAT(softirq);
-		//CALC_STAT(steal);
+    /* Display CPU time split as percentage of total time
+     * This displays either a cumulative line or one line per CPU
+     */
+    *count -= display_cpus(scr_width, scrbuf);
 
-		snprintf(scrbuf, scr_width,
-			/* Barely fits in 79 chars when in "decimals" mode. */
-			"CPU:"FMT"usr"FMT"sys"FMT"nice"FMT"idle"FMT"io"FMT"irq"FMT"softirq",
-			SHOW_STAT(usr), SHOW_STAT(sys), SHOW_STAT(nic), SHOW_STAT(idle),
-			SHOW_STAT(iowait), SHOW_STAT(irq), SHOW_STAT(softirq)
-			//, SHOW_STAT(steal) - what is this 'steal' thing?
-			// I doubt anyone wants to know it
-		);
-	}
-	puts(scrbuf);
-#undef SHOW_STAT
-#undef CALC_STAT
-#undef FMT
-#endif
-
 	/* read load average as a string */
 	buf[0] = '\0';
 	open_read_close("loadavg", buf, sizeof("N.NN N.NN N.NN")-1);
@@ -383,23 +523,25 @@
 
 	top_status_t *s = top;
 	char vsz_str_buf[8];
-	unsigned long total_memory = display_header(scr_width); /* or use total_vsz? */
+    unsigned long total_memory = display_header(scr_width, &count); /* or use total_vsz? */
 	/* xxx_shift and xxx_scale variables allow us to replace
 	 * expensive divides with multiply and shift */
 	unsigned pmem_shift, pmem_scale, pmem_half;
 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
 	unsigned pcpu_shift, pcpu_scale, pcpu_half;
 	unsigned busy_jifs;
+#endif
 
 	/* what info of the processes is shown */
 	printf(OPT_BATCH_MODE ? "%.*s" : "\e[7m%.*s\e[0m", scr_width,
-		"  PID  PPID USER     STAT   VSZ %MEM %CPU COMMAND");
-#else
-
-	/* !CPU_USAGE_PERCENTAGE */
-	printf(OPT_BATCH_MODE ? "%.*s" : "\e[7m%.*s\e[0m", scr_width,
-		"  PID  PPID USER     STAT   VSZ %MEM COMMAND");
+        "  PID  PPID USER     STAT   VSZ %MEM"
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+        " CPU"
 #endif
+#if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
+        " %CPU"
+#endif
+    " COMMAND");
 
 #if ENABLE_FEATURE_TOP_DECIMALS
 #define UPSCALE 1000
@@ -469,6 +611,9 @@
 		// PID PPID USER STAT VSZ %MEM [%CPU] COMMAND
 		col = snprintf(line_buf, scr_width,
 				"\n" "%5u%6u %-8.8s %s%s" FMT
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+                "   %d "
+#endif
 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
 				FMT
 #endif
@@ -476,6 +621,9 @@
 				s->pid, s->ppid, get_cached_username(s->uid),
 				s->state, vsz_str_buf,
 				SHOW_STAT(pmem)
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+                ,s->last_seen_on_cpu
+#endif
 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
 				, SHOW_STAT(pcpu)
 #endif
@@ -760,7 +908,10 @@
 
 	interval = 5; /* default update interval is 5 seconds */
 	iterations = 0; /* infinite */
-
+#if ENABLE_FEATURE_TOP_SMP_CPU
+    num_cpus = 0;
+    aggr_cpu_line = 1;  /* to start with show aggregate */
+#endif
 	/* all args are options; -n NUM */
 	opt_complementary = "-:n+";
 	if (getopt32(argv, "d:n:b", &sinterval, &iterations) & OPT_d) {
@@ -823,6 +974,9 @@
 				top[n].uid = p->uid;
 				strcpy(top[n].state, p->state);
 				strcpy(top[n].comm, p->comm);
+#if ENABLE_FEATURE_TOP_SMP_PROCESS
+                top[n].last_seen_on_cpu = p->last_seen_on_cpu;
+#endif
 			} else { /* TOPMEM */
 #if ENABLE_FEATURE_TOPMEM
 				if (!(p->mapped_ro | p->mapped_rw))
@@ -932,7 +1086,28 @@
 			if (c == 'r')
 				inverted ^= 1;
 #endif
+#if ENABLE_FEATURE_TOP_SMP_CPU
+            if (c == '1') {
+                /* If user wants to switch from per cpu line to aggregate line */
+                if (!aggr_cpu_line) {
+                    num_cpus = 0;       // reset the var
+                    free(cpu_prev_jif); // Free the extra mem
+                    free(cpu_jif);
+                    cpu_jif = cpu_prev_jif = NULL;
+                    aggr_cpu_line = 1;
+                }
+                else {
+                    aggr_cpu_line = 0; 
+                }
+
+                /* when switching mode, need to take an extra sample to
+                 *  get reasonable first time % (otherwise all show 0%)
+                 */
+                get_jiffy_counts();
+                //usleep(100000);
+            }
 #endif
+#endif
 		}
 #endif /* FEATURE_USE_TERMIOS */
 	} /* end of "while (1)" */
Index: procps/Config.in
===================================================================
--- procps/Config.in	(revision 23420)
+++ procps/Config.in	(working copy)
@@ -148,6 +148,13 @@
 	help
 	  Makes top display "CPU: NN% usr NN% sys..." line.
 
+config FEATURE_TOP_SMP_CPU
+	bool "(SMP) Show each CPU's individual usage % (adds 0.5k bytes)"
+	default n
+	depends on FEATURE_TOP_CPU_GLOBAL_PERCENTS
+	help
+	  Show each CPU's load individually or cumulatively (toggled with the '1' key)
+
 config FEATURE_TOP_DECIMALS
 	bool "Show 1/10th of a percent in CPU/mem statistics (adds 0.3k bytes)"
 	default n
@@ -155,6 +162,13 @@
 	help
 	  Show 1/10th of a percent in CPU/mem statistics.
 
+config FEATURE_TOP_SMP_PROCESS
+	bool "(SMP) Show the CPU each process was last seen running on (adds <0.1k bytes)"
+	default n
+	depends on TOP
+	help
+	  Show the CPU that each process was last found running on (SMP)
+
 config FEATURE_TOPMEM
 	bool "topmem"
 	default n
===========================
bloat-o-meter results (results.txt)
===========================

(1). Both of the new options switched off.

   Code size increases marginally because now, by default, the aggregate
   CPU line displays "CPU(s):" instead of "CPU   :" (like the regular
   procps top).
   Also, it seems that because of the header file changes, other unrelated
   code (uuencode etc.) changes slightly.

function                                             old     new   delta
read_cpu_jiffy                                         -     124    +124
display_process_list                                1183    1258     +75
refresh                                             1045    1053      +8
uuencode                                             262     267      +5
bb_uuencode                                          151     153      +2
passwd_main                                         1127    1125      -2
.rodata                                           120143  120135      -8
do_stats                                             456     281    -175
------------------------------------------------------------------------------
(add/remove: 1/0 grow/shrink: 4/3 up/down: 214/-185)           Total: 29 bytes


(2). With only ENABLE_FEATURE_TOP_SMP_CPU


function                                             old     new   delta
get_jiffy_counts                                       -     280    +280
read_cpu_jiffy                                         -     124    +124
top_main                                            1135    1243    +108
display_process_list                                1183    1282     +99
.rodata                                           120143  120191     +48
refresh                                             1045    1053      +8
uuencode                                             262     267      +5
bb_uuencode                                          151     153      +2
passwd_main                                         1127    1125      -2
do_stats                                             456     215    -241
------------------------------------------------------------------------------
(add/remove: 2/0 grow/shrink: 6/2 up/down: 674/-243)          Total: 431 bytes

(3). With only ENABLE_FEATURE_TOP_SMP_PROCESS


function                                             old     new   delta
read_cpu_jiffy                                         -     124    +124
display_process_list                                1183    1267     +84
procps_scan                                         1281    1315     +34
top_main                                            1135    1153     +18
refresh                                             1045    1053      +8
uuencode                                             262     267      +5
route_main                                          2089    2091      +2
.rodata                                           120143  120145      +2
xstrtoull_range_sfx                                  296     295      -1
passwd_main                                         1127    1125      -2
do_stats                                             456     281    -175
------------------------------------------------------------------------------
(add/remove: 1/0 grow/shrink: 7/3 up/down: 277/-178)           Total: 99 bytes

(4)  With both ENABLE_FEATURE_TOP_SMP_CPU and ENABLE_FEATURE_TOP_SMP_PROCESS


function                                             old     new   delta
get_jiffy_counts                                       -     280    +280
top_main                                            1135    1261    +126
read_cpu_jiffy                                         -     124    +124
display_process_list                                1183    1305    +122
.rodata                                           120143  120201     +58
procps_scan                                         1281    1315     +34
refresh                                             1045    1053      +8
uuencode                                             262     267      +5
route_main                                          2089    2091      +2
xstrtoull_range_sfx                                  296     295      -1
passwd_main                                         1127    1125      -2
do_stats                                             456     215    -241
------------------------------------------------------------------------------
(add/remove: 2/0 grow/shrink: 7/3 up/down: 759/-244)          Total: 515 bytes