So, a job is the series of shell commands run by make for a given target:

a:
        cmd1
        expensive_cmd2
        cmd3

so the granularity of "expensive" should be at the command level, not
at the job level: only expensive_cmd2 needs to hold other jobs back,
not the whole sequence.

So, instead of running the code that may start new jobs at
the end of remove_job, let's run it after each change of state.

This should let parallel make perform slightly better, by firing up
held-back jobs sooner when several targets have multiple commands: in
the example above, other jobs can be released as soon as expensive_cmd2
exits, instead of waiting for cmd3 to finish as well.
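
To make that concrete without having job.c at hand, here is a small
standalone toy model, assuming the hold-back machinery works roughly
the way the diff below suggests.  It is not the actual make code:
only heldJobs, no_new_jobs and may_continue_heldback_jobs() are names
taken from the diff, the Job layout and the other helpers are invented
for illustration.

/* toy model of command-level "expensive" handling, not job.c itself */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

typedef struct Job {
        const char *target;
        const char **cmds;      /* NULL-terminated command list */
        size_t next_cmd;        /* index of the next command to run */
        bool running_expensive;
        struct Job *next;       /* link on heldJobs */
} Job;

static Job *heldJobs;           /* ready jobs held back for later */
static bool no_new_jobs;        /* true while an expensive command runs */

static bool
is_expensive(const char *cmd)
{
        /* toy heuristic standing in for the real expensive detection */
        return strncmp(cmd, "expensive", 9) == 0;
}

static void
start_next_cmd(Job *job)
{
        const char *cmd = job->cmds[job->next_cmd];

        if (no_new_jobs) {
                /* something expensive is running: park the job instead */
                job->next = heldJobs;
                heldJobs = job;
                return;
        }
        job->running_expensive = is_expensive(cmd);
        if (job->running_expensive)
                no_new_jobs = true;     /* close the gate */
        printf("run %s: %s\n", job->target, cmd);
}

static void
may_continue_heldback_jobs(void)
{
        while (!no_new_jobs && heldJobs != NULL) {
                Job *job = heldJobs;

                heldJobs = heldJobs->next;
                start_next_cmd(job);
        }
}

/* stand-in for the job state machine: one command of job just exited */
static void
command_finished(Job *job)
{
        if (job->running_expensive)
                no_new_jobs = false;    /* the expensive part is over */
        job->next_cmd++;
        if (job->cmds[job->next_cmd] != NULL)
                start_next_cmd(job);
        /*
         * The point of the diff: release held jobs after every state
         * change, not only once the whole job has been removed.
         */
        may_continue_heldback_jobs();
}

int
main(void)
{
        const char *a_cmds[] = { "cmd1", "expensive_cmd2", "cmd3", NULL };
        const char *b_cmds[] = { "cmd4", NULL };
        Job a = { "a", a_cmds, 0, false, NULL };
        Job b = { "b", b_cmds, 0, false, NULL };

        start_next_cmd(&a);     /* cmd1 */
        command_finished(&a);   /* expensive_cmd2 starts, gate closes */
        start_next_cmd(&b);     /* held back behind expensive_cmd2 */
        command_finished(&a);   /* gate opens: cmd3 starts, b is released */
        command_finished(&b);
        command_finished(&a);
        return 0;
}

As in the real while (!no_new_jobs) loop, the release re-checks the
gate on every round, since a job taken off the held list may itself
start an expensive command and close it again.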

Index: job.c
===================================================================
RCS file: /cvs/src/usr.bin/make/job.c,v
retrieving revision 1.143
diff -u -p -r1.143 job.c
--- job.c       30 Dec 2019 11:01:16 -0000      1.143
+++ job.c       31 Dec 2019 10:00:39 -0000
@@ -148,6 +148,7 @@ static void may_continue_job(Job *);
 static void continue_job(Job *);
 static Job *reap_finished_job(pid_t);
 static bool reap_jobs(void);
+static void may_continue_heldback_jobs();
 
 static void loop_handle_running_jobs(void);
 static bool expensive_job(Job *);
@@ -746,9 +747,14 @@ remove_job(Job *job)
 {
        nJobs--;
        postprocess_job(job);
+}
+
+static void
+may_continue_heldback_jobs()
+{
        while (!no_new_jobs) {
                if (heldJobs != NULL) {
-                       job = heldJobs;
+                       Job *job = heldJobs;
                        heldJobs = heldJobs->next;
                        if (DEBUG(EXPENSIVE))
                                fprintf(stderr, "[%ld] cheap -> release %s\n",
@@ -803,6 +809,7 @@ reap_jobs(void)
                        job_handle_status(job, status);
                        determine_job_next_step(job);
                }
+               may_continue_heldback_jobs();
        }
        /* sanity check, should not happen */
        if (pid == -1 && errno == ECHILD && runningJobs != NULL)
