Hi,

Here is an updated version of my GrpCPURunMins limit patch. It works for both QOS and Associations. GrpCPURunMins was previously discussed in this thread:
https://groups.google.com/group/slurm-devel/browse_thread/thread/d83e2a956be389c2/7a94528452803630?#7a94528452803630
The patch is included in this mail, but you can also pull it from GitHub:
git pull git://github.com/paran1/slurm.git GrpCPURunMins
The patch is against slurm-2.2. It does apply cleanly against the master
branch, and I have done some minimal testing using that also.
We would like to get this included in 2.3; please let me know if there are any problems with the code.
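
Once the patch is applied, the limit can be set like the other group limits in sacctmgr; for example (the QOS name and value here are only illustrative):

sacctmgr modify qos where name=normal set GrpCPURunMins=100000

A job is then only started if the CPU minutes still allocated to the group's running jobs (grp_used_cpu_run_secs/60, which shrinks as the decay thread runs) plus the new job's min_cpus * time_limit stay within the limit. For example, an 8-CPU job with a 60 minute time limit initially adds 480 CPU minutes to the group's usage.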
Kind regards,
Pär Andersson
NSC
commit 69febb77b470053428f179b01ec16930ad97b5ed
Author: Pär Andersson <[email protected]>
Date: Tue May 17 17:14:38 2011 +0200
Implement support for GrpCPURunMins
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 13117d6..f6d96a5 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -101,6 +101,7 @@ static int _addto_used_info(slurmdb_association_rec_t *assoc1,
assoc1->usage->grp_used_cpus += assoc2->usage->grp_used_cpus;
assoc1->usage->grp_used_nodes += assoc2->usage->grp_used_nodes;
assoc1->usage->grp_used_wall += assoc2->usage->grp_used_wall;
+ assoc1->usage->grp_used_cpu_run_secs += assoc2->usage->grp_used_cpu_run_secs;
assoc1->usage->used_jobs += assoc2->usage->used_jobs;
assoc1->usage->used_submit_jobs += assoc2->usage->used_submit_jobs;
@@ -116,6 +117,7 @@ static int _clear_used_assoc_info(slurmdb_association_rec_t *assoc)
assoc->usage->grp_used_cpus = 0;
assoc->usage->grp_used_nodes = 0;
+ assoc->usage->grp_used_cpu_run_secs = 0;
assoc->usage->used_jobs = 0;
assoc->usage->used_submit_jobs = 0;
@@ -134,6 +136,7 @@ static int _clear_used_qos_info(slurmdb_qos_rec_t *qos)
qos->usage->grp_used_cpus = 0;
qos->usage->grp_used_nodes = 0;
+ qos->usage->grp_used_cpu_run_secs = 0;
qos->usage->grp_used_jobs = 0;
qos->usage->grp_used_submit_jobs = 0;
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index 305b3fe..ff780aa 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -106,6 +106,8 @@ struct assoc_mgr_association_usage {
* (DON'T PACK) */
double grp_used_wall; /* group count of time used in
* running jobs (DON'T PACK) */
+ uint64_t grp_used_cpu_run_secs; /* count of running cpu secs
+ * (DON'T PACK) */
uint32_t level_shares; /* number of shares on this level of
* the tree (DON'T PACK) */
@@ -120,8 +122,6 @@ struct assoc_mgr_association_usage {
long double usage_norm; /* normalized usage (DON'T PACK) */
long double usage_raw; /* measure of resource usage (DON'T PACK) */
- uint64_t used_cpu_run_secs; /* count of running cpu secs
- * (DON'T PACK) */
uint32_t used_jobs; /* count of active jobs (DON'T PACK) */
uint32_t used_submit_jobs; /* count of jobs pending or running
* (DON'T PACK) */
diff --git a/src/plugins/priority/multifactor/priority_multifactor.c b/src/plugins/priority/multifactor/priority_multifactor.c
index 5471131..e1f3a4e 100644
--- a/src/plugins/priority/multifactor/priority_multifactor.c
+++ b/src/plugins/priority/multifactor/priority_multifactor.c
@@ -661,6 +661,80 @@ static time_t _next_reset(uint16_t reset_period, time_t last_reset)
return mktime(&last_tm);
}
+/*
+ When restarting slurmctld, acct_policy.c will run
+ acct_policy_job_begin() for already running jobs and set
+ grp_used_cpu_run_secs to the initial value. We have to correct the
+ values here after we know when the decay thread last ran.
+*/
+uint32_t _init_grp_used_cpu_run_secs(time_t last_ran)
+{
+ struct job_record *job_ptr = NULL;
+ ListIterator itr;
+ assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
+ WRITE_LOCK, NO_LOCK, NO_LOCK };
+ slurmctld_lock_t job_read_lock =
+ { NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+ int delta;
+ slurmdb_qos_rec_t *qos;
+ slurmdb_association_rec_t *assoc;
+
+ if(priority_debug)
+ info("Initializing grp_used_cpu_run_secs");
+ if (!(job_list && list_count(job_list))) {
+ return SLURM_ERROR;
+ }
+
+ lock_slurmctld(job_read_lock);
+ itr = list_iterator_create(job_list);
+ if (itr == NULL)
+ fatal("list_iterator_create: malloc failure");
+
+ while ((job_ptr = list_next(itr))) {
+ if (priority_debug)
+ debug2("job: %u",job_ptr->job_id);
+ qos = NULL;
+ assoc = NULL;
+ delta = 0;
+
+ if (!IS_JOB_RUNNING(job_ptr))
+ continue;
+
+ if (job_ptr->start_time > last_ran) {
+ continue;
+ }
+ delta = last_ran - job_ptr->start_time;
+
+ assoc_mgr_lock(&locks);
+ qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+ assoc = (slurmdb_association_rec_t *)
+ job_ptr->assoc_ptr;
+
+ if(qos) {
+ if (priority_debug)
+ debug4("Subtracting %u from qos %u
grp_used_cpu_run_secs %lu = %lu",
+ job_ptr->total_cpus*delta,
+ qos->id,
+ qos->usage->grp_used_cpu_run_secs,
+ qos->usage->grp_used_cpu_run_secs -
(job_ptr->total_cpus * delta));
+ qos->usage->grp_used_cpu_run_secs -=
job_ptr->total_cpus * delta;
+ }
+ while (assoc) {
+ if (priority_debug)
+ debug4("Subtracting %u from assoc %u
grp_used_cpu_run_secs %lu = %lu",
+ job_ptr->total_cpus*delta,
+ assoc->id,
+ assoc->usage->grp_used_cpu_run_secs,
+ assoc->usage->grp_used_cpu_run_secs -
(job_ptr->total_cpus * delta));
+ assoc->usage->grp_used_cpu_run_secs -=
job_ptr->total_cpus * delta;
+ assoc = assoc->usage->parent_assoc_ptr;
+ }
+ assoc_mgr_unlock(&locks);
+ }
+ list_iterator_destroy(itr);
+ unlock_slurmctld(job_read_lock);
+}
+
static void *_decay_thread(void *no_data)
{
struct job_record *job_ptr = NULL;
@@ -699,6 +773,8 @@ static void *_decay_thread(void *no_data)
if (last_reset == 0)
last_reset = start_time;
+ _init_grp_used_cpu_run_secs(last_ran);
+
while (1) {
time_t now = time(NULL);
int run_delta = 0;
@@ -847,6 +923,23 @@ static void *_decay_thread(void *no_data)
qos->usage->grp_used_wall += run_decay;
qos->usage->usage_raw +=
(long double)real_decay;
+ if (qos->usage->grp_used_cpu_run_secs >=
+ job_ptr->total_cpus * run_delta) {
+ if(priority_debug)
+ debug4("grp_used_cpu_run_secs is %lu, will subtract %u",
+ qos->usage->grp_used_cpu_run_secs,
+ job_ptr->total_cpus * run_delta);
+ qos->usage->grp_used_cpu_run_secs -=
+ job_ptr->total_cpus * run_delta;
+ } else {
+ if (priority_debug)
+ debug4("jobid %u, qos %s: setting grp_used_cpu_run_secs "
+ "to 0 because %lu < %i",
+ job_ptr->job_id, qos->name,
+ qos->usage->grp_used_cpu_run_secs,
+ job_ptr->total_cpus * run_delta);
+ qos->usage->grp_used_cpu_run_secs = 0;
+ }
}
/* We want to do this all the way up
@@ -856,6 +949,24 @@ static void *_decay_thread(void *no_data)
and use that to normalize against.
*/
while (assoc) {
+ if (assoc->usage->grp_used_cpu_run_secs >=
+ job_ptr->total_cpus * run_delta) {
+ if(priority_debug)
+ debug4("grp_used_cpu_run_secs is %lu, will subtract %u",
+ assoc->usage->grp_used_cpu_run_secs,
+ job_ptr->total_cpus*run_delta);
+ assoc->usage->grp_used_cpu_run_secs -=
+ job_ptr->total_cpus * run_delta;
+ } else {
+ if (priority_debug)
+ debug4("jobid %u, assoc %u: setting grp_used_cpu_run_secs "
+ "to 0 because %lu < %i",
+ job_ptr->job_id, assoc->id,
+ assoc->usage->grp_used_cpu_run_secs,
+ job_ptr->total_cpus * run_delta);
+ assoc->usage->grp_used_cpu_run_secs = 0;
+ }
+
assoc->usage->grp_used_wall +=
run_decay;
assoc->usage->usage_raw +=
@@ -865,13 +976,16 @@ static void *_decay_thread(void *no_data)
"assoc %u (user='%s' "
"acct='%s') raw usage "
"is now %Lf. Group wall "
- "added %f making it %f.",
+ "added %f making it %f. "
+ "GrpCPURunMins is %lu",
real_decay, assoc->id,
assoc->user, assoc->acct,
assoc->usage->usage_raw,
run_decay,
assoc->usage->
- grp_used_wall);
+ grp_used_wall,
+ assoc->usage->
+ grp_used_cpu_run_secs);
assoc = assoc->usage->parent_assoc_ptr;
}
assoc_mgr_unlock(&locks);
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index a2a7c71..56d319b 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -285,6 +285,14 @@ static int _set_rec(int *start, int argc, char *argv[],
&qos->grp_cpu_mins,
"GrpCPUMins") == SLURM_SUCCESS)
set = 1;
+ } else if (!strncasecmp (argv[i], "GrpCPURunMins",
+ MAX(command_len, 7))) {
+ if(!qos)
+ continue;
+ if (get_uint64(argv[i]+end,
+ &qos->grp_cpu_run_mins,
+ "GrpCPURunMins") == SLURM_SUCCESS)
+ set = 1;
} else if (!strncasecmp (argv[i], "GrpCpus",
MAX(command_len, 7))) {
if(!qos)
@@ -746,6 +754,12 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
qos->grp_cpu_mins,
(curr_inx == field_count));
break;
+ case PRINT_GRPCRM:
+ field->print_routine(
+ field,
+ qos->grp_cpu_run_mins,
+ (curr_inx == field_count));
+ break;
case PRINT_GRPC:
field->print_routine(field,
qos->grp_cpus,
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 60beb33..29a4b68 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -159,6 +159,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
qos_ptr->usage->grp_used_jobs++;
qos_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
qos_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+ qos_ptr->usage->grp_used_cpu_run_secs += job_ptr->total_cpus * job_ptr->time_limit * 60;
used_limits->jobs++;
break;
case ACCT_POLICY_JOB_FINI:
@@ -182,6 +183,15 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
"underflow for qos %s", qos_ptr->name);
}
+ uint64_t grp_used_cpu_run_secs_old = qos_ptr->usage->grp_used_cpu_run_secs;
+ qos_ptr->usage->grp_used_cpu_run_secs -= job_ptr->total_cpus *
+ (job_ptr->start_time + job_ptr->time_limit*60 - job_ptr->end_time);
+ if (grp_used_cpu_run_secs_old < qos_ptr->usage->grp_used_cpu_run_secs) {
+ qos_ptr->usage->grp_used_cpu_run_secs = 0;
+ debug2("acct_policy_job_fini: grp_used_cpu_run_secs "
+ "underflow for qos %s", qos_ptr->name);
+ }
+
if(used_limits->jobs)
used_limits->jobs--;
else
@@ -214,6 +224,7 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
assoc_ptr->usage->used_jobs++;
assoc_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
assoc_ptr->usage->grp_used_nodes += job_ptr->node_cnt;
+ assoc_ptr->usage->grp_used_cpu_run_secs += job_ptr->total_cpus * job_ptr->time_limit * 60;
break;
case ACCT_POLICY_JOB_FINI:
if (assoc_ptr->usage->used_jobs)
@@ -238,6 +249,17 @@ static void _adjust_limit_usage(int type, struct job_record *job_ptr)
"underflow for account %s",
assoc_ptr->acct);
}
+
+ uint64_t grp_used_cpu_run_secs_old = assoc_ptr->usage->grp_used_cpu_run_secs;
+ assoc_ptr->usage->grp_used_cpu_run_secs -= job_ptr->total_cpus *
+ (job_ptr->start_time + job_ptr->time_limit*60 - job_ptr->end_time);
+
+ if (grp_used_cpu_run_secs_old < assoc_ptr->usage->grp_used_cpu_run_secs) {
+ assoc_ptr->usage->grp_used_cpu_run_secs = 0;
+ debug2("acct_policy_job_fini: grp_used_cpu_run_secs "
+ "underflow for account %s", assoc_ptr->acct);
+ }
+
break;
default:
error("acct_policy: association unknown type %d", type);
@@ -902,6 +924,25 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
goto end_it;
}
+ if (qos_ptr->grp_cpu_run_mins != INFINITE) {
+ if(( qos_ptr->usage->grp_used_cpu_run_secs/60 +
+ job_ptr->details->min_cpus * job_ptr->time_limit )
+ > qos_ptr->grp_cpu_run_mins) {
+ job_ptr->state_reason = WAIT_ASSOC_RESOURCE_LIMIT;
+ xfree(job_ptr->state_desc);
+ debug2("job %u being held, "
+ "qos %s is at or exceeds "
+ "group max running cpu minutes limit %u "
+ "with already used %lu + requested %lu ",
+ job_ptr->job_id, qos_ptr->name,
+ qos_ptr->grp_cpu_run_mins,
+ qos_ptr->usage->grp_used_cpu_run_secs/60,
+ job_ptr->details->min_cpus * job_ptr->time_limit);
+ rc = false;
+ goto end_it;
+ }
+ }
+
if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
&& qos_ptr->grp_nodes != INFINITE) {
if (job_ptr->details->min_nodes > qos_ptr->grp_nodes) {
@@ -1132,6 +1173,29 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
goto end_it;
}
+ if ((!qos_ptr ||
+ (qos_ptr && qos_ptr->grp_cpu_run_mins == INFINITE))
+ && (assoc_ptr->grp_cpu_run_mins != INFINITE)) {
+ if(( assoc_ptr->usage->grp_used_cpu_run_secs/60 +
+ job_ptr->details->min_cpus * job_ptr->time_limit )
+ > assoc_ptr->grp_cpu_run_mins) {
+ job_ptr->state_reason = WAIT_ASSOC_RESOURCE_LIMIT;
+ xfree(job_ptr->state_desc);
+ debug2("job %u being held, "
+ "assoc %u is at or exceeds "
+ "group max running cpu minutes limit %u "
+ "with already used %lu + requested %lu "
+ "for account %s",
+ job_ptr->job_id, assoc_ptr->id,
+ assoc_ptr->grp_cpu_run_mins,
+ assoc_ptr->usage->grp_used_cpu_run_secs/60,
+ job_ptr->details->min_cpus * job_ptr->time_limit,
+ assoc_ptr->acct);
+ rc = false;
+ goto end_it;
+ }
+ }
+
if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
&& (!qos_ptr ||
(qos_ptr && qos_ptr->grp_nodes == INFINITE))