Hello there!
The topology-aware node selection algorithm in the select/linear
plugin is suboptimal: for example, it calls the _get_avail_cpus()
function several times for the same node. As this may affect job
submission time, I have made some optimizations to it and also fixed a
few small places in the plugin. See the attached patch. Only the
_job_test_topo() function was changed there, not any other one. I have
not done any profiling, sorry, but it should be faster, as I have
removed an extra bitstr_t and extra calls from the loops. It works
well in tests.
With best wishes.
Andriy.

diff --git a/linear/select_linear.c
index 48ae07e..43a4e71 100644
--- a/linear/select_linear.c
+++ b/linear/select_linear.c
@@ -779,7 +779,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
int rem_cpus, rem_nodes; /* remaining resources desired */
int best_fit_nodes, best_fit_cpus, best_fit_req;
int best_fit_location = 0, best_fit_sufficient;
- int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+ int avail_cpus, total_cpus = 0;
if (bit_set_count(bitmap) < min_nodes)
return error_code;
@@ -828,7 +828,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
rem_nodes--;
max_nodes--;
rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
total_cpus += _get_total_cpus(index);
} else { /* node not required (yet) */
bit_clear(bitmap, index);
@@ -956,7 +955,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
max_nodes--;
avail_cpus = _get_avail_cpus(job_ptr, i);
rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
total_cpus += _get_total_cpus(i);
}
for (i = (best_fit_req - 1);
@@ -971,7 +969,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
max_nodes--;
avail_cpus = _get_avail_cpus(job_ptr, i);
rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
total_cpus += _get_total_cpus(i);
}
} else {
@@ -987,7 +984,6 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
max_nodes--;
avail_cpus = _get_avail_cpus(job_ptr, i);
rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
total_cpus += _get_total_cpus(i);
}
}
@@ -1028,13 +1024,13 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
{
bitstr_t **switches_bitmap; /* nodes on this switch */
int *switches_cpu_cnt; /* total CPUs on switch */
- int *switches_node_cnt; /* total nodes on switch */
+ uint32_t *switches_node_cnt; /* total nodes on switch */
int *switches_required; /* set if has required node */
- bitstr_t *avail_nodes_bitmap = NULL; /* nodes on any switch */
bitstr_t *req_nodes_bitmap = NULL;
- int rem_cpus, rem_nodes; /* remaining resources desired */
- int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+ int rem_cpus; /* remaining resources desired */
+ int avail_cpus, total_cpus = 0;
+ uint32_t want_nodes, alloc_nodes = 0;
int i, j, rc = SLURM_SUCCESS;
int best_fit_inx, first, last;
int best_fit_nodes, best_fit_cpus;
@@ -1043,10 +1039,16 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
rem_cpus = job_ptr->details->min_cpus;
if (req_nodes > min_nodes)
- rem_nodes = req_nodes;
+ want_nodes = req_nodes;
else
- rem_nodes = min_nodes;
+ want_nodes = min_nodes;
+ /* Construct a set of switch array entries,
+ * use the same indexes as switch_record_table in slurmctld */
+ switches_bitmap = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+ switches_cpu_cnt = xmalloc(sizeof(int) * switch_record_cnt);
+ switches_node_cnt = xmalloc(sizeof(uint32_t) * switch_record_cnt);
+ switches_required = xmalloc(sizeof(int) * switch_record_cnt);
if (job_ptr->details->req_node_bitmap) {
req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
i = bit_set_count(req_nodes_bitmap);
@@ -1059,22 +1061,21 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
}
}
- /* Construct a set of switch array entries,
- * use the same indexes as switch_record_table in slurmctld */
- switches_bitmap = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
- switches_cpu_cnt = xmalloc(sizeof(int) * switch_record_cnt);
- switches_node_cnt = xmalloc(sizeof(int) * switch_record_cnt);
- switches_required = xmalloc(sizeof(int) * switch_record_cnt);
- avail_nodes_bitmap = bit_alloc(node_record_count);
+ /* phase 1: make availability bitmaps for switches */
+#if SELECT_DEBUG
+ debug5("_job_test_topo: phase 1");
+#endif
+ sufficient = false;
for (i=0; i<switch_record_cnt; i++) {
switches_bitmap[i] = bit_copy(switch_record_table[i].
node_bitmap);
bit_and(switches_bitmap[i], bitmap);
- bit_or(avail_nodes_bitmap, switches_bitmap[i]);
- switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
if (req_nodes_bitmap &&
- bit_overlap(req_nodes_bitmap, switches_bitmap[i])) {
- switches_required[i] = 1;
+ !bit_super_set(req_nodes_bitmap, switches_bitmap[i]))
+ switches_node_cnt[i] = 0;
+ else {
+ switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+ sufficient = true;
}
}
bit_nclear(bitmap, 0, node_record_count - 1);
@@ -1085,130 +1086,48 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
char *node_names = NULL;
if (switches_node_cnt[i])
node_names = bitmap2node_name(switches_bitmap[i]);
- debug("switch=%s nodes=%u:%s required:%u speed=%u",
+ debug("switch=%s nodes=%u:%s "/*required:%u*/ "speed=%u",
switch_record_table[i].name,
switches_node_cnt[i], node_names,
- switches_required[i],
switch_record_table[i].link_speed);
xfree(node_names);
}
#endif
- if (req_nodes_bitmap &&
- (!bit_super_set(req_nodes_bitmap, avail_nodes_bitmap))) {
+ /* check if requested nodes are available */
+ if (!sufficient) {
info("job %u requires nodes not available on any switch",
job_ptr->job_id);
rc = EINVAL;
goto fini;
}
- if (req_nodes_bitmap) {
- /* Accumulate specific required resources, if any */
- first = bit_ffs(req_nodes_bitmap);
- last = bit_fls(req_nodes_bitmap);
- for (i=first; ((i<=last) && (first>=0)); i++) {
- if (!bit_test(req_nodes_bitmap, i))
- continue;
- if (max_nodes <= 0) {
- info("job %u requires nodes than allowed",
- job_ptr->job_id);
- rc = EINVAL;
- goto fini;
- }
- bit_set(bitmap, i);
- bit_clear(avail_nodes_bitmap, i);
- rem_nodes--;
- max_nodes--;
- avail_cpus = _get_avail_cpus(job_ptr, i);
- rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
- total_cpus += _get_total_cpus(i);
- for (j=0; j<switch_record_cnt; j++) {
- if (!bit_test(switches_bitmap[j], i))
- continue;
- bit_clear(switches_bitmap[j], i);
- switches_node_cnt[j]--;
- }
- }
- if ((rem_nodes <= 0) && (rem_cpus <= 0))
- goto fini;
-
- /* Accumulate additional resources from leafs that
- * contain required nodes */
- for (j=0; j<switch_record_cnt; j++) {
- if ((switch_record_table[j].level != 0) ||
- (switches_node_cnt[j] == 0) ||
- (switches_required[j] == 0)) {
- continue;
- }
- while ((max_nodes > 0) &&
- ((rem_nodes > 0) || (rem_cpus > 0))) {
- i = bit_ffs(switches_bitmap[j]);
- if (i == -1)
- break;
- bit_clear(switches_bitmap[j], i);
- switches_node_cnt[j]--;
- if (bit_test(bitmap, i)) {
- /* node on multiple leaf switches
- * and already selected */
- continue;
- }
- bit_set(bitmap, i);
- bit_clear(avail_nodes_bitmap, i);
- rem_nodes--;
- max_nodes--;
- avail_cpus = _get_avail_cpus(job_ptr, i);
- rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
- total_cpus += _get_total_cpus(i);
- }
- }
- if ((rem_nodes <= 0) && (rem_cpus <= 0))
- goto fini;
-
- /* Update bitmaps and node counts for higher-level switches */
- for (j=0; j<switch_record_cnt; j++) {
- if (switches_node_cnt[j] == 0)
- continue;
- first = bit_ffs(switches_bitmap[j]);
- if (first < 0)
- continue;
- last = bit_fls(switches_bitmap[j]);
- for (i=first; i<=last; i++) {
- if (!bit_test(switches_bitmap[j], i))
- continue;
- if (!bit_test(avail_nodes_bitmap, i)) {
- /* cleared from lower level */
- bit_clear(switches_bitmap[j], i);
- switches_node_cnt[j]--;
- } else {
- switches_cpu_cnt[j] +=
- _get_avail_cpus(job_ptr, i);
- }
- }
- }
- } else {
- /* No specific required nodes, calculate CPU counts */
+ /* phase 2: accumulate all cpu resources for each switch */
+#if SELECT_DEBUG
+ debug5("_job_test_topo: phase 2");
+#endif
+ for (i = 0; i < node_record_count; i++) {
+ avail_cpus = _get_avail_cpus(job_ptr, i);
for (j=0; j<switch_record_cnt; j++) {
- first = bit_ffs(switches_bitmap[j]);
- if (first < 0)
- continue;
- last = bit_fls(switches_bitmap[j]);
- for (i=first; i<=last; i++) {
- if (!bit_test(switches_bitmap[j], i))
- continue;
- switches_cpu_cnt[j] +=
- _get_avail_cpus(job_ptr, i);
+ if (bit_test(switches_bitmap[j], i)) {
+ switches_cpu_cnt[j] += avail_cpus;
}
}
}
+ /* phase 3 */
+#if SELECT_DEBUG
+ debug5("_job_test_topo: phase 3");
+#endif
/* Determine lowest level switch satifying request with best fit */
best_fit_inx = -1;
for (j=0; j<switch_record_cnt; j++) {
+#if SELECT_DEBUG
+ debug5("checking switch %d: nodes %u cpus %d", j,
+ switches_node_cnt[j], switches_cpu_cnt[j]);
+#endif
if ((switches_cpu_cnt[j] < rem_cpus) ||
- (!_enough_nodes(switches_node_cnt[j], rem_nodes,
- min_nodes, req_nodes)))
+ (switches_node_cnt[j] < want_nodes))
continue;
if ((best_fit_inx == -1) ||
(switch_record_table[j].level <
@@ -1224,27 +1143,122 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
rc = EINVAL;
goto fini;
}
- bit_and(avail_nodes_bitmap, switches_bitmap[best_fit_inx]);
+ /* phase 4: select resources from already allocated leaves */
+#if SELECT_DEBUG
+ debug5("_job_test_topo: phase 4");
+#endif
/* Identify usable leafs (within higher switch having best fit) */
for (j=0; j<switch_record_cnt; j++) {
- if ((switch_record_table[j].level != 0) ||
+ if ((switch_record_table[j].level > 0) ||
(!bit_super_set(switches_bitmap[j],
switches_bitmap[best_fit_inx]))) {
switches_node_cnt[j] = 0;
+ } else if (req_nodes_bitmap) {
+ /* we have subnodes count zeroed yet so count them */
+ switches_node_cnt[j] = bit_set_count(switches_bitmap[j]);
+ }
+ }
+ /* set already allocated nodes and gather additional resources */
+ if (req_nodes_bitmap) {
+ /* Accumulate specific required resources, if any */
+ for (j=0; j<switch_record_cnt; j++) {
+ if (alloc_nodes > max_nodes)
+ break;
+ if (switches_node_cnt[j] == 0 ||
+ bit_overlap(req_nodes_bitmap,
+ switches_bitmap[j]) == 0)
+ continue;
+
+ /* Use nodes from this leaf */
+ first = bit_ffs(switches_bitmap[j]);
+ if (first < 0) {
+ switches_node_cnt[j] = 0;
+ continue;
+ }
+ last = bit_fls(switches_bitmap[j]);
+ for (i=first; i<=last; i++) {
+ if (!bit_test(switches_bitmap[j], i))
+ continue;
+ if (!bit_test(req_nodes_bitmap, i)) {
+ /* node wasn't requested */
+ continue;
+ }
+
+ bit_clear(switches_bitmap[j], i);
+ switches_node_cnt[j]--;
+ avail_cpus = _get_avail_cpus(job_ptr, i);
+ switches_cpu_cnt[j] -= avail_cpus;
+
+ if (bit_test(bitmap, i)) {
+ /* node on multiple leaf switches
+ * and already selected */
+ continue;
+ }
+
+ switches_required[j] = 1;
+ bit_set(bitmap, i);
+ alloc_nodes++;
+ rem_cpus -= avail_cpus;
+ total_cpus += _get_total_cpus(i);
+ }
+ }
+ /* Accumulate additional resources from leafs that
+ * contain required nodes */
+ for (j=0; j<switch_record_cnt; j++) {
+ if ((alloc_nodes > max_nodes) ||
+ ((alloc_nodes >= want_nodes) && (rem_cpus <= 0)))
+ break;
+ if (switches_required[j] == 0)
+ continue;
+
+ /* Use nodes from this leaf */
+ first = bit_ffs(switches_bitmap[j]);
+ if (first < 0) {
+ switches_node_cnt[j] = 0;
+ continue;
+ }
+ last = bit_fls(switches_bitmap[j]);
+ for (i=first; i<=last; i++) {
+ if (!bit_test(switches_bitmap[j], i))
+ continue;
+
+ /* there is no need here to reset anything
+ for switch j as we disable it after cycle
+ by setting switches_node_cnt[j] to 0 */
+ if (bit_test(bitmap, i)) {
+ /* node on multiple leaf switches
+ * and already selected */
+ continue;
+ }
+
+ bit_set(bitmap, i);
+ alloc_nodes++;
+ rem_cpus -= _get_avail_cpus(job_ptr, i);
+ total_cpus += _get_total_cpus(i);
+ if ((alloc_nodes > max_nodes) ||
+ ((alloc_nodes >= want_nodes) &&
+ (rem_cpus <= 0)))
+ break;
+ }
+ switches_node_cnt[j] = 0; /* it's used up */
}
}
+ /* phase 5 */
+#if SELECT_DEBUG
+ debug5("_job_test_topo: phase 5");
+#endif
/* Select resources from these leafs on a best-fit basis */
- while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+ while ((alloc_nodes <= max_nodes) &&
+ ((alloc_nodes < want_nodes) || (rem_cpus > 0))) {
best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
for (j=0; j<switch_record_cnt; j++) {
if (switches_node_cnt[j] == 0)
continue;
sufficient = (switches_cpu_cnt[j] >= rem_cpus) &&
- _enough_nodes(switches_node_cnt[j],
- rem_nodes, min_nodes,
- req_nodes);
+ (switches_node_cnt[j] >= min_nodes -
+ alloc_nodes);
/* If first possibility OR */
/* first set large enough for request OR */
/* tightest fit (less resource waste) OR */
@@ -1253,7 +1267,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
(sufficient && (best_fit_sufficient == 0)) ||
(sufficient &&
(switches_cpu_cnt[j] < best_fit_cpus)) ||
- ((sufficient == 0) &&
+ (!sufficient &&
(switches_cpu_cnt[j] > best_fit_cpus))) {
best_fit_cpus = switches_cpu_cnt[j];
best_fit_nodes = switches_node_cnt[j];
@@ -1261,40 +1275,43 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
best_fit_sufficient = sufficient;
}
}
+#if SELECT_DEBUG
+ debug5("found switch %d for allocation: nodes %d cpus %d allocated %u",
+ best_fit_location, best_fit_nodes, best_fit_cpus, alloc_nodes);
+#endif
if (best_fit_nodes == 0)
break;
+
/* Use select nodes from this leaf */
first = bit_ffs(switches_bitmap[best_fit_location]);
+ if (first < 0) {
+ switches_node_cnt[best_fit_location] = 0;
+ continue;
+ }
last = bit_fls(switches_bitmap[best_fit_location]);
- for (i=first; ((i<=last) && (first>=0)); i++) {
+ for (i=first; i<=last; i++) {
if (!bit_test(switches_bitmap[best_fit_location], i))
continue;
- bit_clear(switches_bitmap[best_fit_location], i);
- switches_node_cnt[best_fit_location]--;
- avail_cpus = _get_avail_cpus(job_ptr, i);
- switches_cpu_cnt[best_fit_location] -= avail_cpus;
-
if (bit_test(bitmap, i)) {
/* node on multiple leaf switches
* and already selected */
continue;
}
+ switches_required[best_fit_location] = 1;
bit_set(bitmap, i);
- rem_nodes--;
- max_nodes--;
- rem_cpus -= avail_cpus;
- alloc_cpus += avail_cpus;
+ alloc_nodes++;
+ rem_cpus -= _get_avail_cpus(job_ptr, i);
total_cpus += _get_total_cpus(i);
- if ((max_nodes <= 0) ||
- ((rem_nodes <= 0) && (rem_cpus <= 0)))
+ if ((alloc_nodes > max_nodes) ||
+ ((alloc_nodes >= want_nodes) && (rem_cpus <= 0)))
break;
}
switches_node_cnt[best_fit_location] = 0;
}
- if ((rem_cpus <= 0) &&
- _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) {
+ if ((alloc_nodes <= max_nodes) && (rem_cpus <= 0) &&
+ (alloc_nodes >= min_nodes)) {
rc = SLURM_SUCCESS;
} else
rc = EINVAL;
@@ -1302,8 +1319,9 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
fini: if (rc == SLURM_SUCCESS) {
/* Job's total_cpus is needed for SELECT_MODE_WILL_RUN */
job_ptr->total_cpus = total_cpus;
- }
- FREE_NULL_BITMAP(avail_nodes_bitmap);
+ } else if (alloc_nodes > max_nodes)
+ info("job %u requires more nodes than allowed",
+ job_ptr->job_id);
FREE_NULL_BITMAP(req_nodes_bitmap);
for (i=0; i<switch_record_cnt; i++)
FREE_NULL_BITMAP(switches_bitmap[i]);
@@ -2347,7 +2365,11 @@ extern int select_p_state_restore(char *dir_name)
return SLURM_SUCCESS;
}
-extern int select_p_job_init(List job_list)
+/*
+ * Note the initialization of job records, issued upon restart of
+ * slurmctld and used to synchronize any job state.
+ */
+extern int select_p_job_init(List job_list_arg)
{
return SLURM_SUCCESS;
}
@@ -2379,7 +2401,7 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
return SLURM_SUCCESS;
}
-extern int select_p_block_init(List part_list)
+extern int select_p_block_init(List block_list)
{
return SLURM_SUCCESS;
}
@@ -2466,6 +2488,11 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
return rc;
}
+/*
+ * Note initiation of job is about to begin. Called immediately
+ * after select_p_job_test(). Executed from slurmctld.
+ * IN job_ptr - pointer to job being initiated
+ */
extern int select_p_job_begin(struct job_record *job_ptr)
{
int rc = SLURM_SUCCESS;
@@ -2495,13 +2522,18 @@ extern int select_p_job_begin(struct job_record *job_ptr)
slurm_mutex_lock(&cr_mutex);
if (cr_ptr == NULL)
_init_node_cr();
- _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_begin", 1);
+ if (rc == SLURM_SUCCESS)
+ rc = _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_begin", 1);
gres_plugin_job_state_log(job_ptr->gres_list, job_ptr->job_id);
slurm_mutex_unlock(&cr_mutex);
return rc;
}
-/* Determine if allocated nodes are usable (powered up) */
+/*
+ * Determine if allocated nodes are usable (powered up)
+ * IN job_ptr - pointer to job being tested
+ * RET -1 on error, 1 if ready to execute, 0 otherwise
+ */
extern int select_p_job_ready(struct job_record *job_ptr)
{
int i, i_first, i_last;
@@ -2528,6 +2560,11 @@ extern int select_p_job_ready(struct job_record *job_ptr)
return READY_NODE_STATE;
}
+/*
+ * Modify internal data structures for a job that has changed size
+ * Only support jobs shrinking now.
+ * RET: 0 or an error code
+ */
extern int select_p_job_resized(struct job_record *job_ptr,
struct node_record *node_ptr)
{
@@ -2555,6 +2592,10 @@ extern int select_p_job_resized(struct job_record *job_ptr,
return rc;
}
+/*
+ * Note termination of job is starting. Executed from slurmctld.
+ * IN job_ptr - pointer to job being terminated
+ */
extern int select_p_job_fini(struct job_record *job_ptr)
{
int rc = SLURM_SUCCESS;
@@ -2578,29 +2619,45 @@ extern int select_p_job_fini(struct job_record *job_ptr)
slurm_mutex_lock(&cr_mutex);
if (cr_ptr == NULL)
_init_node_cr();
- _rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_fini", true);
+ if (_rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_fini", true) !=
+ SLURM_SUCCESS)
+ rc = SLURM_ERROR;
slurm_mutex_unlock(&cr_mutex);
return rc;
}
+/*
+ * Suspend a job. Executed from slurmctld.
+ * IN job_ptr - pointer to job being suspended
+ * RET SLURM_SUCCESS or error code
+ */
extern int select_p_job_suspend(struct job_record *job_ptr)
{
+ int rc;
+
slurm_mutex_lock(&cr_mutex);
if (cr_ptr == NULL)
_init_node_cr();
- _rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_suspend", false);
+ rc = _rm_job_from_nodes(cr_ptr, job_ptr, "select_p_job_suspend", false);
slurm_mutex_unlock(&cr_mutex);
- return SLURM_SUCCESS;
+ return rc;
}
+/*
+ * Resume a job. Executed from slurmctld.
+ * IN job_ptr - pointer to job being resumed
+ * RET SLURM_SUCCESS or error code
+ */
extern int select_p_job_resume(struct job_record *job_ptr)
{
+ int rc;
+
slurm_mutex_lock(&cr_mutex);
if (cr_ptr == NULL)
_init_node_cr();
- _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_resume", 0);
+ rc = _add_job_to_nodes(cr_ptr, job_ptr, "select_p_job_resume", 0);
slurm_mutex_unlock(&cr_mutex);
- return SLURM_SUCCESS;
+ return rc;
}
extern int select_p_pack_select_info(time_t last_query_time,
@@ -2763,11 +2820,22 @@ extern int select_p_select_nodeinfo_get(select_nodeinfo_t *nodeinfo,
return rc;
}
+/*
+ * allocate storage for a select job credential
+ * RET - storage for a select job credential
+ * NOTE: storage must be freed using select_p_select_jobinfo_free
+ */
extern select_jobinfo_t *select_p_select_jobinfo_alloc(void)
{
- return SLURM_SUCCESS;
+ return NULL;
}
+/*
+ * fill in a previously allocated select job credential
+ * IN/OUT jobinfo - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
enum select_jobdata_type data_type,
void *data)
@@ -2775,6 +2843,13 @@ extern int select_p_select_jobinfo_set(select_jobinfo_t *jobinfo,
return SLURM_SUCCESS;
}
+/*
+ * get data from a select job credential
+ * IN jobinfo - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * OUT data - the data to get from job credential, caller must xfree
+ * data for data_type == SELECT_JOBDATA_PART_ID
+ */
extern int select_p_select_jobinfo_get (select_jobinfo_t *jobinfo,
enum select_jobdata_type data_type,
void *data)
@@ -2782,23 +2857,49 @@ extern int select_p_select_jobinfo_get (select_jobinfo_t *jobinfo,
return SLURM_ERROR;
}
+/*
+ * copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_p_select_jobinfo_free
+ */
extern select_jobinfo_t *select_p_select_jobinfo_copy(
select_jobinfo_t *jobinfo)
{
return NULL;
}
+/*
+ * free storage previously allocated for a select job credential
+ * IN jobinfo - the select job credential to be freed
+ * RET - slurm error code
+ */
extern int select_p_select_jobinfo_free (select_jobinfo_t *jobinfo)
{
return SLURM_SUCCESS;
}
+/*
+ * pack a select job credential into a buffer in machine independent form
+ * IN jobinfo - the select job credential to be saved
+ * OUT buffer - buffer with select credential appended
+ * IN protocol_version - slurm protocol version of client
+ * RET - slurm error code
+ */
extern int select_p_select_jobinfo_pack(select_jobinfo_t *jobinfo, Buf buffer,
uint16_t protocol_version)
{
return SLURM_SUCCESS;
}
+/*
+ * unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN buffer - buffer with select credential read from current pointer loc
+ * IN protocol_version - slurm protocol version of client
+ * RET - slurm error code
+ * NOTE: returned value must be freed using select_p_select_jobinfo_free
+ */
extern int select_p_select_jobinfo_unpack(select_jobinfo_t **jobinfo,
Buf buffer,
uint16_t protocol_version)
@@ -2822,17 +2923,17 @@ extern char *select_p_select_jobinfo_xstrdup(select_jobinfo_t *jobinfo,
return NULL;
}
-extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
+extern int select_p_update_block (update_block_msg_t *block_desc_ptr)
{
return SLURM_SUCCESS;
}
-extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
+extern int select_p_update_sub_node (update_block_msg_t *block_desc_ptr)
{
return SLURM_SUCCESS;
}
-extern int select_p_get_info_from_plugin (enum select_jobdata_type info,
+extern int select_p_get_info_from_plugin (enum select_plugindata_info dinfo,
struct job_record *job_ptr,
void *data)
{