本文整理汇总了C++中IS_JOB_PENDING函数的典型用法代码示例。如果您正苦于以下问题:C++ IS_JOB_PENDING函数的具体用法?C++ IS_JOB_PENDING怎么用?C++ IS_JOB_PENDING使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了IS_JOB_PENDING函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: jobacct_storage_g_job_start
/*
 * Record information about a job in accounting storage,
 * typically when it begins execution, but possibly earlier.
 */
extern int jobacct_storage_g_job_start(void *db_conn,
				       struct job_record *job_ptr)
{
	int rc;
	time_t saved_start_time;

	if (slurm_acct_storage_init(NULL) < 0)
		return SLURM_ERROR;
	if (enforce & ACCOUNTING_ENFORCE_NO_JOBS)
		return SLURM_SUCCESS;

	/* A pending job's start_time holds its expected initiation time
	 * (changed in slurm v2.1). Rather than changing a bunch of code
	 * in the accounting_storage plugins and SlurmDBD, just clear
	 * start_time around the accounting call and restore it after.
	 * Pending + Completing is equivalent to Requeue: in that case we
	 * must not modify the start_time of the old record, so the
	 * save/clear/restore dance is skipped. */
	if (!IS_JOB_PENDING(job_ptr) || IS_JOB_COMPLETING(job_ptr))
		return (*(ops.job_start))(db_conn, job_ptr);

	saved_start_time = job_ptr->start_time;
	job_ptr->start_time = (time_t) 0;
	rc = (*(ops.job_start))(db_conn, job_ptr);
	job_ptr->start_time = saved_start_time;
	return rc;
}
示例2: find_preemptable_jobs
/*
 * Build a list of running/suspended jobs that the given pending job
 * may preempt. Returns NULL if the preemptor is invalid or no
 * candidates were found; otherwise a sorted List of job_record ptrs.
 */
extern List find_preemptable_jobs(struct job_record *job_ptr)
{
	List preemptees = NULL;
	ListIterator iter;
	struct job_record *cand;

	/* Validate the preemptor job */
	if (job_ptr == NULL) {
		error("find_preemptable_jobs: job_ptr is NULL");
		return NULL;
	}
	if (!IS_JOB_PENDING(job_ptr)) {
		error("find_preemptable_jobs: job %u not pending",
		      job_ptr->job_id);
		return NULL;
	}
	if (job_ptr->part_ptr == NULL) {
		error("find_preemptable_jobs: job %u has NULL partition ptr",
		      job_ptr->job_id);
		return NULL;
	}
	if (job_ptr->part_ptr->node_bitmap == NULL) {
		error("find_preemptable_jobs: partition %s node_bitmap=NULL",
		      job_ptr->part_ptr->name);
		return NULL;
	}

	/* Scan all jobs, collecting preemption candidates */
	iter = list_iterator_create(job_list);
	while ((cand = (struct job_record *) list_next(iter))) {
		/* Only running or suspended jobs can be preempted */
		if (!IS_JOB_RUNNING(cand) && !IS_JOB_SUSPENDED(cand))
			continue;
		/* Candidate's partition must permit preemption and sit in
		 * a lower priority tier than the preemptor's partition */
		if ((cand->part_ptr == NULL) ||
		    (cand->part_ptr->priority_tier >=
		     job_ptr->part_ptr->priority_tier) ||
		    (cand->part_ptr->preempt_mode == PREEMPT_MODE_OFF))
			continue;
		/* Candidate must occupy nodes usable by the preemptor */
		if ((cand->node_bitmap == NULL) ||
		    (bit_overlap(cand->node_bitmap,
				 job_ptr->part_ptr->node_bitmap) == 0))
			continue;
		/* Never preempt the job being expanded */
		if (job_ptr->details &&
		    (job_ptr->details->expanding_jobid == cand->job_id))
			continue;
		/* This job is a preemption candidate */
		if (preemptees == NULL)
			preemptees = list_create(NULL);
		list_append(preemptees, cand);
	}
	list_iterator_destroy(iter);

	/* Order candidates per configured policy */
	if (preemptees) {
		if (youngest_order)
			list_sort(preemptees, _sort_by_youngest);
		else
			list_sort(preemptees, _sort_by_prio);
	}
	return preemptees;
}
示例3: _get_job_state
/* Map a job record to a Moab-style state name. If the job has already
 * completed, ";EXITCODE=#" is appended to the state name.
 * NOTE: returns a pointer to a static buffer for completed jobs —
 * not reentrant. */
static char * _get_job_state(struct job_record *job_ptr)
{
	static char return_msg[128];
	char *state_str;

	if (IS_JOB_COMPLETING(job_ptr)) {
		/* Give the job KillWait+10 seconds to clear out before
		 * considering it done; otherwise Moab will allocate jobs
		 * to nodes that are already Idle. */
		int age = (int) difftime(time(NULL), job_ptr->end_time);
		if (age < (kill_wait + 10))
			return "Running";
	}
	if (IS_JOB_RUNNING(job_ptr))
		return "Running";
	if (IS_JOB_SUSPENDED(job_ptr))
		return "Suspended";
	if (IS_JOB_PENDING(job_ptr))
		return "Idle";

	if (IS_JOB_COMPLETE(job_ptr) || IS_JOB_FAILED(job_ptr))
		state_str = "Completed";
	else	/* JOB_CANCELLED, JOB_TIMEOUT, JOB_NODE_FAIL, etc. */
		state_str = "Removed";
	snprintf(return_msg, sizeof(return_msg), "%s;EXITCODE=%u",
		 state_str, WEXITSTATUS(job_ptr->exit_code));
	return return_msg;
}
示例4: _get_node_cnt
/* Return the node count associated with a job.
 *
 * For PENDING jobs, return the maximum of the requested nodelist,
 * requested maximum number of nodes, or requested CPUs rounded
 * to nearest node.
 *
 * For COMPLETING jobs, the job->nodes nodelist has already been
 * altered to list only the nodes still in the comp state, so only
 * those nodes are counted toward the total still allocated. */
static int _get_node_cnt(job_info_t * job)
{
	int node_cnt;

	if (!IS_JOB_PENDING(job))
		return _nodes_in_list(job->nodes);

	node_cnt = _nodes_in_list(job->req_nodes);
	node_cnt = MAX(node_cnt, job->num_nodes);
	if ((node_cnt == 1) && (job->num_cpus > 1) &&
	    job->ntasks_per_node &&
	    (job->ntasks_per_node != (uint16_t) NO_VAL)) {
		/* Derive node count from task count and tasks-per-node */
		int num_tasks = job->num_cpus;
		if (job->cpus_per_task != (uint16_t) NO_VAL)
			num_tasks /= job->cpus_per_task;
		node_cnt = (num_tasks + 1) / job->ntasks_per_node;
		if (node_cnt > num_tasks)
			node_cnt = num_tasks;
		else if (!node_cnt)
			node_cnt = 1;
	}
	return node_cnt;
}
示例5: _print_job_job_id
/* Print the JOBID column for one job (or the column header when
 * job == NULL), followed by the optional suffix string. */
int _print_job_job_id(job_info_t * job, int width, bool right, char* suffix)
{
	char id[FORMAT_STRING_SIZE];

	if (job == NULL) {
		/* Print the header instead of a job record */
		_print_str("JOBID", width, right, true);
	} else if ((job->array_task_id != NO_VAL) &&
		   !params.array_flag && IS_JOB_PENDING(job) &&
		   job->node_inx) {
		/* Merged pending array job: node_inx has been re-purposed
		 * to hold the pending array_task_id values (offset in
		 * element 0), so print "jobid_[taskset]" */
		char task_str[FORMAT_STRING_SIZE];
		bitstr_t *task_bits;
		uint32_t i, max_task_id = 0;

		for (i = 1; i <= job->node_inx[0]; i++)
			max_task_id = MAX(max_task_id, job->node_inx[i]);
		task_bits = bit_alloc(max_task_id + 1);
		for (i = 1; i <= job->node_inx[0]; i++)
			bit_set(task_bits, job->node_inx[i]);
		bit_fmt(task_str, sizeof(task_str), task_bits);
		snprintf(id, FORMAT_STRING_SIZE, "%u_[%s]",
			 job->array_job_id, task_str);
		_print_str(id, width, right, true);
		bit_free(task_bits);
	} else if (job->array_task_id != NO_VAL) {
		/* Single array task: "jobid_taskid" */
		snprintf(id, FORMAT_STRING_SIZE, "%u_%u",
			 job->array_job_id, job->array_task_id);
		_print_str(id, width, right, true);
	} else {
		/* Regular (non-array) job */
		snprintf(id, FORMAT_STRING_SIZE, "%u", job->job_id);
		_print_str(id, width, right, true);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
示例6: _get_job_runtime
/* Code taken from job_info.c: calculate cumulative run time for a job,
 * excluding time spent suspended. */
static time_t _get_job_runtime(struct job_record *job_ptr)
{
	time_t end_time;

	if (IS_JOB_PENDING(job_ptr))
		return 0;
	if (IS_JOB_SUSPENDED(job_ptr))
		return job_ptr->pre_sus_time;

	/* Still running (or end time unset): measure up to now */
	if (IS_JOB_RUNNING(job_ptr) || (job_ptr->end_time == 0))
		end_time = time(NULL);
	else
		end_time = job_ptr->end_time;

	if (job_ptr->suspend_time) {
		return (time_t) (difftime(end_time, job_ptr->suspend_time) +
				 job_ptr->pre_sus_time);
	}
	return (time_t) difftime(end_time, job_ptr->start_time);
}
示例7: srun_user_message
/*
 * srun_user_message - Send arbitrary message to an srun job (no job steps)
 *
 * job_ptr IN - job to notify
 * msg IN     - NUL-terminated message text (copied; caller keeps ownership)
 * RET SLURM_SUCCESS if the message was queued for delivery,
 *     ESLURM_ALREADY_DONE if the job is neither pending nor running,
 *     ESLURM_DISABLED if there is no path by which to reach the job.
 */
extern int srun_user_message(struct job_record *job_ptr, char *msg)
{
slurm_addr_t * addr;
srun_user_msg_t *msg_arg;
xassert(job_ptr);
/* Only pending or running jobs can receive a message */
if (!IS_JOB_PENDING(job_ptr) && !IS_JOB_RUNNING(job_ptr))
return ESLURM_ALREADY_DONE;
/* Preferred path: the job registered a response host/port, so send
 * an SRUN_USER_MSG RPC directly to that address via the agent.
 * NOTE(review): allocates sizeof(struct sockaddr_in) for a
 * slurm_addr_t pointer — assumes the two types have the same size;
 * verify against the slurm_addr_t definition. */
if (job_ptr->other_port &&
job_ptr->resp_host && job_ptr->resp_host[0]) {
addr = xmalloc(sizeof(struct sockaddr_in));
slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host);
msg_arg = xmalloc(sizeof(srun_user_msg_t));
msg_arg->job_id = job_ptr->job_id;
msg_arg->msg = xstrdup(msg);
_srun_agent_launch(addr, job_ptr->resp_host, SRUN_USER_MSG,
msg_arg);
return SLURM_SUCCESS;
} else if (job_ptr->batch_flag && IS_JOB_RUNNING(job_ptr)) {
/* Fallback for running batch jobs: queue a REQUEST_JOB_NOTIFY
 * RPC to one node — the front-end host when built with
 * HAVE_FRONT_END, otherwise the first allocated node. */
#ifndef HAVE_FRONT_END
struct node_record *node_ptr;
#endif
job_notify_msg_t *notify_msg_ptr;
agent_arg_t *agent_arg_ptr;
#ifdef HAVE_FRONT_END
if (job_ptr->batch_host == NULL)
return ESLURM_DISABLED;	/* no allocated nodes */
agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
agent_arg_ptr->hostlist = hostlist_create(job_ptr->batch_host);
#else
node_ptr = find_first_node_record(job_ptr->node_bitmap);
if (node_ptr == NULL)
return ESLURM_DISABLED;	/* no allocated nodes */
agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
agent_arg_ptr->hostlist = hostlist_create(node_ptr->name);
#endif
if (agent_arg_ptr->hostlist == NULL)
fatal("hostlist_create: malloc failure");
notify_msg_ptr = (job_notify_msg_t *)
xmalloc(sizeof(job_notify_msg_t));
notify_msg_ptr->job_id = job_ptr->job_id;
notify_msg_ptr->message = xstrdup(msg);
agent_arg_ptr->node_count = 1;
agent_arg_ptr->retry = 0;
agent_arg_ptr->msg_type = REQUEST_JOB_NOTIFY;
agent_arg_ptr->msg_args = (void *) notify_msg_ptr;
/* Launch the RPC via agent */
agent_queue_request(agent_arg_ptr);
return SLURM_SUCCESS;
}
return ESLURM_DISABLED;
}
示例8: _merge_job_array
/* Try to merge a pending array task into an earlier record for the same
 * array job already present in list l. Returns true if merged (caller
 * should then drop job_ptr), false if job_ptr must be kept separately. */
static bool _merge_job_array(List l, job_info_t * job_ptr)
{
	ListIterator iter;
	job_info_t *base;
	bool merged = false;

	/* Merging applies only to pending array tasks, and only when the
	 * user has not requested one record per task */
	if (params.array_flag)
		return false;
	if (job_ptr->array_task_id == NO_VAL)
		return false;
	if (!IS_JOB_PENDING(job_ptr))
		return false;

	xfree(job_ptr->node_inx);
	if (!l)
		return false;

	iter = list_iterator_create(l);
	while ((base = list_next(iter))) {
		if ((base->array_task_id == NO_VAL) ||
		    (job_ptr->array_job_id != base->array_job_id) ||
		    (!IS_JOB_PENDING(base)))
			continue;
		/* We re-purpose the base job's node_inx array to store the
		 * array_task_id values (element 0 holds the count) */
		if (!base->node_inx) {
			base->node_inx = xmalloc(sizeof(int32_t) * 0xffff);
			base->node_inx[0] = 1;	/* offset */
			base->node_inx[1] = base->array_task_id;
		}
		base->node_inx[0]++;
		base->node_inx[base->node_inx[0]] = job_ptr->array_task_id;
		merged = true;
		break;
	}
	list_iterator_destroy(iter);
	return merged;
}
示例9: _ft_decay_apply_new_usage
/* Apply usage with decay factor, then recompute priority factors for
 * pending, non-held jobs. Calls the standard decay/priority functions. */
static void _ft_decay_apply_new_usage(struct job_record *job, time_t *start)
{
	if (!decay_apply_new_usage(job, start))
		return;

	/* Priority 0 is reserved for held jobs; priority calculation is
	 * also skipped for non-pending jobs. */
	if (job->priority && IS_JOB_PENDING(job)) {
		set_priority_factors(*start, job);
		last_job_update = time(NULL);
	}
}
示例10: job_modify
/* Validate and normalize the KNL MCDRAM and NUMA modes in a pending
 * job's feature specification. Appends the configured default mode when
 * none was requested; rejects multiple or unavailable modes.
 * RET SLURM_SUCCESS, ESLURM_JOB_NOT_PENDING, or ESLURM_INVALID_KNL. */
extern int job_modify(struct job_descriptor *job_desc,
		      struct job_record *job_ptr, uint32_t submit_uid)
{
	uint16_t mcdram_bits, numa_bits;
	char *feat_str;

	if (!job_desc->features)
		return SLURM_SUCCESS;
	/* Features may only be changed while the job is pending */
	if (!IS_JOB_PENDING(job_ptr))
		return ESLURM_JOB_NOT_PENDING;

	/* MCDRAM mode: exactly one available option, or none (use default) */
	mcdram_bits = knl_mcdram_parse(job_desc->features, "&");
	switch (knl_mcdram_bits_cnt(mcdram_bits)) {
	case 0:
		/* No MCDRAM mode requested; append the default */
		if (job_desc->features && job_desc->features[0])
			xstrcat(job_desc->features, "&");
		feat_str = knl_mcdram_str(default_mcdram);
		xstrcat(job_desc->features, feat_str);
		xfree(feat_str);
		break;
	case 1:
		if ((mcdram_bits & avail_mcdram) == 0)
			return ESLURM_INVALID_KNL; /* Unavailable option */
		break;
	default:
		return ESLURM_INVALID_KNL; /* Multiple MCDRAM options */
	}

	/* NUMA mode: exactly one available option, or none (use default) */
	numa_bits = knl_numa_parse(job_desc->features, "&");
	switch (knl_numa_bits_cnt(numa_bits)) {
	case 0:
		/* No NUMA mode requested; append the default */
		if (job_desc->features && job_desc->features[0])
			xstrcat(job_desc->features, "&");
		feat_str = knl_numa_str(default_numa);
		xstrcat(job_desc->features, feat_str);
		xfree(feat_str);
		break;
	case 1:
		if ((numa_bits & avail_numa) == 0)
			return ESLURM_INVALID_KNL; /* Unavailable NUMA option */
		break;
	default:
		return ESLURM_INVALID_KNL; /* Multiple NUMA options */
	}
	return SLURM_SUCCESS;
}
示例11: job_time_used
/* Return the job's accumulated run time in seconds, excluding time
 * spent suspended. Returns 0 for jobs that have not started. */
long job_time_used(job_info_t * job_ptr)
{
	time_t end;

	/* Not yet started */
	if ((job_ptr->start_time == 0) || IS_JOB_PENDING(job_ptr))
		return 0L;
	/* Suspended: report only the time accrued before suspension */
	if (IS_JOB_SUSPENDED(job_ptr))
		return (long) job_ptr->pre_sus_time;

	if (IS_JOB_RUNNING(job_ptr) || (job_ptr->end_time == 0))
		end = time(NULL);
	else
		end = job_ptr->end_time;

	if (job_ptr->suspend_time) {
		return (long) (difftime(end, job_ptr->suspend_time) +
			       job_ptr->pre_sus_time);
	}
	return (long) difftime(end, job_ptr->start_time);
}
示例12: jobacct_storage_g_job_start
/*
 * Record information about a job in accounting storage,
 * typically when it begins execution, but possibly earlier.
 */
extern int jobacct_storage_g_job_start(void *db_conn,
				       struct job_record *job_ptr)
{
	int rc;
	time_t saved_start_time;

	if (slurm_acct_storage_init(NULL) < 0)
		return SLURM_ERROR;

	/* A pending job's start_time holds its expected initiation time
	 * (changed in slurm v2.1). Rather than changing a bunch of code
	 * in the accounting_storage plugins and SlurmDBD, just clear
	 * start_time around the accounting call and restore it after. */
	if (!IS_JOB_PENDING(job_ptr)) {
		return (*(g_acct_storage_context->ops.job_start))(
			db_conn, job_ptr);
	}

	saved_start_time = job_ptr->start_time;
	job_ptr->start_time = (time_t) 0;
	rc = (*(g_acct_storage_context->ops.job_start))(db_conn, job_ptr);
	job_ptr->start_time = saved_start_time;
	return rc;
}
示例13: _print_job_reason_list
/* Print the NODELIST(REASON) column for one job (or the header when
 * job == NULL), followed by the optional suffix string. Jobs that are
 * not running show their wait/failure reason; others show their nodes. */
int _print_job_reason_list(job_info_t * job, int width, bool right,
			   char* suffix)
{
	if (job == NULL) {
		/* Print the header instead of a job record */
		char *title = "NODELIST(REASON)";
		if (params.cluster_flags & CLUSTER_FLAG_BG)
			title = "MIDPLANELIST(REASON)";
		_print_str(title, width, right, false);
	} else if (!IS_JOB_COMPLETING(job) &&
		   (IS_JOB_PENDING(job) || IS_JOB_TIMEOUT(job) ||
		    IS_JOB_FAILED(job))) {
		/* Show why the job is not running */
		char id[FORMAT_STRING_SIZE];
		char *reason = job->state_desc ?
			job->state_desc :
			job_reason_string(job->state_reason);
		snprintf(id, FORMAT_STRING_SIZE, "(%s)", reason);
		_print_str(id, width, right, true);
	} else {
		/* Show the allocated node list (with ionodes, if any) */
		char *ionodes = NULL;
		char *nodes = xstrdup(job->nodes);
		select_g_select_jobinfo_get(job->select_jobinfo,
					    SELECT_JOBDATA_IONODES,
					    &ionodes);
		if (ionodes) {
			xstrfmtcat(nodes, "[%s]", ionodes);
			xfree(ionodes);
			_print_str(nodes, width, right, false);
		} else
			_print_nodes(nodes, width, right, false);
		xfree(nodes);
	}
	if (suffix)
		printf("%s", suffix);
	return SLURM_SUCCESS;
}
示例14: _pending_pack_jobs
/* Return true if any component of the job's pack (heterogeneous job)
 * is still pending; false otherwise or when the job is not part of a
 * pack, or its pack leader/list cannot be located. */
static bool _pending_pack_jobs(struct job_record *job_ptr)
{
	struct job_record *leader, *component;
	ListIterator iter;
	bool any_pending = false;

	if (job_ptr->pack_job_id == 0)
		return false;

	leader = find_job_record(job_ptr->pack_job_id);
	if (!leader) {
		error("Job pack leader %pJ not found", job_ptr);
		return false;
	}
	if (!leader->pack_job_list) {
		error("Job pack leader %pJ lacks pack_job_list",
		      job_ptr);
		return false;
	}

	iter = list_iterator_create(leader->pack_job_list);
	while ((component = (struct job_record *) list_next(iter))) {
		if (leader->pack_job_id != component->pack_job_id) {
			error("%s: Bad pack_job_list for %pJ",
			      __func__, leader);
			continue;
		}
		if (IS_JOB_PENDING(component)) {
			any_pending = true;
			break;
		}
	}
	list_iterator_destroy(iter);
	return any_pending;
}
示例15: _get_node_cnt
/* Return the node count associated with a job.
 *
 * For PENDING jobs, return the maximum of the requested nodelist,
 * requested maximum number of nodes, or requested CPUs rounded
 * to nearest node.
 *
 * For COMPLETING jobs, the job->nodes nodelist has already been
 * altered to list only the nodes still in the comp state, so only
 * those nodes are counted toward the total still allocated. */
static int _get_node_cnt(job_info_t * job)
{
	int node_cnt, cpu_nodes;

	if (!IS_JOB_PENDING(job))
		return _nodes_in_list(job->nodes);

	node_cnt = _nodes_in_list(job->req_nodes);
	node_cnt = MAX(node_cnt, job->num_nodes);
	/* Round the requested CPU count up to whole nodes */
	cpu_nodes = (job->num_cpus + params.max_cpus - 1) / params.max_cpus;
	return MAX(node_cnt, cpu_nodes);
}