This article collects typical usage examples of the slurm_get_debug_flags function in C++. If you are wondering exactly how slurm_get_debug_flags is used, or what it is good for, the curated code examples below may help.
The following shows 15 code examples of the slurm_get_debug_flags function, sorted by popularity by default.
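Nearly all of the examples share one pattern: fetch the DebugFlags bitmask from the Slurm configuration with slurm_get_debug_flags(), then test an individual DEBUG_FLAG_* bit before emitting verbose log output. Below is a minimal sketch of that pattern; the function names are made up for illustration, and the 64-bit flags type is an assumption (older Slurm releases use a 32-bit value):

/* Minimal sketch of the shared pattern (illustrative names only) */
static uint64_t debug_flags = 0;

static void _my_plugin_init(void)
{
	/* Cache the flags once at startup; long-lived daemons re-read
	 * them on reconfigure (see Example 9 below) */
	debug_flags = slurm_get_debug_flags();
}

static void _my_plugin_work(void)
{
	if (debug_flags & DEBUG_FLAG_PRIO)
		info("prio: verbose diagnostics enabled");
}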
Example 1: _job_prio_preemptable

/* Return true if the preemptor can preempt the preemptee, otherwise false */
static bool _job_prio_preemptable(struct job_record *preemptor,
				  struct job_record *preemptee)
{
	uint32_t job_prio1, job_prio2;
	int rc;

	if (CHECK_FOR_PREEMPTOR_OVERALLOC) {
		rc = _overalloc_test(preemptor, preemptee);
		if (rc > 0)
			return true;
		else if (rc < 0)
			return false;
	}

	job_prio1 = preemptor->priority;
	job_prio2 = preemptee->priority;

	if (job_prio1 > job_prio2) {
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: Priority of JobId %u > JobId %u (%u > %u)",
			     plugin_type, preemptor->job_id, preemptee->job_id,
			     job_prio1, job_prio2);
		}
		return true;	/* Preemptor can preempt */
	} else {
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: Priority of JobId %u <= JobId %u (%u <= %u)",
			     plugin_type, preemptor->job_id, preemptee->job_id,
			     job_prio1, job_prio2);
		}
		return false;	/* Preemptor can not preempt */
	}
}
Example 2: _is_job_runtime_greater

/* Return true if the cumulative run time of job1 is greater than that
 * of job2 */
static bool _is_job_runtime_greater(struct job_record *job_ptr1,
				    struct job_record *job_ptr2)
{
	time_t runtime_job1, runtime_job2;
	double timediff_job1_job2 = 0.0;

	runtime_job1 = _get_job_runtime(job_ptr1);
	runtime_job2 = _get_job_runtime(job_ptr2);
	timediff_job1_job2 = difftime(runtime_job1, runtime_job2);

	if (timediff_job1_job2 > 0) {
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: Runtime of JobId %u > JobId %u (%u > %u)",
			     plugin_type, job_ptr1->job_id, job_ptr2->job_id,
			     (uint32_t) runtime_job1, (uint32_t) runtime_job2);
		}
		return true;
	} else {
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: Runtime of JobId %u <= JobId %u (%u <= %u)",
			     plugin_type, job_ptr1->job_id, job_ptr2->job_id,
			     (uint32_t) runtime_job1, (uint32_t) runtime_job2);
		}
		return false;
	}
}
Example 3: while

extern void *_sicp_agent(void *args)
{
	static time_t last_sicp_time = 0;
	time_t now;
	double wait_time;

	while (!sicp_stop) {
		_my_sleep(1);
		if (sicp_stop)
			break;

		now = time(NULL);
		wait_time = difftime(now, last_sicp_time);
		if (wait_time < sicp_interval)
			continue;

		last_sicp_time = now;
		_load_sicp_other_cluster();

		pthread_mutex_lock(&sicp_lock);
		list_delete_all(sicp_job_list, &_list_find_sicp_old, "");
		if (slurm_get_debug_flags() & DEBUG_FLAG_SICP)
			_log_sicp_recs();
		pthread_mutex_unlock(&sicp_lock);

		_dump_sicp_state();	/* Has own locking */
	}
	return NULL;
}
Example 4: msg_aggr_sender_init

/* Set up the message aggregation state and spawn the sender thread;
 * returns immediately if the sender is already running or aggregation
 * is effectively disabled (max_msg_cnt <= 1) */
extern void msg_aggr_sender_init(char *host, uint16_t port, uint64_t window,
				 uint64_t max_msg_cnt)
{
	if (msg_collection.running || (max_msg_cnt <= 1))
		return;

	memset(&msg_collection, 0, sizeof(msg_collection_type_t));

	slurm_mutex_init(&msg_collection.aggr_mutex);
	slurm_mutex_init(&msg_collection.mutex);

	slurm_mutex_lock(&msg_collection.mutex);
	slurm_mutex_lock(&msg_collection.aggr_mutex);
	slurm_cond_init(&msg_collection.cond, NULL);
	slurm_set_addr(&msg_collection.node_addr, port, host);
	msg_collection.window = window;
	msg_collection.max_msg_cnt = max_msg_cnt;
	msg_collection.msg_aggr_list = list_create(_msg_aggr_free);
	msg_collection.msg_list = list_create(slurm_free_comp_msg_list);
	msg_collection.max_msgs = false;
	msg_collection.debug_flags = slurm_get_debug_flags();
	slurm_mutex_unlock(&msg_collection.aggr_mutex);
	slurm_mutex_unlock(&msg_collection.mutex);

	slurm_thread_create(&msg_collection.thread_id,
			    &_msg_aggregation_sender, NULL);
}
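As a usage sketch only (the host name, port, and sizes below are invented, and the window units are assumed to be milliseconds), a caller might enable aggregation like this:

/* Hypothetical call site: collect up to 64 messages per 100 ms window
 * and route them toward node "collector0" (values illustrative) */
msg_aggr_sender_init("collector0", 6818, 100, 64);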
Example 5: _load_config

static void _load_config(void)
{
	char *sched_params, *tmp_ptr;

	sched_params = slurm_get_sched_params();
	debug_flags = slurm_get_debug_flags();

	if (sched_params && (tmp_ptr = strstr(sched_params, "interval=")))
		backfill_interval = atoi(tmp_ptr + 9);
	if (backfill_interval < 1) {
		fatal("Invalid backfill scheduler interval: %d",
		      backfill_interval);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_window=")))
		backfill_window = atoi(tmp_ptr + 10) * 60;	/* mins to secs */
	if (backfill_window < 1) {
		fatal("Invalid backfill scheduler window: %d",
		      backfill_window);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "max_job_bf=")))
		max_backfill_job_cnt = atoi(tmp_ptr + 11);
	if (max_backfill_job_cnt < 1) {
		fatal("Invalid backfill scheduler max_job_bf: %d",
		      max_backfill_job_cnt);
	}

	xfree(sched_params);
}
Example 6: init

/*
 * init() is called when the plugin is loaded, before any other functions
 * are called. Put global initialization here.
 */
extern int init(void)
{
	verbose("%s loaded", plugin_name);
	debug_flags = slurm_get_debug_flags();
	return SLURM_SUCCESS;
}
Example 7: _recv_msg

/*****************************************************************************\
 * Read a message (request) from specified file descriptor
 *
 * RET - The message which must be xfreed or
 *       NULL on error
\*****************************************************************************/
static char *_recv_msg(slurm_fd_t new_fd)
{
	char header[10];
	unsigned long size;
	char *buf;

	if (_read_bytes((int) new_fd, header, 9) != 9) {
		err_code = -240;
		err_msg = "failed to read message header";
		error("wiki: failed to read message header %m");
		return NULL;
	}

	if (sscanf(header, "%lu", &size) != 1) {
		err_code = -244;
		err_msg = "malformed message header";
		error("wiki: malformed message header (%s)", header);
		return NULL;
	}

	buf = xmalloc(size + 1);	/* need '\0' on end to print */
	if (_read_bytes((int) new_fd, buf, size) != size) {
		err_code = -246;
		err_msg = "unable to read all message data";
		error("wiki: unable to read data message");
		xfree(buf);
		return NULL;
	}

	if (slurm_get_debug_flags() & DEBUG_FLAG_WIKI)
		info("wiki msg recv:%s", buf);

	return buf;
}
Example 8: jag_common_init

extern void jag_common_init(long in_hertz)
{
	uint32_t profile_opt;

	debug_flags = slurm_get_debug_flags();

	acct_gather_profile_g_get(ACCT_GATHER_PROFILE_RUNNING,
				  &profile_opt);

	/* If we are profiling energy it will be checked at a
	 * different rate, so just grab the last one. */
	if (profile_opt & ACCT_GATHER_PROFILE_ENERGY)
		energy_profile = ENERGY_DATA_NODE_ENERGY;

	if (in_hertz) {
		hertz = in_hertz;
	} else {
		hertz = sysconf(_SC_CLK_TCK);
		if (hertz < 1) {
			error("_get_process_data: unable to get clock rate");
			hertz = 100;	/* default on many systems */
		}
	}

	my_pagesize = getpagesize() / 1024;
}
Example 9: cpu_freq_reconfig

/*
 * reset debug flag (slurmd)
 */
extern void
cpu_freq_reconfig(void)
{
	/* reset local static variables */
	cpu_freq_govs = 0;
	debug_flags = slurm_get_debug_flags();
}
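Examples 6 and 9 together show the typical plugin lifecycle for cached flags: read them once in init() and refresh them in the reconfigure hook, since a cached copy would otherwise keep serving stale values after an scontrol reconfigure. The alternative idiom, used in Examples 1-3, 7, 10, 13 and 14, simply calls slurm_get_debug_flags() at every check, trading a little overhead for always-current flags.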
Example 10: init

int init(void)
{
	char *avail_mcdram_str, *avail_numa_str;
	char *default_mcdram_str, *default_numa_str;
	int rc;

	rc = knl_conf_read(&avail_mcdram, &avail_numa,
			   &default_mcdram, &default_numa);

	if (slurm_get_debug_flags() & DEBUG_FLAG_KNL) {
		avail_mcdram_str = knl_mcdram_str(avail_mcdram);
		avail_numa_str = knl_numa_str(avail_numa);
		default_mcdram_str = knl_mcdram_str(default_mcdram);
		default_numa_str = knl_numa_str(default_numa);
		info("AvailMCDRAM=%s DefaultMCDRAM=%s",
		     avail_mcdram_str, default_mcdram_str);
		info("AvailNUMA=%s DefaultNUMA=%s",
		     avail_numa_str, default_numa_str);
		xfree(avail_mcdram_str);
		xfree(avail_numa_str);
		xfree(default_mcdram_str);
		xfree(default_numa_str);
	}
	return rc;
}
Example 11: gs_init

/* Initialize data structures and start the gang scheduling thread */
extern int gs_init(void)
{
	if (timeslicer_thread_id)
		return SLURM_SUCCESS;

	/* initialize global variables */
	if (gs_debug_flags & DEBUG_FLAG_GANG)
		info("gang: entering gs_init");
	timeslicer_seconds = slurmctld_conf.sched_time_slice;
	gs_debug_flags = slurm_get_debug_flags();
	gs_fast_schedule = slurm_get_fast_schedule();
	gr_type = _get_gr_type();
	preempt_job_list = list_create(_preempt_job_list_del);

	/* load the physical resource count data */
	_load_phys_res_cnt();

	pthread_mutex_lock(&data_mutex);
	_build_parts();
	/* load any currently running jobs */
	_scan_slurm_job_list();
	pthread_mutex_unlock(&data_mutex);

	/* spawn the timeslicer thread */
	_spawn_timeslicer_thread();

	if (gs_debug_flags & DEBUG_FLAG_GANG)
		info("gang: leaving gs_init");
	return SLURM_SUCCESS;
}
Example 12: init

/*
 * init() is called when the plugin is loaded, before any other functions
 * are called. Put global initialization here.
 */
extern int init(void)
{
	debug("%s loaded.", plugin_name);

#ifdef HAVE_NATIVE_CRAY
	int rc;
	struct stat st;

	debug_flags = slurm_get_debug_flags();

	// Create the run directory
	errno = 0;
	rc = mkdir(TASK_CRAY_RUN_DIR, 0755);
	if (rc == -1 && errno != EEXIST) {
		CRAY_ERR("Couldn't create %s: %m", TASK_CRAY_RUN_DIR);
		return SLURM_ERROR;
	}

	// Determine whether to track app status with LLI
	rc = stat(LLI_SPOOL_DIR, &st);
	if (rc == -1) {
		debug("stat %s failed, disabling exit status tracking: %m",
		      LLI_SPOOL_DIR);
		track_status = 0;
	} else {
		track_status = 1;
	}
#endif
	return SLURM_SUCCESS;
}
Example 13: _get_nb_cpus

/* This _get_nb_cpus function is greatly inspired by the Job_Size calculation
 * in job_manager.c, but reused here to find out the requested resources. As
 * stated in the comment of the Job_Size calculation, the first scheduling run
 * may not have the actual total_cpus, so we start by using the amount
 * requested; the actual required cpus will be filled in later. This function
 * estimates the future value of total_cpus if it is not set.
 */
static int _get_nb_cpus(struct job_record *job_ptr)
{
	uint32_t cpu_cnt = 0;
	uint32_t min_nodes = 0;
	uint32_t max_nodes = 0;
	uint32_t req_nodes = 0;
	uint32_t cpus_per_node;

	cpus_per_node = (uint32_t) job_ptr->part_ptr->total_cpus /
			job_ptr->part_ptr->total_nodes;
	min_nodes = MAX(job_ptr->details->min_nodes,
			job_ptr->part_ptr->min_nodes);

	if (job_ptr->details->max_nodes == 0) {
		max_nodes = job_ptr->part_ptr->max_nodes;
	} else {
		max_nodes = MIN(job_ptr->details->max_nodes,
				job_ptr->part_ptr->max_nodes);
	}
	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */

	if (!job_ptr->limit_set_max_nodes && job_ptr->details->max_nodes)
		req_nodes = max_nodes;
	else
		req_nodes = min_nodes;

	if (job_ptr->total_cpus) {
		/* This indicates that nodes have been allocated already, but
		 * the job might have been requeued afterward. */
		cpu_cnt = job_ptr->total_cpus;
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: JobId=%u (%s) total_cpus=%u",
			     plugin_type, job_ptr->job_id, job_ptr->name,
			     cpu_cnt);
		}
	} else {
		cpu_cnt = req_nodes * cpus_per_node;
		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
			info("%s: JobId=%u (%s) req_cpus=%u",
			     plugin_type, job_ptr->job_id, job_ptr->name,
			     cpu_cnt);
		}
	}
	return cpu_cnt;
}
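To make the estimate concrete with illustrative numbers: in a partition with total_cpus=1024 and total_nodes=64, cpus_per_node works out to 16. A queued job with min_nodes=4 and no max_nodes set falls through to req_nodes = min_nodes, so the function returns 4 * 16 = 64 CPUs until a real allocation fills in total_cpus.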
Example 14: _read_file

/* Read file contents into the data variable; return the number of
 * bytes read */
static uint32_t _read_file(const char *file, char **data)
{
	uint32_t data_size = 0;
	int data_allocated, data_read, fd, fsize = 0;
	struct stat f_stat;

	fd = open(file, O_RDONLY);
	if (fd < 0) {
		if (slurm_get_debug_flags() & DEBUG_FLAG_ESEARCH)
			info("%s: Could not open state file %s", plugin_type,
			     file);
		return data_size;
	}
	if (fstat(fd, &f_stat)) {
		if (slurm_get_debug_flags() & DEBUG_FLAG_ESEARCH)
			info("%s: Could not stat state file %s", plugin_type,
			     file);
		close(fd);
		return data_size;
	}

	fsize = f_stat.st_size;
	data_allocated = BUF_SIZE;
	*data = xmalloc(data_allocated);
	while (1) {
		data_read = read(fd, &(*data)[data_size], BUF_SIZE);
		if (data_read < 0) {
			if (errno == EINTR)
				continue;
			else {
				error("%s: Read error on %s: %m", plugin_type,
				      file);
				break;
			}
		} else if (data_read == 0)	/* EOF */
			break;
		data_size += data_read;
		/* Grow the buffer by the amount just consumed so the next
		 * read() always has BUF_SIZE bytes available */
		data_allocated += data_read;
		*data = xrealloc(*data, data_allocated);
	}
	close(fd);

	if (data_size != fsize) {
		error("%s: Could not read entire jobcomp state file %s "
		      "(%d of %d)", plugin_type, file, data_size, fsize);
	}
	return data_size;
}
Example 15: _load_config

static void _load_config(void)
{
	char *sched_params, *tmp_ptr;

	sched_params = slurm_get_sched_params();
	debug_flags = slurm_get_debug_flags();

	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_interval=")))
		backfill_interval = atoi(tmp_ptr + 12);
	if (backfill_interval < 1) {
		fatal("Invalid backfill scheduler bf_interval: %d",
		      backfill_interval);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_window=")))
		backfill_window = atoi(tmp_ptr + 10) * 60;	/* mins to secs */
	if (backfill_window < 1) {
		fatal("Invalid backfill scheduler window: %d",
		      backfill_window);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "max_job_bf=")))
		max_backfill_job_cnt = atoi(tmp_ptr + 11);
	if (max_backfill_job_cnt < 1) {
		fatal("Invalid backfill scheduler max_job_bf: %d",
		      max_backfill_job_cnt);
	}

	/* "bf_res=" is vestigial from version 2.3 and can be removed later.
	 * Only "bf_resolution=" is documented. */
	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_res=")))
		backfill_resolution = atoi(tmp_ptr + 7);
	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_resolution=")))
		backfill_resolution = atoi(tmp_ptr + 14);
	if (backfill_resolution < 1) {
		fatal("Invalid backfill scheduler resolution: %d",
		      backfill_resolution);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_max_job_part=")))
		max_backfill_job_per_part = atoi(tmp_ptr + 16);
	if (max_backfill_job_per_part < 0) {
		fatal("Invalid backfill scheduler bf_max_job_part: %d",
		      max_backfill_job_per_part);
	}

	if (sched_params && (tmp_ptr = strstr(sched_params, "bf_max_job_user=")))
		max_backfill_job_per_user = atoi(tmp_ptr + 16);
	if (max_backfill_job_per_user < 0) {
		fatal("Invalid backfill scheduler bf_max_job_user: %d",
		      max_backfill_job_per_user);
	}

	/* bf_continue makes backfill continue where it was if interrupted */
	if (sched_params && (strstr(sched_params, "bf_continue")))
		backfill_continue = true;

	xfree(sched_params);
}
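The strstr()/atoi() idiom above extends mechanically to new options. A sketch under stated assumptions: "bf_max_time=" is a made-up SchedulerParameters entry (not a real one), and backfill_max_time is assumed to be a static int with a sane default:

/* Hypothetical option, for illustration only; "bf_max_time=" is not a
 * real SchedulerParameters entry. The offset passed to atoi() must
 * equal strlen("bf_max_time="), i.e. 12 */
if (sched_params && (tmp_ptr = strstr(sched_params, "bf_max_time=")))
	backfill_max_time = atoi(tmp_ptr + 12);
if (backfill_max_time < 1) {
	fatal("Invalid backfill scheduler bf_max_time: %d",
	      backfill_max_time);
}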