C++ xcgroup_destroy Function Code Examples

This article compiles typical usage examples of the C++ xcgroup_destroy function. If you are wondering how xcgroup_destroy is used in practice, how to call it, or what it looks like in real code, the curated examples below may help.


The following 15 code examples of the xcgroup_destroy function are shown below, sorted by popularity by default.
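
Before the examples, a minimal sketch of the lifecycle in which xcgroup_destroy typically appears may be useful: xcgroup_create fills an in-memory xcgroup_t handle, xcgroup_instanciate (spelled xcgroup_instantiate in newer Slurm sources) creates the corresponding directory in the cgroup hierarchy, xcgroup_delete removes that directory, and xcgroup_destroy only releases the handle. The sketch below is not taken from any of the projects cited; the function name, the cgroup path and the header paths are illustrative assumptions based on the calls visible in the examples.

#include <unistd.h>               /* getuid(), getgid() */
#include "slurm/slurm_errno.h"    /* SLURM_SUCCESS, SLURM_ERROR (assumed header) */
#include "src/common/xcgroup.h"   /* xcgroup_t, xcgroup_ns_t (assumed header path) */

/* hypothetical helper: create, instantiate, then tear down one cgroup */
static int example_xcgroup_lifecycle(xcgroup_ns_t *ns)
{
	xcgroup_t cg;

	/* build the in-memory handle for a path inside the namespace */
	if (xcgroup_create(ns, &cg, "/slurm/example", getuid(), getgid())
	    != XCGROUP_SUCCESS)
		return SLURM_ERROR;

	/* materialize the cgroup directory (it may already exist) */
	if (xcgroup_instanciate(&cg) != XCGROUP_SUCCESS) {
		xcgroup_destroy(&cg);	/* releases the handle only */
		return SLURM_ERROR;
	}

	/* ... attach tasks, set parameters, etc. ... */

	/* remove the directory, then release the in-memory handle */
	xcgroup_delete(&cg);
	xcgroup_destroy(&cg);

	return SLURM_SUCCESS;
}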

Example 1: _slurm_cgroup_destroy

int _slurm_cgroup_destroy(void)
{
    xcgroup_lock(&freezer_cg);

    if (jobstep_cgroup_path[0] != '\0') {
        if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
            error("_slurm_cgroup_destroy: problem deleting step "
                  "cgroup path %s: %m", step_freezer_cg.path);
            xcgroup_unlock(&freezer_cg);
            return SLURM_ERROR;
        }
        xcgroup_destroy(&step_freezer_cg);
    }

    if (job_cgroup_path[0] != '\0') {
        xcgroup_delete(&job_freezer_cg);
        xcgroup_destroy(&job_freezer_cg);
    }

    if (user_cgroup_path[0] != '\0') {
        xcgroup_delete(&user_freezer_cg);
        xcgroup_destroy(&user_freezer_cg);
    }

    if (slurm_freezer_init) {
        xcgroup_destroy(&slurm_freezer_cg);
    }

    xcgroup_unlock(&freezer_cg);
    xcgroup_destroy(&freezer_cg);
    xcgroup_ns_destroy(&freezer_ns);

    return SLURM_SUCCESS;
}
Developer ID: nqn, Project: slurm-mesos, Lines of code: 34, Source file: proctrack_cgroup.c

Example 2: _slurm_cgroup_destroy

int _slurm_cgroup_destroy(void)
{
	if (slurm_freezer_init)
		xcgroup_lock(&slurm_freezer_cg);

	if (jobstep_cgroup_path[0] != '\0') {
		if ( xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS ) {
			if (slurm_freezer_init)
				xcgroup_unlock(&slurm_freezer_cg);
			return SLURM_ERROR;
		}
		xcgroup_destroy(&step_freezer_cg);
	}

	if (job_cgroup_path[0] != '\0') {
		xcgroup_delete(&job_freezer_cg);
		xcgroup_destroy(&job_freezer_cg);
	}

	if (user_cgroup_path[0] != '\0') {
		xcgroup_delete(&user_freezer_cg);
		xcgroup_destroy(&user_freezer_cg);
	}

	if (slurm_freezer_init) {
		xcgroup_unlock(&slurm_freezer_cg);
		xcgroup_destroy(&slurm_freezer_cg);
	}

	xcgroup_ns_destroy(&freezer_ns);

	return SLURM_SUCCESS;
}
Developer ID: Cray, Project: slurm, Lines of code: 33, Source file: proctrack_cgroup.c

Example 3: jobacct_gather_cgroup_cpuacct_fini

extern int jobacct_gather_cgroup_cpuacct_fini(
	slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t cpuacct_cg;

	if (user_cgroup_path[0] == '\0' ||
	    job_cgroup_path[0] == '\0' ||
	    jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root cpuacct cg.
	 * The release_agent will asynchronously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 */
	if (xcgroup_create(&cpuacct_ns, &cpuacct_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&cpuacct_cg, "tasks", getpid());
		xcgroup_destroy(&cpuacct_cg);
	}

	xcgroup_destroy(&user_cpuacct_cg);
	xcgroup_destroy(&job_cpuacct_cg);
	xcgroup_destroy(&step_cpuacct_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';
	xcgroup_ns_destroy(&cpuacct_ns);

	return SLURM_SUCCESS;
}
Developer ID: IFCA, Project: slurm, Lines of code: 32, Source file: jobacct_gather_cgroup_cpuacct.c

Example 4: fini_system_cgroup

extern void fini_system_cgroup(void)
{
	xcgroup_destroy(&system_cpuset_cg);
	xcgroup_destroy(&system_memory_cg);
	xcgroup_ns_destroy(&cpuset_ns);
	xcgroup_ns_destroy(&memory_ns);
}
Developer ID: jtfrey, Project: slurm, Lines of code: 7, Source file: slurmd_cgroup.c

Example 5: jobacct_cgroup_create_slurm_cg

extern char* jobacct_cgroup_create_slurm_cg(xcgroup_ns_t* ns)
{
	/* we do it here as we do not have access to the conf structure */
	/* in libslurm (src/common/xcgroup.c) */
	xcgroup_t slurm_cg;
	char* pre = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);
#ifdef MULTIPLE_SLURMD
	if (conf->node_name != NULL)
		xstrsubstitute(pre,"%n", conf->node_name);
	else {
		xfree(pre);
		pre = (char*) xstrdup("/slurm");
	}
#endif

	/* create slurm cgroup in the ns (it could already exist) */
	if (xcgroup_create(ns,&slurm_cg,pre,
			   getuid(), getgid()) != XCGROUP_SUCCESS) {
		return pre;
	}

	if (xcgroup_instanciate(&slurm_cg) != XCGROUP_SUCCESS) {
		error("unable to build slurm cgroup for ns %s: %m",
		      ns->subsystems);
		xcgroup_destroy(&slurm_cg);
		return pre;
	} else {
		debug3("slurm cgroup %s successfully created for ns %s: %m",
		       pre,ns->subsystems);
		xcgroup_destroy(&slurm_cg);
	}

	return pre;
}
Developer ID: BYUHPC, Project: slurm, Lines of code: 34, Source file: jobacct_gather_cgroup.c

Example 6: task_cgroup_memory_fini

extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	     job_cgroup_path[0] == '\0' ||
	     jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;
	/*
	 * Lock the root memcg and try to remove the different memcgs.
	 * The reason why we are locking here is that if a concurrent
	 * step is in the process of being executed, it could try to
	 * create the step memcg just after we remove the job memcg,
	 * resulting in a failure.
	 * First, delete step memcg as all the tasks have now exited.
	 * Then, try to remove the job memcg.
	 * If it fails, it is due to the fact that it is still in use by
	 * another running step.
	 * After that, try to remove the user memcg. If it fails, it is due
	 * to jobs that are still running for the same user on the node or
	 * because of tasks attached directly to the user cg by another
	 * component (PAM). The user memcg was created with the
	 * notify_on_release=1 flag (default) so it will be removed
	 * automatically after that.
	 * For now, do not try to detect if only externally attached tasks
	 * are present to see if they can be moved to an orphan memcg.
	 * That could be done in the future, if it is necessary.
	 */
	if (xcgroup_create(&memory_ns,&memory_cg,"",0,0) == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&memory_cg) == XCGROUP_SUCCESS) {
			if (xcgroup_delete(&step_memory_cg) != SLURM_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "memcg : %m");
			if (xcgroup_delete(&job_memory_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job memcg : %m");
			if (xcgroup_delete(&user_memory_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user memcg : %m");
			xcgroup_unlock(&memory_cg);
		} else
			error("task/cgroup: unable to lock root memcg : %m");
		xcgroup_destroy(&memory_cg);
	} else
		error("task/cgroup: unable to create root memcg : %m");

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
Developer ID: johntconklin, Project: slurm, Lines of code: 58, Source file: task_cgroup_memory.c

Example 7: task_cgroup_devices_fini

extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t devices_cg;

	/* Similarly to task_cgroup_{memory,cpuset}_fini(), we must lock the
	 * root cgroup so we don't race with another job step that is
	 * being started.  */
	if (xcgroup_create(&devices_ns, &devices_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		if (xcgroup_lock(&devices_cg) == XCGROUP_SUCCESS) {
			/* First move slurmstepd to the root devices cg
			 * so we can remove the step/job/user devices
			 * cg's.  */
			xcgroup_move_process(&devices_cg, getpid());
			if (xcgroup_delete(&step_devices_cg) != SLURM_SUCCESS)
				debug2("task/cgroup: unable to remove step "
				       "devices : %m");
			if (xcgroup_delete(&job_devices_cg) != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "job devices : %m");
			if (xcgroup_delete(&user_devices_cg)
			    != XCGROUP_SUCCESS)
				debug2("task/cgroup: not removing "
				       "user devices : %m");
			xcgroup_unlock(&devices_cg);
		} else
			error("task/cgroup: unable to lock root devices : %m");
		xcgroup_destroy(&devices_cg);
	} else
		error("task/cgroup: unable to create root devices : %m");

	if ( user_cgroup_path[0] != '\0' )
		xcgroup_destroy(&user_devices_cg);
	if ( job_cgroup_path[0] != '\0' )
		xcgroup_destroy(&job_devices_cg);
	if ( jobstep_cgroup_path[0] != '\0' )
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
Developer ID: Q-Leap-Networks, Project: qlustar-slurm, Lines of code: 49, Source file: task_cgroup_devices.c

Example 8: memcg_initialize

static int memcg_initialize (xcgroup_ns_t *ns, xcgroup_t *cg,
		char *path, uint64_t mem_limit, uid_t uid, gid_t gid)
{
	uint64_t mlb = mem_limit_in_bytes (mem_limit);
	uint64_t mls = swap_limit_in_bytes  (mem_limit);

	if (xcgroup_create (ns, cg, path, uid, gid) != XCGROUP_SUCCESS)
		return -1;

	if (xcgroup_instanciate (cg) != XCGROUP_SUCCESS) {
		xcgroup_destroy (cg);
		return -1;
	}

	xcgroup_set_param (cg, "memory.use_hierarchy","1");
	xcgroup_set_uint64_param (cg, "memory.limit_in_bytes", mlb);
	xcgroup_set_uint64_param (cg, "memory.memsw.limit_in_bytes", mls);

	info ("task/cgroup: %s: alloc=%luMB mem.limit=%luMB memsw.limit=%luMB",
		path,
		(unsigned long) mem_limit,
		(unsigned long) mlb/(1024*1024),
		(unsigned long) mls/(1024*1024));

	return 0;
}
Developer ID: Xarthisius, Project: slurm, Lines of code: 26, Source file: task_cgroup_memory.c

Example 9: task_cgroup_cpuset_fini

extern int task_cgroup_cpuset_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{

	if (user_cgroup_path[0] != '\0')
		xcgroup_destroy(&user_cpuset_cg);
	if (job_cgroup_path[0] != '\0')
		xcgroup_destroy(&job_cpuset_cg);
	if (jobstep_cgroup_path[0] != '\0')
		xcgroup_destroy(&step_cpuset_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&cpuset_ns);

	return SLURM_SUCCESS;
}
Developer ID: kwangiit, Project: SLURMPP, Lines of code: 18, Source file: task_cgroup_cpuset.c

Example 10: _system_cgroup_create_slurm_cg

static char* _system_cgroup_create_slurm_cg (xcgroup_ns_t* ns)
{
	/* we do it here as we do not have access to the conf structure */
	/* in libslurm (src/common/xcgroup.c) */
	xcgroup_t slurm_cg;
	char* pre = (char*) xstrdup(slurm_cgroup_conf.cgroup_prepend);

#ifdef MULTIPLE_SLURMD
	if ( conf->node_name != NULL )
		xstrsubstitute(pre, "%n", conf->node_name);
	else {
		xfree(pre);
		pre = (char*) xstrdup("/slurm");
	}
#endif

	/* create slurm cgroup in the ns
	 * disable notify_on_release to avoid the removal/creation
	 * of this cgroup for each last/first running job on the node */
	if (xcgroup_create(ns, &slurm_cg, pre,
			   getuid(), getgid()) != XCGROUP_SUCCESS) {
		xfree(pre);
		return pre;
	}
	slurm_cg.notify = 0;
	if (xcgroup_instantiate(&slurm_cg) != XCGROUP_SUCCESS) {
		error("system cgroup: unable to build slurm cgroup for "
		      "ns %s: %m",
		      ns->subsystems);
		xcgroup_destroy(&slurm_cg);
		xfree(pre);
		return pre;
	}
	else {
		debug3("system cgroup: slurm cgroup %s successfully created "
		       "for ns %s: %m",
		       pre, ns->subsystems);
		xcgroup_destroy(&slurm_cg);
	}

	return pre;
}
Developer ID: HPCNow, Project: slurm, Lines of code: 42, Source file: slurmd_cgroup.c

Example 11: _slurm_cgroup_destroy

int _slurm_cgroup_destroy(void)
{
	xcgroup_lock(&freezer_cg);

	/*
	 *  First move slurmstepd process to the root cgroup, otherwise
	 *   the rmdir(2) triggered by the calls below will always fail,
	 *   because slurmstepd is still in the cgroup!
	 */
	_move_current_to_root_cgroup(&freezer_ns);

	if (jobstep_cgroup_path[0] != '\0') {
		if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
			debug("_slurm_cgroup_destroy: problem deleting step cgroup path %s: %m",
			      step_freezer_cg.path);
			xcgroup_unlock(&freezer_cg);
			return SLURM_ERROR;
		}
		xcgroup_destroy(&step_freezer_cg);
	}

	if (job_cgroup_path[0] != '\0') {
		xcgroup_delete(&job_freezer_cg);
		xcgroup_destroy(&job_freezer_cg);
	}

	if (user_cgroup_path[0] != '\0') {
		xcgroup_delete(&user_freezer_cg);
		xcgroup_destroy(&user_freezer_cg);
	}

	if (slurm_freezer_init) {
		xcgroup_destroy(&slurm_freezer_cg);
	}

	xcgroup_unlock(&freezer_cg);
	xcgroup_destroy(&freezer_cg);
	xcgroup_ns_destroy(&freezer_ns);

	return SLURM_SUCCESS;
}
Developer ID: rohgarg, Project: slurm, Lines of code: 41, Source file: proctrack_cgroup.c

Example 12: task_cgroup_devices_fini

extern int task_cgroup_devices_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{

	if ( user_cgroup_path[0] != '\0' )
		xcgroup_destroy(&user_devices_cg);
	if ( job_cgroup_path[0] != '\0' )
		xcgroup_destroy(&job_devices_cg);
	if ( jobstep_cgroup_path[0] != '\0' )
		xcgroup_destroy(&step_devices_cg);

	user_cgroup_path[0] = '\0';
	job_cgroup_path[0] = '\0';
	jobstep_cgroup_path[0] = '\0';

	cgroup_allowed_devices_file[0] = '\0';

	xcgroup_ns_destroy(&devices_ns);

	xcpuinfo_fini();
	return SLURM_SUCCESS;
}
Developer ID: FredHutch, Project: slurm, Lines of code: 21, Source file: task_cgroup_devices.c

Example 13: task_cgroup_memory_fini

extern int task_cgroup_memory_fini(slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	     job_cgroup_path[0] == '\0' ||
	     jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root memory cg and remove[*]
	 * the step cgroup to move its allocated pages to its parent.
	 *
	 * [*] Calling rmdir(2) on an empty cgroup moves all resident charged
	 *  pages to the parent (i.e. the job cgroup). (If force_empty were
	 *  used instead, only clean pages would be flushed). This keeps
	 *  resident pagecache pages associated with the job. It is expected
	 *  that the job epilog will then optionally force_empty the
	 *  job cgroup (to flush pagecache), and then rmdir(2) the cgroup
	 *  or wait for release notification from kernel.
	 */
	if (xcgroup_create(&memory_ns,&memory_cg,"",0,0) == XCGROUP_SUCCESS) {
		xcgroup_move_process(&memory_cg, getpid());
		xcgroup_destroy(&memory_cg);
		if (xcgroup_delete(&step_memory_cg) != XCGROUP_SUCCESS)
			error ("cgroup: rmdir step memcg failed: %m");
	}

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
Developer ID: Xarthisius, Project: slurm, Lines of code: 40, Source file: task_cgroup_memory.c

Example 14: _move_current_to_root_cgroup

static int _move_current_to_root_cgroup(xcgroup_ns_t *ns)
{
	xcgroup_t cg;
	int rc;

	if (xcgroup_create(ns, &cg, "", 0, 0) != XCGROUP_SUCCESS)
		return SLURM_ERROR;

	rc = xcgroup_move_process(&cg, getpid());
	xcgroup_destroy(&cg);

	return rc;
}
Developer ID: rohgarg, Project: slurm, Lines of code: 13, Source file: proctrack_cgroup.c

Example 15: jobacct_gather_cgroup_memory_fini

extern int jobacct_gather_cgroup_memory_fini(
	slurm_cgroup_conf_t *slurm_cgroup_conf)
{
	xcgroup_t memory_cg;

	if (user_cgroup_path[0] == '\0' ||
	    job_cgroup_path[0] == '\0' ||
	    jobstep_cgroup_path[0] == '\0')
		return SLURM_SUCCESS;

	/*
	 * Move the slurmstepd back to the root memory cg and force empty
	 * the step cgroup to move its allocated pages to its parent.
	 * The release_agent will asynchronously be called for the step
	 * cgroup. It will do the necessary cleanup.
	 * It would be better if this force_empty mechanism could be done
	 * directly by the memcg implementation when the last task managed
	 * by a cgroup exits, but that cleanup is too difficult to handle
	 * correctly with the current memcg implementation.
	 */
	if (xcgroup_create(&memory_ns, &memory_cg, "", 0, 0)
	    == XCGROUP_SUCCESS) {
		xcgroup_set_uint32_param(&memory_cg, "tasks", getpid());
		xcgroup_destroy(&memory_cg);
		xcgroup_set_param(&step_memory_cg, "memory.force_empty", "1");
	}

	xcgroup_destroy(&user_memory_cg);
	xcgroup_destroy(&job_memory_cg);
	xcgroup_destroy(&step_memory_cg);

	user_cgroup_path[0]='\0';
	job_cgroup_path[0]='\0';
	jobstep_cgroup_path[0]='\0';

	xcgroup_ns_destroy(&memory_ns);

	return SLURM_SUCCESS;
}
Developer ID: IFCA, Project: slurm, Lines of code: 39, Source file: jobacct_gather_cgroup_memory.c
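
Taken together, the snippets above share the same teardown pattern: move the current process (slurmstepd) back to the root cgroup, delete the step, job and user cgroups from the innermost level outwards, then release the in-memory handles and the namespace. The sketch below distills that pattern; the function name and the generic ns/*_cg parameter names are illustrative and do not come from any of the projects cited.

#include <unistd.h>               /* getpid() */
#include "src/common/xcgroup.h"   /* xcgroup_t, xcgroup_ns_t (assumed header path) */

/* hypothetical distillation of the teardown sequence used above */
static void example_cgroup_teardown(xcgroup_ns_t *ns, xcgroup_t *root_cg,
				    xcgroup_t *step_cg, xcgroup_t *job_cg,
				    xcgroup_t *user_cg)
{
	/* 1. move the current process to the root cgroup, otherwise the
	 *    rmdir(2) performed by xcgroup_delete() cannot succeed */
	xcgroup_move_process(root_cg, getpid());

	/* 2. delete from the innermost cgroup outwards; the job and user
	 *    deletions may legitimately fail while other steps or jobs of
	 *    the same user are still running on the node */
	xcgroup_delete(step_cg);
	xcgroup_delete(job_cg);
	xcgroup_delete(user_cg);

	/* 3. release the in-memory handles and the namespace descriptor */
	xcgroup_destroy(step_cg);
	xcgroup_destroy(job_cg);
	xcgroup_destroy(user_cg);
	xcgroup_destroy(root_cg);
	xcgroup_ns_destroy(ns);
}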


Note: The xcgroup_destroy function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.